id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20,300 | helpers.py | simonwagner_mergepbx/tools/helpers.py | import logging
import os
import sys
from . import SRC_DIR
def setup_path():
sys.path.append(SRC_DIR)
def setup_logging(log_level = logging.DEBUG):
logger = logging.getLogger()
logger.setLevel(log_level)
formatter = logging.Formatter('%(levelname)s %(name)s - %(message)s')
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(log_level)
ch.setFormatter(formatter)
logger.addHandler(ch)
| 421 | Python | .py | 14 | 26.428571 | 73 | 0.731343 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,301 | mergepbx.py | simonwagner_mergepbx/src/mergepbx.py | #!/usr/bin/env python
import sys
import os
import zipfile
from argparse import ArgumentParser
import io
from plist.nextstep import NSPlistReader
import pbxproj
from pbxproj.merge import merge_pbxs
from pbxproj.merge.pbxmerge import MergeException
import merge3
def get_argument_parser():
parser = ArgumentParser()
parser.add_argument("base",
help="base version - the last common version of mine and theirs")
parser.add_argument("mine",
help="my version")
parser.add_argument("theirs",
help="their version")
parser.add_argument("-o", "--output",
help="output path for the merged file",
default=None)
parser.add_argument("-d", "--debug",
help="enable debugger on exceptions",
action="store_true")
parser.add_argument("--dump",
help="dump input files to the specified ZIP file(useful for debugging)",
default=None)
parser.add_argument("--clean",
help="remove dangling file references in project files (obsolete, on by default)",
action="store_true",
default=True)
parser.add_argument("--no-clean",
dest="clean",
help="don't remove dangling file references in project files",
action="store_false")
return parser
def install_pdb_exception_handler():
def info(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
print
# ...then start the debugger in post-mortem mode.
pdb.pm()
sys.excepthook = info
def main():
log = sys.stderr
parser = get_argument_parser()
args = parser.parse_args()
if args.output:
output = args.output
else:
output = args.mine
if args.dump is not None:
#we have been asked to dump the files that will be merged
dump_files(args.dump, args.base, args.mine, args.theirs)
if args.debug:
#if debugging is enabled, install the pdb
#handler and let him handle the exception
install_pdb_exception_handler()
merge_pbx_files(args.base, args.mine, args.theirs, output, clean=args.clean)
else:
#if debugging is not enabled, simply report the exception
try:
merge_pbx_files(args.base, args.mine, args.theirs, output, clean=args.clean)
sys.exit(0)
except Exception as e:
log.write("merging failed: %s\n" % str(e))
log.write("falling back to 3-way text merge for Xcode project file...\n")
merge_text_files(args.base, args.mine, args.theirs, output)
sys.exit(1)
def merge_pbx_files(basef, minef, theirsf, mergedf, clean=False):
base, mine, theirs = read_pbxs((basef, minef, theirsf))
if clean:
for name, project in zip((basef, minef, theirsf), (base, mine, theirs)):
files_removed = project.clean_files()
if len(files_removed) > 0:
print "WARNING: %d dangling file references removed from %s" % (len(files_removed), name)
merged_project = merge_pbxs(base, mine, theirs)
pbxproj.write(mergedf, merged_project)
def read_pbxs(pbx_files):
projects = [pbxproj.read(pbx_file) for pbx_file in pbx_files]
return projects
def dump_files(dumpfile, base, mine, theirs):
files = (base, mine, theirs)
arcnames = ("base.pbxproj", "mine.pbxproj", "theirs.pbxproj")
with zipfile.ZipFile(dumpfile, "w") as zf:
for file, arcname in zip(files, arcnames):
zf.write(file, arcname=arcname, compress_type=zipfile.ZIP_DEFLATED)
def merge_text_files(basef, minef, theirsf, mergedf):
baselines, mylines, theirlines = [io.open(f, "r", encoding="utf-8").read().splitlines() for f in (basef, minef, theirsf)]
result = merge3.merge(origtext=baselines, yourtext=mylines, theirtext=theirlines)
mergedlines = result["body"]
io.open(mergedf, "w", encoding="utf-8").write(str.join("\n", mergedlines))
if __name__ == "__main__":
main()
| 4,527 | Python | .py | 102 | 35.029412 | 125 | 0.621677 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,302 | __init__.py | simonwagner_mergepbx/src/merge3/__init__.py | # ============================================================================
# Copyright (C) 1988, 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
# Copyright (c) 2011-2012 University of Pennsylvania
# Copyright (c) 2013-2014 Andreas Schuh
# All rights reserved.
# ============================================================================
"""
@file diff3.py
@brief Three-way file comparison algorithm.
This is a line-by-line translation of the Text::Diff3 Perl module version
0.10 written by MIZUTANI Tociyuki <tociyuki@gmail.com>.
The Text::Diff3 Perl module in turn was based on the diff3 program:
Three way file comparison program (diff3) for Project GNU.
Copyright (C) 1988, 1989, 1992, 1993, 1994 Free Software Foundation, Inc.
Written by Randy Smith
The two-way file comparison procedure was based on the article:
P. Heckel. ``A technique for isolating differences between files.''
Communications of the ACM, Vol. 21, No. 4, page 264, April 1978.
"""
from __future__ import unicode_literals
from operator import xor
# ----------------------------------------------------------------------------
def diff3(yourtext, origtext, theirtext):
"""Three-way diff based on the GNU diff3.c by R. Smith.
@param [in] yourtext Array of lines of your text.
@param [in] origtext Array of lines of original text.
@param [in] theirtext Array of lines of their text.
@returns Array of tuples containing diff results. The tuples consist of
(cmd, loA, hiA, loB, hiB), where cmd is either one of
'0', '1', '2', or 'A'.
"""
# diff result => [(cmd, loA, hiA, loB, hiB), ...]
d2 = (diff(origtext, yourtext), diff(origtext, theirtext))
d3 = []
r3 = [None, 0, 0, 0, 0, 0, 0]
while d2[0] or d2[1]:
# find a continual range in origtext lo2..hi2
# changed by yourtext or by theirtext.
#
# d2[0] 222 222222222
# origtext ...L!!!!!!!!!!!!!!!!!!!!H...
# d2[1] 222222 22 2222222
r2 = ([], [])
if not d2[0]: i = 1
else:
if not d2[1]: i = 0
else:
if d2[0][0][1] <= d2[1][0][1]: i = 0
else: i = 1
j = i
k = xor(i, 1)
hi = d2[j][0][2]
r2[j].append(d2[j].pop(0))
while d2[k] and d2[k][0][1] <= hi + 1:
hi_k = d2[k][0][2]
r2[k].append(d2[k].pop(0))
if hi < hi_k:
hi = hi_k
j = k
k = xor(k, 1)
lo2 = r2[i][ 0][1]
hi2 = r2[j][-1][2]
# take the corresponding ranges in yourtext lo0..hi0
# and in theirtext lo1..hi1.
#
# yourtext ..L!!!!!!!!!!!!!!!!!!!!!!!!!!!!H...
# d2[0] 222 222222222
# origtext ...00!1111!000!!00!111111...
# d2[1] 222222 22 2222222
# theirtext ...L!!!!!!!!!!!!!!!!H...
if r2[0]:
lo0 = r2[0][ 0][3] - r2[0][ 0][1] + lo2
hi0 = r2[0][-1][4] - r2[0][-1][2] + hi2
else:
lo0 = r3[2] - r3[6] + lo2
hi0 = r3[2] - r3[6] + hi2
if r2[1]:
lo1 = r2[1][ 0][3] - r2[1][ 0][1] + lo2
hi1 = r2[1][-1][4] - r2[1][-1][2] + hi2
else:
lo1 = r3[4] - r3[6] + lo2
hi1 = r3[4] - r3[6] + hi2
# detect type of changes
if not r2[0]:
cmd = '1'
elif not r2[1]:
cmd = '0'
elif hi0 - lo0 != hi1 - lo1:
cmd = 'A'
else:
cmd = '2'
for d in range(0, hi0 - lo0 + 1):
(i0, i1) = (lo0 + d - 1, lo1 + d - 1)
ok0 = (0 <= i0 and i0 < len(yourtext))
ok1 = (0 <= i1 and i1 < len(theirtext))
if xor(ok0, ok1) or (ok0 and yourtext[i0] != theirtext[i1]):
cmd = 'A'
break
d3.append((cmd, lo0, hi0, lo1, hi1, lo2, hi2))
return d3
# ----------------------------------------------------------------------------
def merge(yourtext, origtext, theirtext):
res = {'conflict': 0, 'body': []}
d3 = diff3(yourtext, origtext, theirtext)
text3 = (yourtext, theirtext, origtext)
i2 = 1
for r3 in d3:
for lineno in range(i2, r3[5]):
res['body'].append(text3[2][lineno - 1])
if r3[0] == '0':
for lineno in range(r3[1], r3[2] + 1):
res['body'].append(text3[0][lineno - 1])
elif r3[0] != 'A':
for lineno in range(r3[3], r3[4] + 1):
res['body'].append(text3[1][lineno - 1])
else:
res = _conflict_range(text3, r3, res)
i2 = r3[6] + 1
for lineno in range(i2, len(text3[2]) + 1):
res['body'].append(text3[2][lineno - 1])
return res
# ----------------------------------------------------------------------------
def _conflict_range(text3, r3, res):
text_a = [] # their text
for i in range(r3[3], r3[4] + 1):
text_a.append(text3[1][i - 1])
text_b = [] # your text
for i in range(r3[1], r3[2] + 1):
text_b.append(text3[0][i - 1])
d = diff(text_a, text_b)
if _assoc_range(d, 'c') and r3[5] <= r3[6]:
res['conflict'] += 1
res['body'].append('<<<<<<<')
for lineno in range(r3[1], r3[2] + 1):
res['body'].append(text3[0][lineno - 1])
#res['body'].append('|||||||')
#for lineno in range(r3[5], r3[6] + 1):
# res['body'].append(text3[2][lineno - 1])
res['body'].append('=======')
for lineno in range(r3[3], r3[4] + 1):
res['body'].append(text3[1][lineno - 1])
res['body'].append('>>>>>>>')
return res
ia = 1
for r2 in d:
for lineno in range(ia, r2[1]):
res['body'].append(text_a[lineno - 1])
if r2[0] == 'c':
res['conflict'] += 1
res['body'].append('<<<<<<<')
for lineno in range(r2[3], r2[4] + 1):
res['body'].append(text_b[lineno - 1])
res['body'].append('=======')
for lineno in range(r2[1], r2[2] + 1):
res['body'].append(text_a[lineno - 1])
res['body'].append('>>>>>>>')
elif r2[0] == 'a':
for lineno in range(r2[3], r2[4] + 1):
res['body'].append(text_b[lineno - 1])
ia = r2[2] + 1
for lineno in range(ia, len(text_a)):
res['body'].append(text_a[lineno - 1])
return res
# ----------------------------------------------------------------------------
def _assoc_range(diff, diff_type):
for d in diff:
if d[0] == diff_type: return d
return None
# ----------------------------------------------------------------------------
def _diff_heckel(text_a, text_b):
"""Two-way diff based on the algorithm by P. Heckel.
@param [in] text_a Array of lines of first text.
@param [in] text_b Array of lines of second text.
@returns TODO
"""
d = [];
uniq = [(len(text_a), len(text_b))]
(freq, ap, bp) = ({}, {}, {})
for i in range(len(text_a)):
s = text_a[i]
freq[s] = freq.get(s, 0) + 2;
ap [s] = i;
for i in range(len(text_b)):
s = text_b[i]
freq[s] = freq.get(s, 0) + 3;
bp [s] = i;
for s, x in freq.items():
if x == 5: uniq.append((ap[s], bp[s]))
(freq, ap, bp) = ({}, {}, {})
uniq.sort(key=lambda x: x[0])
(a1, b1) = (0, 0)
while a1 < len(text_a) and b1 < len(text_b):
if text_a[a1] != text_b[b1]: break
a1 += 1
b1 += 1
for a_uniq, b_uniq in uniq:
if a_uniq < a1 or b_uniq < b1: continue
(a0, b0) = (a1, b1)
(a1, b1) = (a_uniq - 1, b_uniq - 1)
while a0 <= a1 and b0 <= b1:
if text_a[a1] != text_b[b1]: break
a1 -= 1
b1 -= 1
if a0 <= a1 and b0 <= b1:
d.append(('c', a0 + 1, a1 + 1, b0 + 1, b1 + 1))
elif a0 <= a1:
d.append(('d', a0 + 1, a1 + 1, b0 + 1, b0))
elif b0 <= b1:
d.append(('a', a0 + 1, a0, b0 + 1, b1 + 1))
(a1, b1) = (a_uniq + 1, b_uniq + 1)
while a1 < len(text_a) and b1 < len(text_b):
if text_a[a1] != text_b[b1]: break
a1 += 1
b1 += 1
return d
# ----------------------------------------------------------------------------
diff = _diff_heckel # default two-way diff function used by diff3()
| 8,584 | Python | .py | 217 | 30.926267 | 78 | 0.444218 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,303 | isa.py | simonwagner_mergepbx/src/pbxproj/isa.py | from .core import DictionaryBoundObject
from inspect import isclass
import abc
class PBXISA(object):
__metaclass__ = abc.ABCMeta
def __init__(self, identifier, *args, **kwargs):
self._identifier = identifier
super(PBXISA, self).__init__(identifier, *args, **kwargs)
@abc.abstractmethod
def get_name(self, project):
raise NotImplementedError
def get_identifier(self):
return self._identifier
class PBXISADictionaryBound(DictionaryBoundObject):
def __init__(self, identifier, isa_dict, *args, **kwargs):
super(PBXISADictionaryBound, self).__init__(isa_dict, *args, **kwargs)
class PBXBuildFile(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXBuildFile, self).__init__(*args, **kwargs)
def get_name(self, project):
if self.has_attr("fileRef"):
fileRef = self.fileRef
file = project.get_objects().get(fileRef)
name = file.get_name(project)
else:
name = "(null)"
container = project.phase_of_object(self._identifier)
return "%s in %s" % (name, container)
class AbstractPBXBuildPhase(PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(AbstractPBXBuildPhase, self).__init__(*args, **kwargs)
def get_name(self, project):
if self.has_attr("name"):
name = self.name
return name
else:
default_name = getattr(self.__class__, "DEFAULT_NAME", None)
if default_name is not None:
return default_name
else:
return "(null)"
class PBXCopyFilesBuildPhase(AbstractPBXBuildPhase, PBXISA):
pass
class PBXFrameworksBuildPhase(AbstractPBXBuildPhase, PBXISA):
DEFAULT_NAME = "Frameworks"
class PBXResourcesBuildPhase(AbstractPBXBuildPhase, PBXISA):
DEFAULT_NAME = "Resources"
class PBXShellScriptBuildPhase(AbstractPBXBuildPhase, PBXISA):
DEFAULT_NAME = "ShellScript"
class PBXSourcesBuildPhase(AbstractPBXBuildPhase, PBXISA):
DEFAULT_NAME = "Sources"
class PBXHeadersBuildPhase(AbstractPBXBuildPhase, PBXISA):
DEFAULT_NAME = "Headers"
class PBXContainerItemProxy(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXContainerItemProxy, self).__init__(*args, **kwargs)
def get_name(self, project):
return "PBXContainerItemProxy"
class PBXFileReference(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXFileReference, self).__init__(*args, **kwargs)
def get_name(self, project):
if hasattr(self, "name"):
return self.name
else:
return self.path.split("/")[-1]
class PBXGroup(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXGroup, self).__init__(*args, **kwargs)
def get_name(self, project):
if hasattr(self, "name"):
return self.name
elif hasattr(self, "path"):
return self.path
else:
return None
class PBXLegacyTarget(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXLegacyTarget, self).__init__(*args, **kwargs)
def get_name(self, project):
return self.name
class PBXNativeTarget(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXNativeTarget, self).__init__(*args, **kwargs)
def get_name(self, project):
return self.name
class PBXProject(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXProject, self).__init__(*args, **kwargs)
def get_name(self, project):
return "Project object"
def get_project_name(self, project):
targets = self.targets
objects = project.get_objects()
target_names = [objects.get(target).name for target in targets]
return target_names[0]
class PBXReferenceProxy(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXReferenceProxy, self).__init__(*args, **kwargs)
def get_name(self, project):
return self.path
class PBXTargetDependency(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXTargetDependency, self).__init__(*args, **kwargs)
def get_name(self, project):
return "PBXTargetDependency"
class PBXVariantGroup(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(PBXVariantGroup, self).__init__(*args, **kwargs)
def get_name(self, project):
return self.name
class XCBuildConfiguration(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(XCBuildConfiguration, self).__init__(*args, **kwargs)
def get_name(self, project):
return self.name
class XCConfigurationList(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(XCConfigurationList, self).__init__(*args, **kwargs)
def get_name(self, project):
targets = self.get_targets(project)
target = targets[0]
if isinstance(target, PBXProject):
project_name = target.get_project_name(project)
#whitespaces apparently get removed by XCode
#so we need to remove them too.
target_name = str.join("", project_name.split())
else:
target_name = target.get_name(project)
return "Build configuration list for %s \"%s\"" % (target.isa, target_name)
def get_targets(self, project):
targets = []
identifier = self.get_identifier()
for object_identifier, object in project.get_objects().iterobjects():
if hasattr(object, "buildConfigurationList"):
if object.buildConfigurationList == identifier:
targets.append(object)
return targets
class XCVersionGroup(PBXISA, PBXISADictionaryBound):
def __init__(self, *args, **kwargs):
super(XCVersionGroup, self).__init__(*args, **kwargs)
def get_name(self, project):
if hasattr(self, "name"):
return self.name
else:
return self.path
local_vars = dict(locals())
ISA_MAPPING = dict(
((clazz.__name__, clazz) for varname, clazz in local_vars.iteritems() if isclass(clazz) and issubclass(clazz, PBXISA))
)
def is_known(isa):
return isa in ISA_MAPPING
def create(identifier, isa_dict):
isa = isa_dict["isa"]
isa_class = ISA_MAPPING.get(isa, None)
if isa_class is None:
raise ValueError("Unknown entry in project file: %s" % isa)
return isa_class(identifier, isa_dict)
| 6,678 | Python | .py | 158 | 34.905063 | 122 | 0.656313 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,304 | writer.py | simonwagner_mergepbx/src/pbxproj/writer.py | import re
import codecs
from plist import NSPlistWriter
class PBXProjectPlistWriter(NSPlistWriter):
OBJECTID_RE = re.compile(r"^[0-9A-F]{24}")
COMMENT_BLACKLIST = frozenset((
"remoteGlobalIDString",
"TargetAttributes",
))
def __init__(self, f):
super(PBXProjectPlistWriter, self).__init__(f)
self.multiline = True
self.auto_comment = True
def write_project(self, pbxproj):
self.pbxproj = pbxproj
self.write_header()
self.write_project_dict(pbxproj)
self.write("\n")
def write_project_dict(self, pbxproj):
plist = pbxproj._plist
self.write("{")
self.indent()
for key, value in pbxproj._plist.iteritems():
self.write("\n")
if key == "objects":
self.write_object_dict(pbxproj, value)
elif key == "rootObject":
self.write("rootObject = %s /* Project object */;" % pbxproj.rootObject)
else:
self.write_dict_item(key, value)
self.deindent()
self.write(u"\n}")
def write_object_dict(self, pbxproj, object_dict):
self.write("objects = {")
self.indent()
current_isa = None
def compare_object_items(item1, item2):
key1, value1 = item1
key2, value2 = item2
return cmp(value1["isa"], value2["isa"])
sorted_object_items = sorted(object_dict.iteritems(), cmp=compare_object_items)
for key, value in sorted_object_items:
value_isa = value["isa"]
if value_isa != current_isa:
if current_isa != None:
self.deindent()
self.deindent()
self.write("\n/* End %s section */" % current_isa)
self.indent()
self.indent()
self.deindent()
self.deindent()
self.write("\n\n/* Begin %s section */" % value_isa)
self.indent()
self.indent()
current_isa = value_isa
self.write("\n")
if current_isa in set(("PBXFileReference", "PBXBuildFile")):
self.multiline = False
else:
self.multiline = True
self.write_dict_item(key, value)
#write end of last section
self.deindent()
self.deindent()
self.write("\n/* End %s section */" % current_isa)
self.indent()
self.indent()
self.deindent()
self.write(u"\n};")
def decide_multiline(self, value):
return self.multiline
def write_dict_item(self, key, value, comment = None):
if key in self.COMMENT_BLACKLIST:
old = self.auto_comment
self.auto_comment = False
super(PBXProjectPlistWriter, self).write_dict_item(key, value, comment)
self.auto_comment = old
else:
super(PBXProjectPlistWriter, self).write_dict_item(key, value, comment)
def write_string(self, string):
if self.auto_comment and PBXProjectPlistWriter.OBJECTID_RE.match(string):
comment = self.get_comment_for_object(string)
super(PBXProjectPlistWriter, self).write_string(string)
if comment != None:
self.write(" /* %s */" % comment)
else:
super(PBXProjectPlistWriter, self).write_string(string)
def write_dict_key(self, key, value, comment = None, comment_before_value = False):
if not isinstance(value, dict) or value.get("isa", None) != "PBXProject":
super(PBXProjectPlistWriter, self).write_dict_key(key, value, comment, comment_before_value)
else:
comment = "Project object"
super(PBXProjectPlistWriter, self).write_string(key)
if comment_before_value and comment != None:
self.write(u" /* " + comment + " */")
def get_comment_for_object(self, identifier):
try:
object = self.pbxproj.get_objects().get(identifier)
except KeyError:
return None # if object does not exist, make no comment about it
if object == None:
return None
else:
return object.get_name(self.pbxproj)
def write_pbx(fname_or_f, data, encoding=None):
if encoding is None:
encoding = data.get_encoding() or "utf-8"
#open file if fname_or_f is a string
#else use it as f
if isinstance(fname_or_f, str) or isinstance(fname_or_f, unicode):
f = codecs.open(fname_or_f, "w", encoding=encoding)
else:
f = fname_or_f
#write project
w = PBXProjectPlistWriter(f)
w.write_project(data)
| 4,746 | Python | .py | 117 | 29.880342 | 104 | 0.577107 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,305 | __init__.py | simonwagner_mergepbx/src/pbxproj/__init__.py | from . import core
from . import isa
from . import reader
from . import writer
from . import pbxobjects
from . import merge
read = reader.read_pbx
write = writer.write_pbx
| 173 | Python | .py | 8 | 20.5 | 24 | 0.792683 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,306 | core.py | simonwagner_mergepbx/src/pbxproj/core.py | class DictionaryBoundObject(object):
def __init__(self, dict, restricted_to_keys=None):
self._dict = dict
assert self._dict != None
self._restricted_to_keys = set(restricted_to_keys) if restricted_to_keys != None else None
def __getattr__(self, attr):
if not attr.startswith("_") and self._can_access(attr):
return self._dict[attr]
else:
raise AttributeError(attr)
def __setattr__(self, attr, value):
if not attr.startswith("_") and attr in self._dict and self._can_access(attr):
self._dict[attr] = value
else:
return super(DictionaryBoundObject, self).__setattr__(attr, value)
def _get_dict(self):
return self._dict
def iterkeys():
return self._dict.iterkeys()
def has_attr(self, attr):
return attr in self._dict
def get_attr(self, attr, default=Ellipsis):
if self.has_attr(attr):
return self.get_attr(attr)
else:
if default is not Ellipsis:
return default
else:
raise KeyError(attr)
def _can_access(self, attr):
return self._restricted_to_keys == None or attr in self._restricted_to_keys
| 1,249 | Python | .py | 31 | 30.935484 | 98 | 0.601156 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,307 | pbxobjects.py | simonwagner_mergepbx/src/pbxproj/pbxobjects.py | from itertools import chain
from .core import DictionaryBoundObject
from . import isa
class PBXProjFile(DictionaryBoundObject):
MAPPED_ATTRIBUTES = ("archiveVersion", "objectVersion", "rootObject")
def __init__(self, plist, ignore_unknown_objects=False, encoding=None):
super(self.__class__, self).__init__(plist, self.__class__.MAPPED_ATTRIBUTES)
self._plist = plist
self._classes = PBXClasses(self._plist["classes"])
self._objects = PBXObjects(self._plist["objects"], ignore_unknown_objects)
self._load_phases()
self._encoding = encoding
def _load_phases(self):
phases = []
for identifier, object in self._objects.getobjects():
if isinstance(object, isa.AbstractPBXBuildPhase):
phases.append(object)
self._phases_files = {
phase : set(phase.files) for phase in phases
}
def get_objects(self):
return self._objects
def phase_of_object(self, identifier):
for phase, files in self._phases_files.iteritems():
if identifier in files:
return phase.get_name(self)
def get_encoding(self):
return self._encoding
def clean_files(self):
#remove PBXBuildFile entries that are no longer referenced
files = self._objects.getobjects(isa="PBXBuildFile")
removed_files = []
for (identifier, file) in files:
if not file.has_attr("fileRef"):
continue
if not file.fileRef in self._objects:
removed_files.append(identifier)
self._objects.delete(identifier)
return removed_files
class PBXClasses(object):
def __init__(self, data_dict):
self.data_dict = data_dict
class PBXObjects(object):
def __init__(self, data_dict, ignore_unknown_objects):
self.data_dict = data_dict
self.ignore_unknown_objects = ignore_unknown_objects
def keys(self):
return self.data_dict.keys()
def get(self, key, default=Ellipsis):
if key not in self.data_dict and default is not Ellipsis:
return default
return self._make_isa_object(key, self.data_dict[key])
def __contains__(self, key):
return key in self.data_dict
def delete(self, key):
del self.data_dict[key]
def iterobjects(self, isa=None):
if self.ignore_unknown_objects:
items_iter = ((key, value) for key,value in self.data_dict.iteritems() if isa.is_known(value["isa"]))
else:
items_iter = self.data_dict.iteritems()
return (
(key, self._make_isa_object(key, value)) for key, value in items_iter \
if isa == None or value["isa"] == isa
)
def getobjects(self, isa=None):
return tuple(self.iterobjects(isa))
def getobject(self, isa):
found_objects = self.getobjects(isa)
if len(found_objects) >= 1:
return self.getobjects(isa)[0][1]
else:
return None
def _make_isa_object(self, identifier, isa_dict):
return isa.create(identifier, isa_dict)
| 3,150 | Python | .py | 75 | 33.066667 | 113 | 0.629096 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,308 | reader.py | simonwagner_mergepbx/src/pbxproj/reader.py | try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from plist import NSPlistReader, XMLPlistReader
from .pbxobjects import PBXProjFile
def read_pbx(pbx_file, ignore_unknown_objects=False):
fname_or_f = pbx_file
#open file if fname_or_f is a string
#else use it as f
if isinstance(fname_or_f, str) or isinstance(fname_or_f, unicode):
f = open(fname_or_f)
fname = fname_or_f
else:
f = fname_or_f
fname = None
#sniff the file and choose the right reader implementation
reader_impl, f = _sniff_plist(f)
#read project
reader = reader_impl(f, name=fname)
return PBXProjFile(reader.read(), encoding=reader.get_encoding())
def _sniff_plist(pbx_file):
buffer = StringIO(pbx_file.read())
pbx_file.close()
first_line = buffer.readline()
if "<?xml" in first_line:
reader_impl = XMLPlistReader
else:
reader_impl = NSPlistReader
#reset buffer
buffer.reset()
return (reader_impl, buffer)
| 1,028 | Python | .py | 32 | 27 | 70 | 0.685484 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,309 | coremerge.py | simonwagner_mergepbx/src/pbxproj/merge/coremerge.py | from collections import namedtuple, OrderedDict
from itertools import chain, izip
from orderedset import OrderedSet
KeysDictDiff = namedtuple("KeysDictDiff", ("added", "deleted", "common"))
KeysDictDiff3 = namedtuple("KeysDictDiff", ("mine_added", "theirs_added", "deleted", "common", "conflicting_deleted"))
DictDiff = namedtuple("DictDiff", ("added", "deleted", "updated", "common"))
DictDiff3 = namedtuple("DictDiff3", ("mine_added", "theirs_added", "deleted", "mine_updated", "theirs_updated", "conflicting", "common"))
SetDiff3 = namedtuple("SetDiff3", ("added", "deleted", "common"))
def diff_dict_keys(base, mine):
base_keys = set(base.iterkeys())
mine_keys = set(mine.iterkeys())
added = mine_keys - base_keys
deleted = base_keys - mine_keys
common = base_keys & mine_keys
return KeysDictDiff(added, deleted, common)
def diff3_dict_keys(base, mine, theirs):
base_keys = set(base.iterkeys())
mine_keys = set(mine.iterkeys())
theirs_keys = set(theirs.iterkeys())
diff_base_with_mine = diff_dict_keys(base, mine)
diff_base_with_theirs = diff_dict_keys(base, theirs)
#only the ones which are added exclusivly by mine or theirs
#can be merged without possible conflicts
mine_added = diff_base_with_mine.added - diff_base_with_theirs.added
theirs_added = diff_base_with_theirs.added - diff_base_with_mine.added
#those which are added by both might contain conflicts in their values
#those keys will be saved in common later on
#only the ones which are deleted in both can be savely deleted
deleted = diff_base_with_mine.deleted & diff_base_with_theirs.deleted
#keys that are still present in mine or theirs might have been edited
#and therefore conflict
conflicting_deleted = (mine_keys & diff_base_with_theirs.deleted) | (theirs_keys & diff_base_with_theirs.deleted)
#common keys between mine and theirs (includes keys added by both)
common = (mine_keys & theirs_keys)
return KeysDictDiff3(mine_added, theirs_added, deleted, common, conflicting_deleted)
def diff_dict(base, mine, test_updated = lambda a,b: a != b):
keys_diff = diff_dict_keys(base, mine)
updated_keys = set()
for key in keys_diff.common:
if test_updated(base[key], mine[key]):
updated_keys.add(key)
common = keys_diff.common - updated_keys
return DictDiff(added=keys_diff.added, deleted=keys_diff.deleted, updated=updated_keys, common=common)
def diff3_dict(base, mine, theirs, test_updated = lambda a,b: a != b):
diff_base_with_mine = diff_dict(base, mine, test_updated)
diff_base_with_theirs = diff_dict(base, theirs, test_updated)
diff_mine_with_theirs = diff_dict(mine, theirs, test_updated)
conflicting_update = (diff_base_with_mine.updated & diff_base_with_theirs.updated) - diff_mine_with_theirs.common
conflicting_added = (diff_base_with_mine.added | diff_base_with_theirs.added) & diff_mine_with_theirs.updated
conflicting_deleted = (diff_base_with_mine.deleted & diff_base_with_theirs.updated) | (diff_base_with_theirs.deleted & diff_base_with_mine.updated)
conflicting = conflicting_update | conflicting_added | conflicting_deleted
mine_added = diff_base_with_mine.added - conflicting
theirs_added = diff_base_with_theirs.added - conflicting
deleted = (diff_base_with_theirs.deleted | diff_base_with_mine.deleted) - conflicting
mine_updated = diff_base_with_mine.updated - conflicting
theirs_updated = diff_base_with_theirs.updated - conflicting
common = diff_base_with_mine.common & diff_base_with_theirs.common
return DictDiff3(mine_added=mine_added, theirs_added=theirs_added, deleted=deleted, mine_updated=mine_updated, theirs_updated=theirs_updated, conflicting=conflicting, common=common)
def diff3_set(base, mine, theirs):
added = (mine - base) | (theirs - base)
deleted = set(e for e in base if not e in mine or not e in theirs)
common = base & mine & theirs
return SetDiff3(added=added, deleted=deleted, common=common)
def merge_dict(diff, base, mine, theirs):
assert(len(diff.conflicting) == 0)
result = {}
for key in chain(diff.mine_added, diff.mine_updated):
result[key] = mine[key]
for key in chain(diff.theirs_added, diff.theirs_updated):
result[key] = theirs[key]
for key in diff.common:
result[key] = mine[key]
return result
def merge_key_order(result, base, mine, theirs):
base_keys, mine_keys, theirs_keys = (OrderedSet(k) for k in (base.iterkeys(), mine.iterkeys(), theirs.iterkeys()))
keys_diff = diff3_set(base_keys, mine_keys, theirs_keys)
keys_merged = merge_ordered_set(keys_diff, base_keys, mine_keys, theirs_keys)
result = OrderedDict((key, result[key]) for key in keys_merged)
return result
def merge_ordered_dict(diff, base, mine, theirs):
unordered = merge_dict(diff, base, mine, theirs)
result = merge_key_order(unordered, base, mine, theirs)
return result
# merge an ordered so that with the following holds true
# MINE: m1 - M
# / \
# THEIRS: t1 ----> t2
# the result of the merged version of m1 and t1 (which is called M)
# should be the same when merging t1 and M (that means t1 + diff(t1, M) = M)
def merge_ordered_set(diff, base, mine, theirs):
mine_merged = mine - diff.deleted
theirs_merged = theirs - diff.deleted
mine_added = mine_merged - theirs_merged
merged = OrderedSet()
theirs_merged_iter = iter(theirs_merged)
mine_merged_iter = iter(mine_merged)
for theirs_el in theirs_merged_iter:
for mine_el in mine_merged_iter:
if mine_el in mine_added:
merged.add(mine_el)
else:
break
merged.add(theirs_el)
for mine_el in mine_merged_iter:
merged.add(mine_el)
return merged
def merge_set(diff, base, mine, theirs):
return diff.common | diff.added
| 5,928 | Python | .py | 108 | 49.740741 | 185 | 0.708557 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,310 | pbxmerge.py | simonwagner_mergepbx/src/pbxproj/merge/pbxmerge.py | from collections import namedtuple, OrderedDict
from orderedset import OrderedSet
from inspect import isclass
import logging
from .coremerge import *
from ..pbxobjects import PBXProjFile
class MergeConflict(object):
def __init__(self, *args, **kwargs):
if not "msg" in kwargs:
raise Exception("msg keyword argument is required for %s" % self.__class__.__name__)
self.msg = kwargs["msg"]
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.msg)
class MergeConflictException(Exception):
pass
class MergeException(Exception):
pass
class MergeStrategyManager(object):
def __init__(self, strategies = {}, *args, **kwargs):
strategies = dict(strategies)
strategies.update(kwargs)
self.strategies = {}
for key, strategie_class in strategies.iteritems():
self.strategies[key] = strategie_class(self)
def get_merger(self, strategy):
if not strategy in self.strategies:
raise Exception("strategy not found: %s" % strategy)
else:
return self.strategies[strategy]
def get_project_file_merger(self):
return self.get_merger("PBXProjectFileMerger3")
class Merger(object):
def __init__(self, manager):
self.manager = manager
class _SimpleDictMerger3(Merger):
IGNORE_CONFLICTS_IN_KEYS = set()
def __init__(self, manager):
super(_SimpleDictMerger3, self).__init__(manager)
self.logger = logging.getLogger()
self.merging_functions = {}
for attr_name in dir(self):
attr = getattr(self, attr_name)
if callable(attr) and attr_name.startswith("merge_"):
key = attr_name.replace("merge_", "", 1)
self.merging_functions[key] = attr
def merge(self, base, mine, theirs):
#make a diff for merging
diff3 = diff3_dict(base, mine, theirs)
#remove conflicts in ignored keys
#where we have a special merging function
for key in self.merging_functions:
if key in diff3.conflicting:
diff3.conflicting.remove(key)
#test if we still have conflicts after removing all keys
#that have an auto_merge function
if len(diff3.conflicting) > 0:
result = self.handle_conflicts(base, mine, theirs, diff3)
else:
result = merge_dict(diff3, base, mine, theirs)
assert(result != None)
#resolve conflicts with the declared auto merge functions
result = self.auto_merge(base, mine, theirs, result, diff3)
assert(result != None)
#in the end, sort result
result = merge_key_order(result, base, mine, theirs)
return result
def auto_merge(self, base, mine, theirs, result, diff3):
for name, function in self.merging_functions.iteritems():
result = function(base, mine, theirs, result, diff3)
return result
def handle_conflicts(self, base, mine, theirs, diff3):
raise MergeConflictException("conflict while merging with %s merger" % self.__class__.__name__)
def create_auto_merge_set(attribute, optional = False):
def auto_merge_set(self, base, mine, theirs, result, diff3):
if optional and (False, False, False) == (attribute in base, attribute in mine, attribute in theirs):
return result
values = _get_3(attribute, base, mine, theirs, optional = optional, replace_value = ())
values = Value3(*(OrderedSet(v) for v in values)) #convert to OrderedSet
values_diff = diff3_set(OrderedSet(values.base), OrderedSet(values.mine), OrderedSet(values.theirs))
result[attribute] = tuple(merge_ordered_set(values_diff, values.base, values.mine, values.theirs))
return result
return auto_merge_set
def create_auto_merge_dict(attribute, optional = False):
def auto_merge_dict(self, base, mine, theirs, result, diff3):
if optional and (False, False, False) == (attribute in base, attribute in mine, attribute in theirs):
return result
values = _get_3(attribute, base, mine, theirs, optional = optional, replace_value = {})
values_diff = diff3_dict(values.base, values.mine, values.theirs)
if len(values_diff.conflicting) > 0:
raise MergeException("can't merge %s, conflicting values in dictionary: %r" % (attribute, values_diff.conflicting))
result[attribute] = merge_ordered_dict(values_diff, values.base, values.mine, values.theirs)
return result
return auto_merge_dict
class PBXProjectFileMerger3(Merger):
SUPPORTED_ARCHIVE_VERSIONS = set((1,))
SUPPORTED_OBJECT_VERSIONS = set((46,))
def merge(self, base, mine, theirs):
result = OrderedDict()
#check if the encoding for all project is
#the same, otherwise abort
if base.get_encoding() != theirs.get_encoding() or base.get_encoding() != mine.get_encoding():
raise ValueError("merging projects with different encoding (base=%s, mine=%s, theirs=%s) is not supported." % (base.get_encoding(), mine.get_encoding(), theirs.get_encoding()))
encoding = mine.get_encoding()
#use plist for merging
base, mine, theirs = (base._plist, mine._plist, theirs._plist)
self.merge_archiveVersion(result, base, mine, theirs)
self.merge_classes(result, base, mine, theirs)
self.merge_objectVersion(result, base, mine, theirs)
self.merge_objects(result, base, mine, theirs)
self.merge_rootObject(result, base, mine, theirs)
return PBXProjFile(result, encoding=encoding)
def merge_archiveVersion(self, result, base, mine, theirs):
archiveVersion = _get_3("archiveVersion", base, mine, theirs)
if not archiveVersion.base == archiveVersion.mine or not archiveVersion.base == archiveVersion.theirs:
raise MergeException("can not merge projects with different archiveVersion")
if not int(archiveVersion.base) in self.SUPPORTED_ARCHIVE_VERSIONS:
raise MergeException("can not merge projects with archiveVersion %s" % archiveVersion.base)
result["archiveVersion"] = archiveVersion.base
def merge_classes(self, result, base, mine, theirs):
classes = _get_3("classes", base, mine, theirs)
if tuple(len(d) for d in classes) == (0,0,0):
result["classes"] = {}
else:
raise MergeException("merging classes in pbxproj not supported")
def merge_objectVersion(self, result, base, mine, theirs):
objectVersion = _get_3("objectVersion", base, mine, theirs)
if not objectVersion.base == objectVersion.mine or not objectVersion.base == objectVersion.theirs:
raise MergeException("can not merge projects with different objectVersion")
if not int(objectVersion.base) in self.SUPPORTED_OBJECT_VERSIONS:
raise MergeException("can not merge projects with objectVersion %s" % objectVersion.base)
result["objectVersion"] = objectVersion.base
def merge_objects(self, result, base, mine, theirs):
base_objs, mine_objs, theirs_objs = _get_3("objects", base, mine, theirs)
diff_obj_keys = diff3_dict_keys(base_objs, mine_objs, theirs_objs)
merged_objects = {}
#add objects that are new
for added_object_key in diff_obj_keys.mine_added:
merged_objects[added_object_key] = mine_objs[added_object_key]
for added_object_key in diff_obj_keys.theirs_added:
merged_objects[added_object_key] = theirs_objs[added_object_key]
#for deleted objects, simply do not add them
#for common objects we will have to merge
for common_object_key in diff_obj_keys.common:
mine_obj_isa = mine_objs[common_object_key]["isa"]
base_obj, mine_obj, theirs_obj = _get_3(common_object_key, base_objs, mine_objs, theirs_objs, optional = True, replace_value = {"isa" : mine_obj_isa})
base_isa, mine_isa, theirs_isa = _get_3("isa", base_obj, mine_obj, theirs_obj)
if not base_isa == mine_isa or not base_isa == theirs_isa:
raise MergeException("can't merge objects whose ISA has changed. %s %s, %s, %s" %(common_object_key, base_isa, mine_isa, theirs_isa))
merger_name = base_isa + "Merger3"
merger = self.manager.get_merger(merger_name)
merged_obj = merger.merge(base_obj, mine_obj, theirs_obj)
merged_objects[common_object_key] = merged_obj
result["objects"] = merge_key_order(merged_objects, base_objs, mine_objs, theirs_objs)
def merge_rootObject(self, result, base, mine, theirs):
if mine["rootObject"] != theirs["rootObject"]:
raise MergeConflictException("conflict in rootObject, can't handle that")
else:
result["rootObject"] = mine["rootObject"]
class _AbstractPBXBuildObjectMerger3(_SimpleDictMerger3):
merge_files = create_auto_merge_set("files", optional = True)
class PBXBuildFileMerger3(_AbstractPBXBuildObjectMerger3):
pass
class PBXCopyFilesBuildPhaseMerger3(_AbstractPBXBuildObjectMerger3):
pass
class PBXFileReferenceMerger3(_SimpleDictMerger3):
def merge_lastKnownFileType(self, base, mine, theirs, result, diff3):
#special handling for lastKnownFileType
base_lastKnownFileType, mine_lastKnownFileType, theirs_lastKnownFileType = _get_3("lastKnownFileType", base, mine, theirs, optional = True, replace_value = "")
#if lastKnownFileType is nowhere present, do nothing
if (base_lastKnownFileType, mine_lastKnownFileType, theirs_lastKnownFileType) == ("", "", ""):
return result
if not base_lastKnownFileType == mine_lastKnownFileType or not base_lastKnownFileType == theirs_lastKnownFileType:
#if last known file type is different, well, then we delete it simply and let xcode figure out what it should do
#if however base is empty, then we set it to a known file type
if base_lastKnownFileType == "":
if mine_lastKnownFileType == theirs_lastKnownFileType:
lastKnownFileType = mine_lastKnownFileType
else:
lastKnownFileType = ""
else:
lastKnownFileType = ""
else:
lastKnownFileType = base_lastKnownFileType
result["lastKnownFileType"] = lastKnownFileType
return result
class PBXFrameworksBuildPhaseMerger3(_AbstractPBXBuildObjectMerger3):
pass
class PBXGroupMerger3(_SimpleDictMerger3):
merge_children = create_auto_merge_set("children")
def merge_sourceTree(self, base, mine, theirs, result, diff3):
base_sourceTree, mine_sourceTree, theirs_sourceTree = _get_3("sourceTree", base, mine, theirs)
if not base_sourceTree == mine_sourceTree or not base_sourceTree == theirs_sourceTree:
raise MergeException("can't merge PBXGroup whose sourceTree has changed")
return result
class _AbstractTargetMerger3(_SimpleDictMerger3):
merge_buildPhases = create_auto_merge_set("buildPhases", optional = True)
merge_dependencies = create_auto_merge_set("dependencies", optional = True)
merge_files = create_auto_merge_set("files", optional = True)
class PBXFrameworksBuildPhaseMerger3(_AbstractTargetMerger3):
pass
class PBXResourcesBuildPhaseMerger3(_AbstractTargetMerger3):
pass
class PBXLegacyTargetMerger3(_AbstractTargetMerger3):
pass
class PBXNativeTargetMerger3(_AbstractTargetMerger3):
pass
class PBXShellScriptBuildPhaseMerger3(_AbstractTargetMerger3):
merge_buildPhases = create_auto_merge_set("inputPaths", optional = True)
merge_dependencies = create_auto_merge_set("outputPaths", optional = True)
merge_files = create_auto_merge_set("files", optional = True)
class PBXReferenceProxyMerger3(_SimpleDictMerger3):
pass
class PBXContainerItemProxyMerger3(_SimpleDictMerger3):
pass
class PBXTargetDependencyMerger3(_SimpleDictMerger3):
pass
class PBXProjectMerger3(_SimpleDictMerger3):
merge_knownRegions = create_auto_merge_set("knownRegions", optional = True)
merge_targets = create_auto_merge_set("targets")
merge_attributes = create_auto_merge_dict("attributes", optional = True)
class PBXSourcesBuildPhaseMerger3(_SimpleDictMerger3):
merge_files = create_auto_merge_set("files")
class PBXHeadersBuildPhaseMerger3(_SimpleDictMerger3):
merge_files = create_auto_merge_set("files")
class XCBuildConfigurationMerger3(_SimpleDictMerger3):
def merge_buildSettings(self, base, mine, theirs, result, diff3):
attribute = "buildSettings"
values = _get_3(attribute, base, mine, theirs)
values_diff = diff3_dict(values.base, values.mine, values.theirs)
resolved_conflicts = {}
for conflict in values_diff.conflicting:
#check if conflict happens with a tuple or another list-like type
for value in values:
if not conflict in value:
continue
dict_value = value[conflict]
if not isinstance(dict_value, (tuple, list, set, OrderedSet)):
raise MergeException("can't merge %s, conflicting values in dictionary: %r" % (attribute, values_diff.conflicting))
#ok, we now can merge it with merge_ordered_set as we are sure that it is a tuple or something like that
#and we assume that items are unique
dict_values = Value3(values.base[conflict], values.mine[conflict], values.theirs[conflict])
dict_values_diff = diff3_set(OrderedSet(dict_values.base), OrderedSet(dict_values.mines), OrderedSet(dict_values.theirs))
resolved_conflicts[conflict] = tuple(merge_ordered_set(dict_values_diff, dict_values.base, dict_values.mines, dict_values.theirs))
values_diff.conflicting.remove(conflict) #mark as merged
result[attribute] = merge_ordered_dict(values_diff, values.base, values.mine, values.theirs)
for conflict, resolution in resolved_conflicts.iteritems():
result[attribute] = resolution
return result
class XCConfigurationListMerger3(_SimpleDictMerger3):
merge_buildConfigurations = create_auto_merge_set("buildConfigurations")
class PBXVariantGroupMerger3(_SimpleDictMerger3):
merge_children = create_auto_merge_set("children")
class XCVersionGroupMerger3(_SimpleDictMerger3):
merge_files = create_auto_merge_set("children")
Value3 = namedtuple("Value3", ("base", "mine", "theirs"))
def _get_3(key, base, mine, theirs, optional = False, replace_value = None):
if not optional:
get = lambda d, key: d[key]
else:
get = lambda d, key, replace_value=replace_value: d.get(key, replace_value)
base_value = get(base, key)
mine_value = get(mine, key)
theirs_value = get(theirs, key)
return Value3(base_value, mine_value, theirs_value)
def _find_merger(vars):
classes = ((clazz.__name__, clazz) for varname, clazz in vars.iteritems() if isclass(clazz))
merger_classes = ((name, clazz) for name, clazz in classes if issubclass(clazz, Merger) and name != "Merger")
public_classes = ((name, clazz) for name, clazz in merger_classes if not name.startswith("_"))
return dict(public_classes)
MERGER_MAPPING = _find_merger(dict(locals()))
DEFAULT_MERGE_STRATEGY_MANAGER = MergeStrategyManager(MERGER_MAPPING)
def get_project_file_merger():
return DEFAULT_MERGE_STRATEGY_MANAGER.get_project_file_merger()
| 15,600 | Python | .py | 273 | 48.820513 | 188 | 0.688863 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,311 | __init__.py | simonwagner_mergepbx/src/pbxproj/merge/__init__.py | from . import coremerge
from . import pbxmerge
from .pbxmerge import get_project_file_merger
def merge_pbxs(base, mine, theirs):
merger = get_project_file_merger()
return merger.merge(base, mine, theirs)
| 214 | Python | .py | 6 | 33 | 45 | 0.76699 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,312 | orderedset.py | simonwagner_mergepbx/src/orderedset/orderedset.py | import collections
import weakref
class OrderedSetEntry(object):
def __init__(self, *args, **kwargs):
self.prev = kwargs.get("prev", None)
self.key = kwargs.get("key", None)
self._set_next(kwargs.get("next", None))
@property
def prev(self):
if self._ref_prev is None:
return None
else:
value = self._ref_prev()
return value
@prev.setter
def prev(self, value):
if value is None:
self._ref_prev = None
else:
self._ref_prev = weakref.ref(value)
@property
def next(self):
if self._ref_next is None:
return None
else:
value = self._ref_next()
return value
def _set_next(self, value):
if value is None:
self._ref_next = None
else:
self._ref_next = weakref.ref(value)
@next.setter
def next(self, value):
self._set_next(value)
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.clear()
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
cur = OrderedSetEntry(prev=self.end.prev, key=key, next=self.end)
self.map[key] = cur
self.end.prev.next = cur
self.end.prev = cur
def clear(self):
self.end = OrderedSetEntry()
self.start = OrderedSetEntry()
self.start.key = None
self.end.key = None
self.start.next = self.end
self.end.prev = self.start
self.map = {}
def discard(self, key):
if key in self.map:
entry = self.map.pop(key)
prev = entry.prev
next = entry.next
prev.next = entry.next
next.prev = entry.prev
def __iter__(self):
end = self.end
cur = self.start.next
while not cur is end:
yield cur.key
cur = cur.next
def __reversed__(self):
start = self.start
cur = self.end.prev
while not cur is start:
yield cur.key
cur = cur.prev
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
| 2,872 | Python | .py | 91 | 22.296703 | 77 | 0.538992 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,313 | __main__.py | simonwagner_mergepbx/src/plist/__main__.py | import sys
from argparse import ArgumentParser
from .nextstep import NSPlistReader
def get_argument_parser():
parser = ArgumentParser()
parser.add_argument("file",
help="The file to parse")
parser.add_argument("debug",
help="If an exception is thrown, start the debugger",
action="store_true")
return parser
def install_pdb_exception_handler():
def info(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
print
# ...then start the debugger in post-mortem mode.
pdb.pm()
sys.excepthook = info
def main():
parser = get_argument_parser()
args = parser.parse_args()
if args.debug:
install_pdb_exception_handler()
f = open(args.file)
r = NSPlistReader(f, name=args.file)
plist = r.read()
if __name__ == "__main__":
main()
| 1,242 | Python | .py | 35 | 27.4 | 77 | 0.609205 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,314 | xml.py | simonwagner_mergepbx/src/plist/xml.py | from __future__ import absolute_import
from xml.dom.minidom import parse as parseXML
import xml.dom
from itertools import islice, izip, tee
from collections import OrderedDict
ENCODING_ALIASES = {
"UTF-8":"UTF8",
}
class XMLPlistReader(object):
def __init__(self, f, name=None):
self._f = f
self.name = name
self._encoding = None
self._elementVisitors = {
u"array" : self._visitArray,
u"dict" : self._visitDict,
u"key" : self._visitKey,
u"string" : self._visitString,
}
def read(self):
#parse file
dom = parseXML(self._f)
self._encoding = dom.encoding
if not dom.doctype.publicId == u"-//Apple//DTD PLIST 1.0//EN":
raise ValueError("The given file is no XML plist file")
#get root
root = dom.documentElement
mainObjectElement = _childElements(root)[0]
return self._visitElement(mainObjectElement)
def _visitElement(self, element):
tag = element.nodeName
if tag in self._elementVisitors:
visitor = self._elementVisitors[tag]
return visitor(element)
else:
raise Exception("unknown element '%s' in XML plist" % tag)
def _visitArray(self, element):
value = []
for child in _childElements(element):
childValue = self._visitElement(child)
value.append(childValue)
return value
def _visitDict(self, element):
childIter = _iterChildElements(element)
items = [(self._visitElement(keyNode), self._visitElement(valueNode)) for (keyNode, valueNode) in _itemsFromIterable(childIter)]
keys = [key for (key, value) in items]
values = [value for (key, value) in items]
return OrderedDict(zip(keys, values))
def _visitKey(self, element):
return _nodeText(element)
def _visitString(self, element):
return _nodeText(element)
def get_encoding(self):
encoding = ENCODING_ALIASES.get(self._encoding, self._encoding)
return encoding
def close(self):
self.f.close()
def _iterChildrenWithType(node, type):
for childNode in node.childNodes:
if childNode.nodeType == type:
yield childNode
def _iterChildElements(node):
return _iterChildrenWithType(node, xml.dom.Node.ELEMENT_NODE)
def _childElements(node):
return list(_iterChildElements(node))
def _nodeText(node):
inodes = _iterChildrenWithType(node, xml.dom.Node.TEXT_NODE)
return unicode.join(u"", (inode.data for inode in inodes))
def _itemsFromIterable(iter):
iters = tee(iter, 2)
ikeys = islice(iters[0], None, None, 2)
ivalues = islice(iters[1], 1, None, 2)
return izip(ikeys, ivalues)
#TODO: create XMLPlistWriter
| 2,816 | Python | .py | 74 | 30.702703 | 136 | 0.650368 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,315 | escape.py | simonwagner_mergepbx/src/plist/escape.py | import re
CONTROL_CHARS = {
u"\n" : u"\\n",
u"\t" : u"\\t",
u"\"" : u"\\\"",
u"\\" : u"\\\\",
}
ESCAPED_CHARS = dict((value, key) for key, value in CONTROL_CHARS.iteritems())
CONTROL_CHAR_RE = re.compile(
unicode.join(u"|", (u"(%s)" % re.escape(char) for char in CONTROL_CHARS.iterkeys()))
)
ESCAPED_CHARS_RE = re.compile(
unicode.join(u"|", (u"(%s)" % re.escape(char) for char in ESCAPED_CHARS.iterkeys()))
)
def escape_string(s):
escaped_s = CONTROL_CHAR_RE.sub(
lambda match: CONTROL_CHARS[match.group()],
s
)
return escaped_s
def unescape_string(s):
unescaped_s = ESCAPED_CHARS_RE.sub(
lambda match: ESCAPED_CHARS[match.group()],
s
)
return unescaped_s
| 743 | Python | .py | 26 | 24.269231 | 88 | 0.601969 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,316 | nextstep.py | simonwagner_mergepbx/src/plist/nextstep.py | import re
from collections import OrderedDict
import codecs
from itertools import izip, izip_longest
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from .antlr import PlistLexer
from .antlr import PlistParser
from .antlr.runtime import antlr3
from .escape import escape_string
class NSParsingException(Exception):
def __init__(self, line_nr, col_nr, name=None):
self.name = name
self.line_nr = line_nr
self.col_nr = col_nr
if self.name is None:
message = "Parsing failure at line %r:%r" % (self.line_nr, self.col_nr)
else:
message = "Parsing failure at line %r:%r" % (self.line_nr, self.col_nr)
super(NSParsingException, self).__init__(message)
class NSPlistReader(object):
CODEC_DEF_RE = re.compile(r"^//\s*!\$\*(.+)\*\$!$") #e.g. "// !$*UTF8*$!"
def __init__(self, f, name=None):
self.f = f
self.name = name
def read(self):
content = self.f.read()
self._encoding = self._detect_encoding(content)
unicode_content = unicode(content, encoding=self._encoding)
stream = antlr3.ANTLRStringStream(unicode_content)
lexer = PlistLexer(stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = PlistParser(tokens)
try:
plist = parser.plist()
except antlr3.exceptions.RecognitionException as e:
raise NSParsingException(line_nr=e.line, col_nr=e.charPositionInLine, name=self.name)
return plist
def _detect_encoding(self, content):
#first line may contain comment that
#includes encoding of the file
splitting = content.split("\n", 1)
first_line = splitting[0]
codec_def_match = self.__class__.CODEC_DEF_RE.match(first_line)
if codec_def_match:
codec_name = codec_def_match.group(1)
return codec_name
else:
return "ascii"
def get_encoding(self):
return self._encoding
def close(self):
self.f.close()
class IndentWriter(object):
ONLY_SPACES_RE = re.compile(r"\A\s+\Z")
def __init__(self, f, indent_char="\t", indent_size=1):
self.f = f
self.indent_char = indent_char
self.indent_size = indent_size
self.indent_count = 0
self.current_indent = ""
def indent(self):
self.indent_count += 1
self.current_indent = self.indent_char*(self.indent_count*self.indent_size)
def deindent(self):
self.indent_count -= 1
self.current_indent = self.indent_char*(self.indent_count*self.indent_size)
def write(self, s):
lines = s.splitlines(True)
indendet_lines = (self.indent_line(line) for line in lines)
self.f.write(str.join("", indendet_lines))
def indent_line(self, line):
if False:
return line
elif line.endswith("\n"):
return line + self.current_indent
else:
return line
def close(self):
self.f.close()
class NSPlistWriter(IndentWriter):
IDENTIFIER_RE = re.compile(r"^([A-Za-z0-9'_\.\$/]+)$")
def __init__(self, f, codec="utf8"):
super(NSPlistWriter, self).__init__(f, indent_char="\t", indent_size=1)
self.codec = codec.upper()
def write_plist(self, plist):
self.write_header()
self.write_value(plist)
def write_header(self):
self.write(u"// !$*%s*$!\n" % self.codec)
def decide_multiline(self, value):
return True
def write_value(self, value):
if isinstance(value, dict):
multiline = self.decide_multiline(value)
if multiline:
self.write_dict_multiline(value)
else:
self.write_dict(value)
elif isinstance(value, tuple) or isinstance(value, set) or isinstance(value, list):
multiline = self.decide_multiline(value)
if multiline:
self.write_set_multiline(value)
else:
self.write_set(value)
else:
self.write_string(value)
def write_string(self, string):
if NSPlistWriter.IDENTIFIER_RE.match(string):
self.write(string)
else:
self.write((u"\"%s\"" % escape_string(string)))
def write_dict_multiline(self, dict, comments = {}):
self.write(u"{")
self.indent()
for key, value in dict.iteritems():
self.write(u"\n")
if key in comments:
comment = comments[key]
else:
comment = None
self.write_dict_item(key, value, comment)
self.deindent()
self.write(u"\n}")
def write_dict(self, dict, comments = {}):
self.write(u"{")
self.indent()
for key, value in dict.iteritems():
if key in comments:
comment = comments[key]
else:
comment = None
self.write_dict_item(key, value, comment)
self.write(" ")
self.deindent()
self.write(u"}")
def write_dict_item(self, key, value, comment = None):
if isinstance(value, dict) or isinstance(value, tuple) or isinstance(value, set) or isinstance(value, list):
comment_before_value = True
else:
comment_before_value = False
self.write_dict_key(key, value, comment, comment_before_value)
self.write(u" = ")
self.write_value(value)
self.write(u";")
if not comment_before_value and comment != None:
self.write(u" /*" + comment + u"*/ ")
def write_dict_key(self, key, value, comment = None, comment_before_value = False):
self.write_string(key)
if comment_before_value and comment != None:
self.write(u" /*" + comment + "*/ ")
def write_set_multiline(self, values, comments = ()):
self.write(u"(")
self.indent()
for value, comment in izip_longest(values, comments, fillvalue=None):
self.write("\n")
self.write_set_item(value, comment)
self.write(u",")
self.deindent()
self.write(u"\n)")
def write_set(self, values, comments = ()):
self.write(u"(")
for value, comment in izip_longest(values, comments, fillvalue=None):
self.write_set_item(value, comment)
self.write(u", ")
self.write(u")")
def write_set_item(self, value, comment = None):
self.write_value(value)
if comment != None:
self.write(u" ")
self.write_comment(comment)
| 6,663 | Python | .py | 173 | 29.34104 | 116 | 0.592024 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,317 | PlistLexer.py | simonwagner_mergepbx/src/plist/antlr/PlistLexer.py | # $ANTLR 3.2 Sep 23, 2009 12:02:23 Plist.g 2013-12-12 18:02:36
import sys
from itertools import chain
from .runtime.antlr3 import *
from .runtime.antlr3.compat import set, frozenset
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
BRACE_OPEN=13
WS=16
ESC_SEQ=7
BRACE_CLOSE=14
WS_CHAR=5
IDENTIFIER=6
DICTIONARY_SEPERATOR=10
ARRAY_SEPERATOR=9
HEX_DIGIT=17
ASSIGNMENT=15
COMMENT=4
EOF=-1
BRACKET_CLOSE=12
STRING=8
BRACKET_OPEN=11
"""
yield all numbers from start to stop, including start and stop
"""
def range_inc(start, stop):
current = start
while current <= stop:
yield current
current += 1
class PlistLexer(Lexer):
grammarFileName = "Plist.g"
antlr_version = version_str_to_tuple("3.1 Sep 23, 2009 12:02:23")
antlr_version_str = "3.1 Sep 23, 2009 12:02:23"
def __init__(self, input=None, state=None):
if state is None:
state = RecognizerSharedState()
super(PlistLexer, self).__init__(input, state)
self.dfa7 = self.DFA7(
self, 7,
eot = self.DFA7_eot,
eof = self.DFA7_eof,
min = self.DFA7_min,
max = self.DFA7_max,
accept = self.DFA7_accept,
special = self.DFA7_special,
transition = self.DFA7_transition
)
def displayRecognitionError(self, tokenNames, exception):
pass
# $ANTLR start "COMMENT"
def mCOMMENT(self, ):
try:
_type = COMMENT
_channel = DEFAULT_CHANNEL
# Plist.g:28:5: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' | '/*' ( options {greedy=false; } : . )* '*/' )
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == 47) :
LA4_1 = self.input.LA(2)
if (LA4_1 == 47) :
alt4 = 1
elif (LA4_1 == 42) :
alt4 = 2
else:
nvae = NoViableAltException("", 4, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 4, 0, self.input)
raise nvae
if alt4 == 1:
# Plist.g:28:9: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n'
pass
self.match("//")
# Plist.g:28:14: (~ ( '\\n' | '\\r' ) )*
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
if ((0 <= LA1_0 <= 9) or (11 <= LA1_0 <= 12) or (14 <= LA1_0 <= 65535)) :
alt1 = 1
if alt1 == 1:
# Plist.g:28:14: ~ ( '\\n' | '\\r' )
pass
LA1_2 = self.input.LA(1)
if (0 <= LA1_2 <= 9) or (11 <= LA1_2 <= 12) or (14 <= LA1_2 <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop1
# Plist.g:28:28: ( '\\r' )?
alt2 = 2
LA2_0 = self.input.LA(1)
if (LA2_0 == 13) :
alt2 = 1
if alt2 == 1:
# Plist.g:28:28: '\\r'
pass
self.match(13)
self.match(10)
#action start
_channel=HIDDEN;
#action end
elif alt4 == 2:
# Plist.g:29:9: '/*' ( options {greedy=false; } : . )* '*/'
pass
self.match("/*")
# Plist.g:29:14: ( options {greedy=false; } : . )*
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
if (LA3_0 == 42) :
LA3_1 = self.input.LA(2)
if (LA3_1 == 47) :
alt3 = 2
elif ((0 <= LA3_1 <= 46) or (48 <= LA3_1 <= 65535)) :
alt3 = 1
elif ((0 <= LA3_0 <= 41) or (43 <= LA3_0 <= 65535)) :
alt3 = 1
if alt3 == 1:
# Plist.g:29:42: .
pass
self.matchAny()
else:
break #loop3
self.match("*/")
#action start
_channel=HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "COMMENT"
# $ANTLR start "IDENTIFIER"
IDENTIFIER_SET = frozenset(chain(
range_inc(0,8),
range_inc(11,12),
range_inc(14,31),
range_inc(33,33),
range_inc(35,39),
range_inc(42,43),
range_inc(45,58),
range_inc(63,122),
range_inc(124,124)
))
def mIDENTIFIER(self, ):
try:
_type = IDENTIFIER
_channel = DEFAULT_CHANNEL
# Plist.g:33:5: ( (~ ( ';' | WS_CHAR | '=' | '(' | ')' | '{' | '}' | ',' | '\"' | '<' | '>' ) )+ )
# Plist.g:33:7: (~ ( ';' | WS_CHAR | '=' | '(' | ')' | '{' | '}' | ',' | '\"' | '<' | '>' ) )+
pass
# Plist.g:33:7: (~ ( ';' | WS_CHAR | '=' | '(' | ')' | '{' | '}' | ',' | '\"' | '<' | '>' ) )+
cnt5 = 0
while True: #loop5
alt5 = 2
LA5_0 = self.input.LA(1)
if LA5_0 in self.IDENTIFIER_SET or (126 <= LA5_0 <= 65535):
alt5 = 1
if alt5 == 1:
# Plist.g:33:8: ~ ( ';' | WS_CHAR | '=' | '(' | ')' | '{' | '}' | ',' | '\"' | '<' | '>' )
pass
la = self.input.LA(1)
if la in self.IDENTIFIER_SET or (126 <= la <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
if cnt5 >= 1:
break #loop5
eee = EarlyExitException(5, self.input)
raise eee
cnt5 += 1
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "IDENTIFIER"
# $ANTLR start "STRING"
def mSTRING(self, ):
try:
_type = STRING
_channel = DEFAULT_CHANNEL
# Plist.g:37:5: ( '\"' ( ESC_SEQ | ~ ( '\\\\' | '\"' ) )* '\"' )
# Plist.g:37:8: '\"' ( ESC_SEQ | ~ ( '\\\\' | '\"' ) )* '\"'
pass
self.match(34)
# Plist.g:37:12: ( ESC_SEQ | ~ ( '\\\\' | '\"' ) )*
while True: #loop6
alt6 = 3
LA6_0 = self.input.LA(1)
if (LA6_0 == 92) :
alt6 = 1
elif ((0 <= LA6_0 <= 33) or (35 <= LA6_0 <= 91) or (93 <= LA6_0 <= 65535)) :
alt6 = 2
if alt6 == 1:
# Plist.g:37:14: ESC_SEQ
pass
self.mESC_SEQ()
elif alt6 == 2:
# Plist.g:37:24: ~ ( '\\\\' | '\"' )
pass
LA6_1 = self.input.LA(1)
if (0 <= LA6_1 <= 33) or (35 <= LA6_1 <= 91) or (93 <= LA6_1 <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop6
self.match(34)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STRING"
# $ANTLR start "ARRAY_SEPERATOR"
def mARRAY_SEPERATOR(self, ):
try:
_type = ARRAY_SEPERATOR
_channel = DEFAULT_CHANNEL
# Plist.g:41:2: ( ',' )
# Plist.g:41:4: ','
pass
self.match(44)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ARRAY_SEPERATOR"
# $ANTLR start "DICTIONARY_SEPERATOR"
def mDICTIONARY_SEPERATOR(self, ):
try:
_type = DICTIONARY_SEPERATOR
_channel = DEFAULT_CHANNEL
# Plist.g:45:2: ( ';' )
# Plist.g:45:4: ';'
pass
self.match(59)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DICTIONARY_SEPERATOR"
# $ANTLR start "BRACKET_OPEN"
def mBRACKET_OPEN(self, ):
try:
_type = BRACKET_OPEN
_channel = DEFAULT_CHANNEL
# Plist.g:49:2: ( '(' )
# Plist.g:49:4: '('
pass
self.match(40)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BRACKET_OPEN"
# $ANTLR start "BRACKET_CLOSE"
def mBRACKET_CLOSE(self, ):
try:
_type = BRACKET_CLOSE
_channel = DEFAULT_CHANNEL
# Plist.g:53:2: ( ')' )
# Plist.g:53:4: ')'
pass
self.match(41)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BRACKET_CLOSE"
# $ANTLR start "BRACE_OPEN"
def mBRACE_OPEN(self, ):
try:
_type = BRACE_OPEN
_channel = DEFAULT_CHANNEL
# Plist.g:57:2: ( '{' )
# Plist.g:57:4: '{'
pass
self.match(123)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BRACE_OPEN"
# $ANTLR start "BRACE_CLOSE"
def mBRACE_CLOSE(self, ):
try:
_type = BRACE_CLOSE
_channel = DEFAULT_CHANNEL
# Plist.g:61:2: ( '}' )
# Plist.g:61:4: '}'
pass
self.match(125)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BRACE_CLOSE"
# $ANTLR start "ASSIGNMENT"
def mASSIGNMENT(self, ):
try:
_type = ASSIGNMENT
_channel = DEFAULT_CHANNEL
# Plist.g:65:9: ( '=' )
# Plist.g:65:11: '='
pass
self.match(61)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ASSIGNMENT"
# $ANTLR start "WS"
def mWS(self, ):
try:
_type = WS
_channel = DEFAULT_CHANNEL
# Plist.g:68:4: ( ( WS_CHAR ) )
# Plist.g:68:6: ( WS_CHAR )
pass
# Plist.g:68:6: ( WS_CHAR )
# Plist.g:68:7: WS_CHAR
pass
self.mWS_CHAR()
#action start
_channel=HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "WS"
# $ANTLR start "HEX_DIGIT"
def mHEX_DIGIT(self, ):
try:
# Plist.g:72:11: ( ( '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' ) )
# Plist.g:72:13: ( '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' )
pass
LA0 = self.input.LA(1)
if (48 <= LA0 <= 57) or (65 <= LA0 <= 70) or (97 <= LA0 <= 102):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "HEX_DIGIT"
# $ANTLR start "ESC_SEQ"
def mESC_SEQ(self, ):
try:
# Plist.g:76:5: ( '\\\\' ( 'b' | 't' | 'n' | 'f' | 'r' | '\\\"' | '\\'' | '\\\\' ) )
# Plist.g:76:9: '\\\\' ( 'b' | 't' | 'n' | 'f' | 'r' | '\\\"' | '\\'' | '\\\\' )
pass
self.match(92)
LA0 = self.input.LA(1)
if LA0 == 34 or LA0 == 39 or LA0 == 92 or LA0 == 98 or LA0 == 102 or LA0 == 110 or LA0 == 114 or LA0 == 116:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "ESC_SEQ"
# $ANTLR start "WS_CHAR"
def mWS_CHAR(self, ):
try:
# Plist.g:80:5: ( ( ' ' | '\\t' | '\\r' | '\\n' ) )
# Plist.g:80:9: ( ' ' | '\\t' | '\\r' | '\\n' )
pass
LA0 = self.input.LA(1)
if (9 <= LA0 <= 10) or LA0 == 13 or LA0 == 32:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "WS_CHAR"
def mTokens(self):
# Plist.g:1:8: ( COMMENT | IDENTIFIER | STRING | ARRAY_SEPERATOR | DICTIONARY_SEPERATOR | BRACKET_OPEN | BRACKET_CLOSE | BRACE_OPEN | BRACE_CLOSE | ASSIGNMENT | WS )
alt7 = 11
alt7 = self.dfa7.predict(self.input)
if alt7 == 1:
# Plist.g:1:10: COMMENT
pass
self.mCOMMENT()
elif alt7 == 2:
# Plist.g:1:18: IDENTIFIER
pass
self.mIDENTIFIER()
elif alt7 == 3:
# Plist.g:1:29: STRING
pass
self.mSTRING()
elif alt7 == 4:
# Plist.g:1:36: ARRAY_SEPERATOR
pass
self.mARRAY_SEPERATOR()
elif alt7 == 5:
# Plist.g:1:52: DICTIONARY_SEPERATOR
pass
self.mDICTIONARY_SEPERATOR()
elif alt7 == 6:
# Plist.g:1:73: BRACKET_OPEN
pass
self.mBRACKET_OPEN()
elif alt7 == 7:
# Plist.g:1:86: BRACKET_CLOSE
pass
self.mBRACKET_CLOSE()
elif alt7 == 8:
# Plist.g:1:100: BRACE_OPEN
pass
self.mBRACE_OPEN()
elif alt7 == 9:
# Plist.g:1:111: BRACE_CLOSE
pass
self.mBRACE_CLOSE()
elif alt7 == 10:
# Plist.g:1:123: ASSIGNMENT
pass
self.mASSIGNMENT()
elif alt7 == 11:
# Plist.g:1:134: WS
pass
self.mWS()
# lookup tables for DFA #7
DFA7_eot = DFA.unpack(
u"\1\uffff\1\2\12\uffff\3\2\1\uffff\2\2\1\17"
)
DFA7_eof = DFA.unpack(
u"\23\uffff"
)
DFA7_min = DFA.unpack(
u"\1\0\1\52\12\uffff\3\0\1\uffff\3\0"
)
DFA7_max = DFA.unpack(
u"\1\uffff\1\57\12\uffff\3\uffff\1\uffff\3\uffff"
)
DFA7_accept = DFA.unpack(
u"\2\uffff\1\2\1\3\1\4\1\5\1\6\1\7\1\10\1\11\1\12\1\13\3\uffff\1"
u"\1\3\uffff"
)
DFA7_special = DFA.unpack(
u"\1\5\13\uffff\1\0\1\6\1\1\1\uffff\1\4\1\2\1\3"
)
DFA7_transition = [
DFA.unpack(u"\11\2\2\13\2\2\1\13\22\2\1\13\1\2\1\3\5\2\1\6\1\7\2"
u"\2\1\4\2\2\1\1\13\2\1\5\1\uffff\1\12\1\uffff\74\2\1\10\1\2\1\11"
u"\uff82\2"),
DFA.unpack(u"\1\15\4\uffff\1\14"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\11\16\2\17\2\16\1\17\22\16\1\17\1\16\1\17\5\16\2\17"
u"\2\16\1\17\16\16\4\17\74\16\1\17\1\16\1\17\uff82\16"),
DFA.unpack(u"\11\21\2\17\2\21\1\17\22\21\1\17\1\21\1\17\5\21\2\17"
u"\1\20\1\21\1\17\16\21\4\17\74\21\1\17\1\21\1\17\uff82\21"),
DFA.unpack(u"\11\16\2\17\2\16\1\17\22\16\1\17\1\16\1\17\5\16\2\17"
u"\2\16\1\17\16\16\4\17\74\16\1\17\1\16\1\17\uff82\16"),
DFA.unpack(u""),
DFA.unpack(u"\11\21\2\17\2\21\1\17\22\21\1\17\1\21\1\17\5\21\2\17"
u"\1\20\1\21\1\17\2\21\1\22\13\21\4\17\74\21\1\17\1\21\1\17\uff82"
u"\21"),
DFA.unpack(u"\11\21\2\17\2\21\1\17\22\21\1\17\1\21\1\17\5\21\2\17"
u"\1\20\1\21\1\17\16\21\4\17\74\21\1\17\1\21\1\17\uff82\21"),
DFA.unpack(u"\11\21\2\uffff\2\21\1\uffff\22\21\1\uffff\1\21\1\uffff"
u"\5\21\2\uffff\1\20\1\21\1\uffff\16\21\4\uffff\74\21\1\uffff\1\21"
u"\1\uffff\uff82\21")
]
# class definition for DFA #7
class DFA7(DFA):
pass
SET1 = frozenset(chain(
range_inc(0,8),
range_inc(11,12),
range_inc(14,31),
range_inc(33,33),
range_inc(35,39),
range_inc(42,43),
range_inc(45, 58),
range_inc(63, 122),
range_inc(124,124),
))
SET2 = frozenset(chain(
range_inc(9, 10),
range_inc(13,13),
range_inc(32, 32),
range_inc(34, 34),
range_inc(40, 41),
range_inc(44, 44),
range_inc(59, 62),
range_inc(123, 123),
range_inc(125, 125),
))
def specialStateTransition(self_, s, input):
# convince pylint that my self_ magic is ok ;)
# pylint: disable-msg=E0213
# pretend we are a member of the recognizer
# thus semantic predicates can be evaluated
self = self_.recognizer
_s = s
if s == 0:
LA7_12 = input.LA(1)
s = -1
if LA7_12 in self_.SET1 or (126 <= LA7_12 <= 65535):
s = 14
elif LA7_12 in self_.SET2:
s = 15
else:
s = 2
if s >= 0:
return s
elif s == 1:
LA7_14 = input.LA(1)
s = -1
if LA7_14 in self_.SET2:
s = 15
elif LA7_14 in self_.SET1 or (126 <= LA7_14 <= 65535):
s = 14
else:
s = 2
if s >= 0:
return s
elif s == 2:
LA7_17 = input.LA(1)
s = -1
if (LA7_17 == 42):
s = 16
elif LA7_17 in self_.SET1 or (126 <= LA7_17 <= 65535):
s = 17
elif LA7_17 in self_.SET2:
s = 15
else:
s = 2
if s >= 0:
return s
elif s == 3:
LA7_18 = input.LA(1)
s = -1
if (LA7_18 == 42):
s = 16
elif LA7_18 in self_.SET1 or (126 <= LA7_18 <= 65535):
s = 17
else:
s = 15
if s >= 0:
return s
elif s == 4:
LA7_16 = input.LA(1)
s = -1
if (LA7_16 == 47):
s = 18
elif (LA7_16 == 42):
s = 16
elif LA7_16 in self_.SET1 or (126 <= LA7_16 <= 65535):
s = 17
elif LA7_16 in self_.SET2:
s = 15
else:
s = 2
if s >= 0:
return s
elif s == 5:
LA7_0 = input.LA(1)
s = -1
if (LA7_0 == 47):
s = 1
elif LA7_0 in self_.SET1 or (126 <= LA7_0 <= 65535):
s = 2
elif (LA7_0 == 34):
s = 3
elif (LA7_0 == 44):
s = 4
elif (LA7_0 == 59):
s = 5
elif (LA7_0 == 40):
s = 6
elif (LA7_0 == 41):
s = 7
elif (LA7_0 == 123):
s = 8
elif (LA7_0 == 125):
s = 9
elif (LA7_0 == 61):
s = 10
elif ((9 <= LA7_0 <= 10) or LA7_0 == 13 or LA7_0 == 32):
s = 11
if s >= 0:
return s
elif s == 6:
LA7_13 = input.LA(1)
s = -1
if (LA7_13 == 42):
s = 16
elif LA7_13 in self_.SET1 or (126 <= LA7_13 <= 65535):
s = 17
elif LA7_13 in self_.SET2:
s = 15
else:
s = 2
if s >= 0:
return s
nvae = NoViableAltException(self_.getDescription(), 7, _s, input)
self_.error(nvae)
raise nvae
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
from antlr3.main import LexerMain
main = LexerMain(PlistLexer)
main.stdin = stdin
main.stdout = stdout
main.stderr = stderr
main.execute(argv)
if __name__ == '__main__':
main(sys.argv)
| 22,194 | Python | .py | 634 | 20.970032 | 173 | 0.402122 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,318 | PlistParser.py | simonwagner_mergepbx/src/plist/antlr/PlistParser.py | # $ANTLR 3.2 Sep 23, 2009 12:02:23 Plist.g 2013-12-12 18:02:36
import sys
from .runtime.antlr3 import *
from .runtime.antlr3.compat import set, frozenset
from collections import OrderedDict
from ..escape import unescape_string
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
BRACE_OPEN=13
WS=16
ESC_SEQ=7
BRACE_CLOSE=14
WS_CHAR=5
IDENTIFIER=6
DICTIONARY_SEPERATOR=10
ARRAY_SEPERATOR=9
HEX_DIGIT=17
ASSIGNMENT=15
COMMENT=4
EOF=-1
BRACKET_CLOSE=12
STRING=8
BRACKET_OPEN=11
# token names
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"COMMENT", "WS_CHAR", "IDENTIFIER", "ESC_SEQ", "STRING", "ARRAY_SEPERATOR",
"DICTIONARY_SEPERATOR", "BRACKET_OPEN", "BRACKET_CLOSE", "BRACE_OPEN",
"BRACE_CLOSE", "ASSIGNMENT", "WS", "HEX_DIGIT"
]
class PlistParser(Parser):
grammarFileName = "Plist.g"
antlr_version = version_str_to_tuple("3.1 Sep 23, 2009 12:02:23")
antlr_version_str = "3.1 Sep 23, 2009 12:02:23"
tokenNames = tokenNames
def __init__(self, input, state=None, *args, **kwargs):
if state is None:
state = RecognizerSharedState()
super(PlistParser, self).__init__(input, state, *args, **kwargs)
def displayRecognitionError(self, tokenNames, exception):
pass
# $ANTLR start "plist"
# Plist.g:87:1: plist returns [value] : (lbl_value= dictionary | lbl_value= array );
def plist(self, ):
value = None
lbl_value = None
try:
try:
# Plist.g:91:5: (lbl_value= dictionary | lbl_value= array )
alt1 = 2
LA1_0 = self.input.LA(1)
if (LA1_0 == BRACE_OPEN) :
alt1 = 1
elif (LA1_0 == BRACKET_OPEN) :
alt1 = 2
else:
nvae = NoViableAltException("", 1, 0, self.input)
raise nvae
if alt1 == 1:
# Plist.g:91:8: lbl_value= dictionary
pass
self._state.following.append(self.FOLLOW_dictionary_in_plist474)
lbl_value = self.dictionary()
self._state.following.pop()
elif alt1 == 2:
# Plist.g:91:31: lbl_value= array
pass
self._state.following.append(self.FOLLOW_array_in_plist480)
lbl_value = self.array()
self._state.following.pop()
#action start
value = lbl_value
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "plist"
# $ANTLR start "value"
# Plist.g:93:1: value returns [value] : (lbl_value= dictionary | lbl_value= array | lbl_value= identifier | lbl_value= string );
def value(self, ):
value = None
lbl_value = None
try:
try:
# Plist.g:97:5: (lbl_value= dictionary | lbl_value= array | lbl_value= identifier | lbl_value= string )
alt2 = 4
LA2 = self.input.LA(1)
if LA2 == BRACE_OPEN:
alt2 = 1
elif LA2 == BRACKET_OPEN:
alt2 = 2
elif LA2 == IDENTIFIER:
alt2 = 3
elif LA2 == STRING:
alt2 = 4
else:
nvae = NoViableAltException("", 2, 0, self.input)
raise nvae
if alt2 == 1:
# Plist.g:97:8: lbl_value= dictionary
pass
self._state.following.append(self.FOLLOW_dictionary_in_value508)
lbl_value = self.dictionary()
self._state.following.pop()
elif alt2 == 2:
# Plist.g:97:31: lbl_value= array
pass
self._state.following.append(self.FOLLOW_array_in_value514)
lbl_value = self.array()
self._state.following.pop()
elif alt2 == 3:
# Plist.g:97:49: lbl_value= identifier
pass
self._state.following.append(self.FOLLOW_identifier_in_value520)
lbl_value = self.identifier()
self._state.following.pop()
elif alt2 == 4:
# Plist.g:97:72: lbl_value= string
pass
self._state.following.append(self.FOLLOW_string_in_value526)
lbl_value = self.string()
self._state.following.pop()
#action start
value = lbl_value
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "value"
# $ANTLR start "string"
# Plist.g:99:1: string returns [value] : lbl_string= STRING ;
def string(self, ):
value = None
lbl_string = None
try:
try:
# Plist.g:103:5: (lbl_string= STRING )
# Plist.g:103:9: lbl_string= STRING
pass
lbl_string=self.match(self.input, STRING, self.FOLLOW_STRING_in_string555)
#action start
value = unescape_string(lbl_string.text[1:-1])
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "string"
# $ANTLR start "identifier"
# Plist.g:105:1: identifier returns [value] : lbl_identifier= IDENTIFIER ;
def identifier(self, ):
value = None
lbl_identifier = None
try:
try:
# Plist.g:109:5: (lbl_identifier= IDENTIFIER )
# Plist.g:109:7: lbl_identifier= IDENTIFIER
pass
lbl_identifier=self.match(self.input, IDENTIFIER, self.FOLLOW_IDENTIFIER_in_identifier586)
#action start
value = lbl_identifier.text
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "identifier"
# $ANTLR start "array"
# Plist.g:112:1: array returns [value] : BRACKET_OPEN (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )? ( ARRAY_SEPERATOR )? BRACKET_CLOSE ;
def array(self, ):
value = None
lbl_first_value = None
lbl_value = None
value = []
try:
try:
# Plist.g:116:5: ( BRACKET_OPEN (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )? ( ARRAY_SEPERATOR )? BRACKET_CLOSE )
# Plist.g:116:8: BRACKET_OPEN (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )? ( ARRAY_SEPERATOR )? BRACKET_CLOSE
pass
self.match(self.input, BRACKET_OPEN, self.FOLLOW_BRACKET_OPEN_in_array617)
# Plist.g:116:21: (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )?
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == IDENTIFIER or LA4_0 == STRING or LA4_0 == BRACKET_OPEN or LA4_0 == BRACE_OPEN) :
alt4 = 1
if alt4 == 1:
# Plist.g:116:22: lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )*
pass
self._state.following.append(self.FOLLOW_value_in_array622)
lbl_first_value = self.value()
self._state.following.pop()
#action start
value.append(lbl_first_value)
#action end
# Plist.g:116:84: ( ARRAY_SEPERATOR lbl_value= value )*
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
if (LA3_0 == ARRAY_SEPERATOR) :
LA3_1 = self.input.LA(2)
if (LA3_1 == IDENTIFIER or LA3_1 == STRING or LA3_1 == BRACKET_OPEN or LA3_1 == BRACE_OPEN) :
alt3 = 1
if alt3 == 1:
# Plist.g:116:85: ARRAY_SEPERATOR lbl_value= value
pass
self.match(self.input, ARRAY_SEPERATOR, self.FOLLOW_ARRAY_SEPERATOR_in_array627)
self._state.following.append(self.FOLLOW_value_in_array631)
lbl_value = self.value()
self._state.following.pop()
#action start
value.append(lbl_value)
#action end
else:
break #loop3
# Plist.g:116:155: ( ARRAY_SEPERATOR )?
alt5 = 2
LA5_0 = self.input.LA(1)
if (LA5_0 == ARRAY_SEPERATOR) :
alt5 = 1
if alt5 == 1:
# Plist.g:116:156: ARRAY_SEPERATOR
pass
self.match(self.input, ARRAY_SEPERATOR, self.FOLLOW_ARRAY_SEPERATOR_in_array640)
self.match(self.input, BRACKET_CLOSE, self.FOLLOW_BRACKET_CLOSE_in_array644)
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "array"
# $ANTLR start "dictionary_key"
# Plist.g:119:1: dictionary_key returns [value] : (lbl_key= identifier | lbl_key= string ) ;
def dictionary_key(self, ):
value = None
lbl_key = None
try:
try:
# Plist.g:123:2: ( (lbl_key= identifier | lbl_key= string ) )
# Plist.g:123:6: (lbl_key= identifier | lbl_key= string )
pass
# Plist.g:123:6: (lbl_key= identifier | lbl_key= string )
alt6 = 2
LA6_0 = self.input.LA(1)
if (LA6_0 == IDENTIFIER) :
alt6 = 1
elif (LA6_0 == STRING) :
alt6 = 2
else:
nvae = NoViableAltException("", 6, 0, self.input)
raise nvae
if alt6 == 1:
# Plist.g:123:7: lbl_key= identifier
pass
self._state.following.append(self.FOLLOW_identifier_in_dictionary_key673)
lbl_key = self.identifier()
self._state.following.pop()
elif alt6 == 2:
# Plist.g:123:28: lbl_key= string
pass
self._state.following.append(self.FOLLOW_string_in_dictionary_key679)
lbl_key = self.string()
self._state.following.pop()
#action start
value = lbl_key
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "dictionary_key"
# $ANTLR start "dictionary_entry"
# Plist.g:126:1: dictionary_entry returns [value] : lbl_key= dictionary_key ASSIGNMENT lbl_value= value DICTIONARY_SEPERATOR ;
def dictionary_entry(self, ):
value = None
lbl_key = None
lbl_value = None
try:
try:
# Plist.g:130:5: (lbl_key= dictionary_key ASSIGNMENT lbl_value= value DICTIONARY_SEPERATOR )
# Plist.g:130:8: lbl_key= dictionary_key ASSIGNMENT lbl_value= value DICTIONARY_SEPERATOR
pass
self._state.following.append(self.FOLLOW_dictionary_key_in_dictionary_entry710)
lbl_key = self.dictionary_key()
self._state.following.pop()
self.match(self.input, ASSIGNMENT, self.FOLLOW_ASSIGNMENT_in_dictionary_entry712)
self._state.following.append(self.FOLLOW_value_in_dictionary_entry716)
lbl_value = self.value()
self._state.following.pop()
self.match(self.input, DICTIONARY_SEPERATOR, self.FOLLOW_DICTIONARY_SEPERATOR_in_dictionary_entry718)
#action start
value = (lbl_key, lbl_value)
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "dictionary_entry"
# $ANTLR start "dictionary"
# Plist.g:133:1: dictionary returns [value] : BRACE_OPEN (lbl_entry= dictionary_entry )* BRACE_CLOSE ;
def dictionary(self, ):
value = None
lbl_entry = None
entries = []
try:
try:
# Plist.g:140:5: ( BRACE_OPEN (lbl_entry= dictionary_entry )* BRACE_CLOSE )
# Plist.g:140:8: BRACE_OPEN (lbl_entry= dictionary_entry )* BRACE_CLOSE
pass
self.match(self.input, BRACE_OPEN, self.FOLLOW_BRACE_OPEN_in_dictionary758)
# Plist.g:140:19: (lbl_entry= dictionary_entry )*
while True: #loop7
alt7 = 2
LA7_0 = self.input.LA(1)
if (LA7_0 == IDENTIFIER or LA7_0 == STRING) :
alt7 = 1
if alt7 == 1:
# Plist.g:140:20: lbl_entry= dictionary_entry
pass
self._state.following.append(self.FOLLOW_dictionary_entry_in_dictionary763)
lbl_entry = self.dictionary_entry()
self._state.following.pop()
#action start
entries.append(lbl_entry)
#action end
else:
break #loop7
self.match(self.input, BRACE_CLOSE, self.FOLLOW_BRACE_CLOSE_in_dictionary769)
#action start
value = OrderedDict(entries)
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "dictionary"
# Delegated rules
FOLLOW_dictionary_in_plist474 = frozenset([1])
FOLLOW_array_in_plist480 = frozenset([1])
FOLLOW_dictionary_in_value508 = frozenset([1])
FOLLOW_array_in_value514 = frozenset([1])
FOLLOW_identifier_in_value520 = frozenset([1])
FOLLOW_string_in_value526 = frozenset([1])
FOLLOW_STRING_in_string555 = frozenset([1])
FOLLOW_IDENTIFIER_in_identifier586 = frozenset([1])
FOLLOW_BRACKET_OPEN_in_array617 = frozenset([6, 8, 9, 11, 12, 13])
FOLLOW_value_in_array622 = frozenset([9, 12])
FOLLOW_ARRAY_SEPERATOR_in_array627 = frozenset([6, 8, 11, 13])
FOLLOW_value_in_array631 = frozenset([9, 12])
FOLLOW_ARRAY_SEPERATOR_in_array640 = frozenset([12])
FOLLOW_BRACKET_CLOSE_in_array644 = frozenset([1])
FOLLOW_identifier_in_dictionary_key673 = frozenset([1])
FOLLOW_string_in_dictionary_key679 = frozenset([1])
FOLLOW_dictionary_key_in_dictionary_entry710 = frozenset([15])
FOLLOW_ASSIGNMENT_in_dictionary_entry712 = frozenset([6, 8, 11, 13])
FOLLOW_value_in_dictionary_entry716 = frozenset([10])
FOLLOW_DICTIONARY_SEPERATOR_in_dictionary_entry718 = frozenset([1])
FOLLOW_BRACE_OPEN_in_dictionary758 = frozenset([6, 8, 11, 13, 14])
FOLLOW_dictionary_entry_in_dictionary763 = frozenset([6, 8, 11, 13, 14])
FOLLOW_BRACE_CLOSE_in_dictionary769 = frozenset([1])
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
from antlr3.main import ParserMain
main = ParserMain("PlistLexer", PlistParser)
main.stdin = stdin
main.stdout = stdout
main.stderr = stderr
main.execute(argv)
if __name__ == '__main__':
main(sys.argv)
| 16,877 | Python | .py | 378 | 28.78836 | 159 | 0.523288 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,319 | treewizard.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/treewizard.py | """ @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import INVALID_TOKEN_TYPE
from .tokens import CommonToken
from .tree import CommonTree, CommonTreeAdaptor
def computeTokenTypes(tokenNames):
"""
Compute a dict that is an inverted index of
tokenNames (which maps int token types to names).
"""
if tokenNames is None:
return {}
return dict((name, type) for type, name in enumerate(tokenNames))
## token types for pattern parser
EOF = -1
BEGIN = 1
END = 2
ID = 3
ARG = 4
PERCENT = 5
COLON = 6
DOT = 7
class TreePatternLexer(object):
def __init__(self, pattern):
## The tree pattern to lex like "(A B C)"
self.pattern = pattern
## Index into input string
self.p = -1
## Current char
self.c = None
## How long is the pattern in char?
self.n = len(pattern)
## Set when token type is ID or ARG
self.sval = None
self.error = False
self.consume()
__idStartChar = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
)
__idChar = __idStartChar | frozenset('0123456789')
def nextToken(self):
self.sval = ""
while self.c != EOF:
if self.c in (' ', '\n', '\r', '\t'):
self.consume()
continue
if self.c in self.__idStartChar:
self.sval += self.c
self.consume()
while self.c in self.__idChar:
self.sval += self.c
self.consume()
return ID
if self.c == '(':
self.consume()
return BEGIN
if self.c == ')':
self.consume()
return END
if self.c == '%':
self.consume()
return PERCENT
if self.c == ':':
self.consume()
return COLON
if self.c == '.':
self.consume()
return DOT
if self.c == '[': # grab [x] as a string, returning x
self.consume()
while self.c != ']':
if self.c == '\\':
self.consume()
if self.c != ']':
self.sval += '\\'
self.sval += self.c
else:
self.sval += self.c
self.consume()
self.consume()
return ARG
self.consume()
self.error = True
return EOF
return EOF
def consume(self):
self.p += 1
if self.p >= self.n:
self.c = EOF
else:
self.c = self.pattern[self.p]
class TreePatternParser(object):
def __init__(self, tokenizer, wizard, adaptor):
self.tokenizer = tokenizer
self.wizard = wizard
self.adaptor = adaptor
self.ttype = tokenizer.nextToken() # kickstart
def pattern(self):
if self.ttype == BEGIN:
return self.parseTree()
elif self.ttype == ID:
node = self.parseNode()
if self.ttype == EOF:
return node
return None # extra junk on end
return None
def parseTree(self):
if self.ttype != BEGIN:
return None
self.ttype = self.tokenizer.nextToken()
root = self.parseNode()
if root is None:
return None
while self.ttype in (BEGIN, ID, PERCENT, DOT):
if self.ttype == BEGIN:
subtree = self.parseTree()
self.adaptor.addChild(root, subtree)
else:
child = self.parseNode()
if child is None:
return None
self.adaptor.addChild(root, child)
if self.ttype != END:
return None
self.ttype = self.tokenizer.nextToken()
return root
def parseNode(self):
# "%label:" prefix
label = None
if self.ttype == PERCENT:
self.ttype = self.tokenizer.nextToken()
if self.ttype != ID:
return None
label = self.tokenizer.sval
self.ttype = self.tokenizer.nextToken()
if self.ttype != COLON:
return None
self.ttype = self.tokenizer.nextToken() # move to ID following colon
# Wildcard?
if self.ttype == DOT:
self.ttype = self.tokenizer.nextToken()
wildcardPayload = CommonToken(0, ".")
node = WildcardTreePattern(wildcardPayload)
if label is not None:
node.label = label
return node
# "ID" or "ID[arg]"
if self.ttype != ID:
return None
tokenName = self.tokenizer.sval
self.ttype = self.tokenizer.nextToken()
if tokenName == "nil":
return self.adaptor.nil()
text = tokenName
# check for arg
arg = None
if self.ttype == ARG:
arg = self.tokenizer.sval
text = arg
self.ttype = self.tokenizer.nextToken()
# create node
treeNodeType = self.wizard.getTokenType(tokenName)
if treeNodeType == INVALID_TOKEN_TYPE:
return None
node = self.adaptor.createFromType(treeNodeType, text)
if label is not None and isinstance(node, TreePattern):
node.label = label
if arg is not None and isinstance(node, TreePattern):
node.hasTextArg = True
return node
class TreePattern(CommonTree):
"""
When using %label:TOKENNAME in a tree for parse(), we must
track the label.
"""
def __init__(self, payload):
CommonTree.__init__(self, payload)
self.label = None
self.hasTextArg = None
def toString(self):
if self.label is not None:
return '%' + self.label + ':' + CommonTree.toString(self)
else:
return CommonTree.toString(self)
class WildcardTreePattern(TreePattern):
pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
"""This adaptor creates TreePattern objects for use during scan()"""
def createWithPayload(self, payload):
return TreePattern(payload)
class TreeWizard(object):
"""
Build and navigate trees with this object. Must know about the names
of tokens so you have to pass in a map or array of token names (from which
this class can build the map). I.e., Token DECL means nothing unless the
class can translate it to a token type.
In order to create nodes and navigate, this class needs a TreeAdaptor.
This class can build a token type -> node index for repeated use or for
iterating over the various nodes with a particular type.
This class works in conjunction with the TreeAdaptor rather than moving
all this functionality into the adaptor. An adaptor helps build and
navigate trees using methods. This class helps you do it with string
patterns like "(A B C)". You can create a tree from that pattern or
match subtrees against it.
"""
def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
self.adaptor = adaptor
if typeMap is None:
self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
else:
if tokenNames is not None:
raise ValueError("Can't have both tokenNames and typeMap")
self.tokenNameToTypeMap = typeMap
def getTokenType(self, tokenName):
"""Using the map of token names to token types, return the type."""
try:
return self.tokenNameToTypeMap[tokenName]
except KeyError:
return INVALID_TOKEN_TYPE
def create(self, pattern):
"""
Create a tree or node from the indicated tree pattern that closely
follows ANTLR tree grammar tree element syntax:
(root child1 ... child2).
You can also just pass in a node: ID
Any node can have a text argument: ID[foo]
(notice there are no quotes around foo--it's clear it's a string).
nil is a special name meaning "give me a nil node". Useful for
making lists: (nil A B C) is a list of A B C.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, self.adaptor)
return parser.pattern()
def index(self, tree):
"""Walk the entire tree and make a node name to nodes mapping.
For now, use recursion but later nonrecursive version may be
more efficient. Returns a dict int -> list where the list is
of your AST node type. The int is the token type of the node.
"""
m = {}
self._index(tree, m)
return m
def _index(self, t, m):
"""Do the work for index"""
if t is None:
return
ttype = self.adaptor.getType(t)
elements = m.get(ttype)
if elements is None:
m[ttype] = elements = []
elements.append(t)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._index(child, m)
def find(self, tree, what):
"""Return a list of matching token.
what may either be an integer specifzing the token type to find or
a string with a pattern that must be matched.
"""
if isinstance(what, (int, long)):
return self._findTokenType(tree, what)
elif isinstance(what, basestring):
return self._findPattern(tree, what)
else:
raise TypeError("'what' must be string or integer")
def _findTokenType(self, t, ttype):
"""Return a List of tree nodes with token type ttype"""
nodes = []
def visitor(tree, parent, childIndex, labels):
nodes.append(tree)
self.visit(t, ttype, visitor)
return nodes
def _findPattern(self, t, pattern):
"""Return a List of subtrees matching pattern."""
subtrees = []
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return None
rootTokenType = tpattern.getType()
def visitor(tree, parent, childIndex, label):
if self._parse(tree, tpattern, None):
subtrees.append(tree)
self.visit(t, rootTokenType, visitor)
return subtrees
def visit(self, tree, what, visitor):
"""Visit every node in tree matching what, invoking the visitor.
If what is a string, it is parsed as a pattern and only matching
subtrees will be visited.
The implementation uses the root node of the pattern in combination
with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
Patterns with wildcard roots are also not allowed.
If what is an integer, it is used as a token type and visit will match
all nodes of that type (this is faster than the pattern match).
The labels arg of the visitor action method is never set (it's None)
since using a token type rather than a pattern doesn't let us set a
label.
"""
if isinstance(what, (int, long)):
self._visitType(tree, None, 0, what, visitor)
elif isinstance(what, basestring):
self._visitPattern(tree, what, visitor)
else:
raise TypeError("'what' must be string or integer")
def _visitType(self, t, parent, childIndex, ttype, visitor):
"""Do the recursive work for visit"""
if t is None:
return
if self.adaptor.getType(t) == ttype:
visitor(t, parent, childIndex, None)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self._visitType(child, t, i, ttype, visitor)
def _visitPattern(self, tree, pattern, visitor):
"""
For all subtrees that match the pattern, execute the visit action.
"""
# Create a TreePattern from the pattern
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
# don't allow invalid patterns
if (tpattern is None or tpattern.isNil()
or isinstance(tpattern, WildcardTreePattern)):
return
rootTokenType = tpattern.getType()
def rootvisitor(tree, parent, childIndex, labels):
labels = {}
if self._parse(tree, tpattern, labels):
visitor(tree, parent, childIndex, labels)
self.visit(tree, rootTokenType, rootvisitor)
def parse(self, t, pattern, labels=None):
"""
Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
on the various nodes and '.' (dot) as the node/subtree wildcard,
return true if the pattern matches and fill the labels Map with
the labels pointing at the appropriate nodes. Return false if
the pattern is malformed or the tree does not match.
If a node specifies a text arg in pattern, then that must match
for that node in t.
"""
tokenizer = TreePatternLexer(pattern)
parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
tpattern = parser.pattern()
return self._parse(t, tpattern, labels)
def _parse(self, t1, tpattern, labels):
"""
Do the work for parse. Check to see if the tpattern fits the
structure and token types in t1. Check text if the pattern has
text arguments on nodes. Fill labels map with pointers to nodes
in tree matched against nodes in pattern with labels.
"""
# make sure both are non-null
if t1 is None or tpattern is None:
return False
# check roots (wildcard matches anything)
if not isinstance(tpattern, WildcardTreePattern):
if self.adaptor.getType(t1) != tpattern.getType():
return False
# if pattern has text, check node text
if (tpattern.hasTextArg
and self.adaptor.getText(t1) != tpattern.getText()):
return False
if tpattern.label is not None and labels is not None:
# map label in pattern to node in t1
labels[tpattern.label] = t1
# check children
n1 = self.adaptor.getChildCount(t1)
n2 = tpattern.getChildCount()
if n1 != n2:
return False
for i in range(n1):
child1 = self.adaptor.getChild(t1, i)
child2 = tpattern.getChild(i)
if not self._parse(child1, child2, labels):
return False
return True
def equals(self, t1, t2, adaptor=None):
"""
Compare t1 and t2; return true if token types/text, structure match
exactly.
The trees are examined in their entirety so that (A B) does not match
(A B C) nor (A (B C)).
"""
if adaptor is None:
adaptor = self.adaptor
return self._equals(t1, t2, adaptor)
def _equals(self, t1, t2, adaptor):
# make sure both are non-null
if t1 is None or t2 is None:
return False
# check roots
if adaptor.getType(t1) != adaptor.getType(t2):
return False
if adaptor.getText(t1) != adaptor.getText(t2):
return False
# check children
n1 = adaptor.getChildCount(t1)
n2 = adaptor.getChildCount(t2)
if n1 != n2:
return False
for i in range(n1):
child1 = adaptor.getChild(t1, i)
child2 = adaptor.getChild(t2, i)
if not self._equals(child1, child2, adaptor):
return False
return True
| 18,311 | Python | .py | 443 | 31.124153 | 176 | 0.610558 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,320 | recognizers.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/recognizers.py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import sys
import inspect
from . import runtime_version, runtime_version_str
from .constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL, EOF, \
EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE
from .exceptions import RecognitionException, MismatchedTokenException, \
MismatchedRangeException, MismatchedTreeNodeException, \
NoViableAltException, EarlyExitException, MismatchedSetException, \
MismatchedNotSetException, FailedPredicateException, \
BacktrackingFailed, UnwantedTokenException, MissingTokenException
from .tokens import CommonToken, EOF_TOKEN, SKIP_TOKEN
from .compat import set, frozenset, reversed
class RecognizerSharedState(object):
"""
The set of fields needed by an abstract recognizer to recognize input
and recover from errors etc... As a separate state object, it can be
shared among multiple grammars; e.g., when one grammar imports another.
These fields are publically visible but the actual state pointer per
parser is protected.
"""
def __init__(self):
# Track the set of token types that can follow any rule invocation.
# Stack grows upwards.
self.following = []
# This is true when we see an error and before having successfully
# matched a token. Prevents generation of more than one error message
# per error.
self.errorRecovery = False
# The index into the input stream where the last error occurred.
# This is used to prevent infinite loops where an error is found
# but no token is consumed during recovery...another error is found,
# ad naseum. This is a failsafe mechanism to guarantee that at least
# one token/tree node is consumed for two errors.
self.lastErrorIndex = -1
# If 0, no backtracking is going on. Safe to exec actions etc...
# If >0 then it's the level of backtracking.
self.backtracking = 0
# An array[size num rules] of Map<Integer,Integer> that tracks
# the stop token index for each rule. ruleMemo[ruleIndex] is
# the memoization table for ruleIndex. For key ruleStartIndex, you
# get back the stop token for associated rule or MEMO_RULE_FAILED.
#
# This is only used if rule memoization is on (which it is by default).
self.ruleMemo = None
## Did the recognizer encounter a syntax error? Track how many.
self.syntaxErrors = 0
# LEXER FIELDS (must be in same state object to avoid casting
# constantly in generated code and Lexer object) :(
## The goal of all lexer rules/methods is to create a token object.
# This is an instance variable as multiple rules may collaborate to
# create a single token. nextToken will return this object after
# matching lexer rule(s). If you subclass to allow multiple token
# emissions, then set this to the last token to be matched or
# something nonnull so that the auto token emit mechanism will not
# emit another token.
self.token = None
## What character index in the stream did the current token start at?
# Needed, for example, to get the text for current token. Set at
# the start of nextToken.
self.tokenStartCharIndex = -1
## The line on which the first character of the token resides
self.tokenStartLine = None
## The character position of first character within the line
self.tokenStartCharPositionInLine = None
## The channel number for the current token
self.channel = None
## The token type for the current token
self.type = None
## You can set the text for the current token to override what is in
# the input char buffer. Use setText() or can set this instance var.
self.text = None
class BaseRecognizer(object):
"""
@brief Common recognizer functionality.
A generic recognizer that can handle recognizers generated from
lexer, parser, and tree grammars. This is all the parsing
support code essentially; most of it is error recovery stuff and
backtracking.
"""
MEMO_RULE_FAILED = -2
MEMO_RULE_UNKNOWN = -1
# copies from Token object for convenience in actions
DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL
# for convenience in actions
HIDDEN = HIDDEN_CHANNEL
# overridden by generated subclasses
tokenNames = None
# The antlr_version attribute has been introduced in 3.1. If it is not
# overwritten in the generated recognizer, we assume a default of 3.0.1.
antlr_version = (3, 0, 1, 0)
antlr_version_str = "3.0.1"
def __init__(self, state=None):
# Input stream of the recognizer. Must be initialized by a subclass.
self.input = None
## State of a lexer, parser, or tree parser are collected into a state
# object so the state can be shared. This sharing is needed to
# have one grammar import others and share same error variables
# and other state variables. It's a kind of explicit multiple
# inheritance via delegation of methods and shared state.
if state is None:
state = RecognizerSharedState()
self._state = state
if self.antlr_version > runtime_version:
raise RuntimeError(
"ANTLR version mismatch: "
"The recognizer has been generated by V%s, but this runtime "
"is V%s. Please use the V%s runtime or higher."
% (self.antlr_version_str,
runtime_version_str,
self.antlr_version_str))
elif (self.antlr_version < (3, 1, 0, 0) and
self.antlr_version != runtime_version):
# FIXME: make the runtime compatible with 3.0.1 codegen
# and remove this block.
raise RuntimeError(
"ANTLR version mismatch: "
"The recognizer has been generated by V%s, but this runtime "
"is V%s. Please use the V%s runtime."
% (self.antlr_version_str,
runtime_version_str,
self.antlr_version_str))
# this one only exists to shut up pylint :(
def setInput(self, input):
self.input = input
def reset(self):
"""
reset the parser's state; subclasses must rewinds the input stream
"""
# wack everything related to error recovery
if self._state is None:
# no shared state work to do
return
self._state.following = []
self._state.errorRecovery = False
self._state.lastErrorIndex = -1
self._state.syntaxErrors = 0
# wack everything related to backtracking and memoization
self._state.backtracking = 0
if self._state.ruleMemo is not None:
self._state.ruleMemo = {}
def match(self, input, ttype, follow):
"""
Match current input symbol against ttype. Attempt
single token insertion or deletion error recovery. If
that fails, throw MismatchedTokenException.
To turn off single token insertion or deletion error
recovery, override recoverFromMismatchedToken() and have it
throw an exception. See TreeParser.recoverFromMismatchedToken().
This way any error in a rule will cause an exception and
immediate exit from rule. Rule would recover by resynchronizing
to the set of symbols that can follow rule ref.
"""
matchedSymbol = self.getCurrentInputSymbol(input)
if self.input.LA(1) == ttype:
self.input.consume()
self._state.errorRecovery = False
return matchedSymbol
if self._state.backtracking > 0:
# FIXME: need to return matchedSymbol here as well. damn!!
raise BacktrackingFailed
matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow)
return matchedSymbol
def matchAny(self, input):
"""Match the wildcard: in a symbol"""
self._state.errorRecovery = False
self.input.consume()
def mismatchIsUnwantedToken(self, input, ttype):
return input.LA(2) == ttype
def mismatchIsMissingToken(self, input, follow):
if follow is None:
# we have no information about the follow; we can only consume
# a single token and hope for the best
return False
# compute what can follow this grammar element reference
if EOR_TOKEN_TYPE in follow:
viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW()
follow = follow | viableTokensFollowingThisRule
if len(self._state.following) > 0:
# remove EOR if we're not the start symbol
follow = follow - set([EOR_TOKEN_TYPE])
# if current token is consistent with what could come after set
# then we know we're missing a token; error recovery is free to
# "insert" the missing token
if input.LA(1) in follow or EOR_TOKEN_TYPE in follow:
return True
return False
def reportError(self, e):
"""Report a recognition problem.
This method sets errorRecovery to indicate the parser is recovering
not parsing. Once in recovery mode, no errors are generated.
To get out of recovery mode, the parser must successfully match
a token (after a resync). So it will go:
1. error occurs
2. enter recovery mode, report error
3. consume until token found in resynch set
4. try to resume parsing
5. next match() will reset errorRecovery mode
If you override, make sure to update syntaxErrors if you care about
that.
"""
# if we've already reported an error and have not matched a token
# yet successfully, don't report any errors.
if self._state.errorRecovery:
return
self._state.syntaxErrors += 1 # don't count spurious
self._state.errorRecovery = True
self.displayRecognitionError(self.tokenNames, e)
def displayRecognitionError(self, tokenNames, e):
hdr = self.getErrorHeader(e)
msg = self.getErrorMessage(e, tokenNames)
self.emitErrorMessage(hdr+" "+msg)
def getErrorMessage(self, e, tokenNames):
"""
What error message should be generated for the various
exception types?
Not very object-oriented code, but I like having all error message
generation within one method rather than spread among all of the
exception classes. This also makes it much easier for the exception
handling because the exception classes do not have to have pointers back
to this object to access utility routines and so on. Also, changing
the message for an exception type would be difficult because you
would have to subclassing exception, but then somehow get ANTLR
to make those kinds of exception objects instead of the default.
This looks weird, but trust me--it makes the most sense in terms
of flexibility.
For grammar debugging, you will want to override this to add
more information such as the stack frame with
getRuleInvocationStack(e, this.getClass().getName()) and,
for no viable alts, the decision description and state etc...
Override this to change the message generated for one or more
exception types.
"""
if isinstance(e, UnwantedTokenException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "extraneous input %s expecting %s" % (
self.getTokenErrorDisplay(e.getUnexpectedToken()),
tokenName
)
elif isinstance(e, MissingTokenException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "missing %s at %s" % (
tokenName, self.getTokenErrorDisplay(e.token)
)
elif isinstance(e, MismatchedTokenException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "mismatched input " \
+ self.getTokenErrorDisplay(e.token) \
+ " expecting " \
+ tokenName
elif isinstance(e, MismatchedTreeNodeException):
tokenName = "<unknown>"
if e.expecting == EOF:
tokenName = "EOF"
else:
tokenName = self.tokenNames[e.expecting]
msg = "mismatched tree node: %s expecting %s" \
% (e.node, tokenName)
elif isinstance(e, NoViableAltException):
msg = "no viable alternative at input " \
+ self.getTokenErrorDisplay(e.token)
elif isinstance(e, EarlyExitException):
msg = "required (...)+ loop did not match anything at input " \
+ self.getTokenErrorDisplay(e.token)
elif isinstance(e, MismatchedSetException):
msg = "mismatched input " \
+ self.getTokenErrorDisplay(e.token) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, MismatchedNotSetException):
msg = "mismatched input " \
+ self.getTokenErrorDisplay(e.token) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, FailedPredicateException):
msg = "rule " \
+ e.ruleName \
+ " failed predicate: {" \
+ e.predicateText \
+ "}?"
else:
msg = str(e)
return msg
def getNumberOfSyntaxErrors(self):
"""
Get number of recognition errors (lexer, parser, tree parser). Each
recognizer tracks its own number. So parser and lexer each have
separate count. Does not count the spurious errors found between
an error and next valid token match
See also reportError()
"""
return self._state.syntaxErrors
def getErrorHeader(self, e):
"""
What is the error header, normally line/character position information?
"""
return "line %d:%d" % (e.line, e.charPositionInLine)
def getTokenErrorDisplay(self, t):
"""
How should a token be displayed in an error message? The default
is to display just the text, but during development you might
want to have a lot of information spit out. Override in that case
to use t.toString() (which, for CommonToken, dumps everything about
the token). This is better than forcing you to override a method in
your token objects because you don't have to go modify your lexer
so that it creates a new Java type.
"""
s = t.text
if s is None:
if t.type == EOF:
s = "<EOF>"
else:
s = "<"+t.type+">"
return repr(s)
def emitErrorMessage(self, msg):
"""Override this method to change where error messages go"""
sys.stderr.write(msg + '\n')
def recover(self, input, re):
"""
Recover from an error found on the input stream. This is
for NoViableAlt and mismatched symbol exceptions. If you enable
single token insertion and deletion, this will usually not
handle mismatched symbol exceptions but there could be a mismatched
token that the match() routine could not recover from.
"""
# PROBLEM? what if input stream is not the same as last time
# perhaps make lastErrorIndex a member of input
if self._state.lastErrorIndex == input.index():
# uh oh, another error at same token index; must be a case
# where LT(1) is in the recovery token set so nothing is
# consumed; consume a single token so at least to prevent
# an infinite loop; this is a failsafe.
input.consume()
self._state.lastErrorIndex = input.index()
followSet = self.computeErrorRecoverySet()
self.beginResync()
self.consumeUntil(input, followSet)
self.endResync()
def beginResync(self):
"""
A hook to listen in on the token consumption during error recovery.
The DebugParser subclasses this to fire events to the listenter.
"""
pass
def endResync(self):
"""
A hook to listen in on the token consumption during error recovery.
The DebugParser subclasses this to fire events to the listenter.
"""
pass
def computeErrorRecoverySet(self):
"""
Compute the error recovery set for the current rule. During
rule invocation, the parser pushes the set of tokens that can
follow that rule reference on the stack; this amounts to
computing FIRST of what follows the rule reference in the
enclosing rule. This local follow set only includes tokens
from within the rule; i.e., the FIRST computation done by
ANTLR stops at the end of a rule.
EXAMPLE
When you find a "no viable alt exception", the input is not
consistent with any of the alternatives for rule r. The best
thing to do is to consume tokens until you see something that
can legally follow a call to r *or* any rule that called r.
You don't want the exact set of viable next tokens because the
input might just be missing a token--you might consume the
rest of the input looking for one of the missing tokens.
Consider grammar:
a : '[' b ']'
| '(' b ')'
;
b : c '^' INT ;
c : ID
| INT
;
At each rule invocation, the set of tokens that could follow
that rule is pushed on a stack. Here are the various "local"
follow sets:
FOLLOW(b1_in_a) = FIRST(']') = ']'
FOLLOW(b2_in_a) = FIRST(')') = ')'
FOLLOW(c_in_b) = FIRST('^') = '^'
Upon erroneous input "[]", the call chain is
a -> b -> c
and, hence, the follow context stack is:
depth local follow set after call to rule
0 \<EOF> a (from main())
1 ']' b
3 '^' c
Notice that ')' is not included, because b would have to have
been called from a different context in rule a for ')' to be
included.
For error recovery, we cannot consider FOLLOW(c)
(context-sensitive or otherwise). We need the combined set of
all context-sensitive FOLLOW sets--the set of all tokens that
could follow any reference in the call chain. We need to
resync to one of those tokens. Note that FOLLOW(c)='^' and if
we resync'd to that token, we'd consume until EOF. We need to
sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
In this case, for input "[]", LA(1) is in this set so we would
not consume anything and after printing an error rule c would
return normally. It would not find the required '^' though.
At this point, it gets a mismatched token error and throws an
exception (since LA(1) is not in the viable following token
set). The rule exception handler tries to recover, but finds
the same recovery set and doesn't consume anything. Rule b
exits normally returning to rule a. Now it finds the ']' (and
with the successful match exits errorRecovery mode).
So, you cna see that the parser walks up call chain looking
for the token that was a member of the recovery set.
Errors are not generated in errorRecovery mode.
ANTLR's error recovery mechanism is based upon original ideas:
"Algorithms + Data Structures = Programs" by Niklaus Wirth
and
"A note on error recovery in recursive descent parsers":
http://portal.acm.org/citation.cfm?id=947902.947905
Later, Josef Grosch had some good ideas:
"Efficient and Comfortable Error Recovery in Recursive Descent
Parsers":
ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
Like Grosch I implemented local FOLLOW sets that are combined
at run-time upon error to avoid overhead during parsing.
"""
return self.combineFollows(False)
def computeContextSensitiveRuleFOLLOW(self):
"""
Compute the context-sensitive FOLLOW set for current rule.
This is set of token types that can follow a specific rule
reference given a specific call chain. You get the set of
viable tokens that can possibly come next (lookahead depth 1)
given the current call chain. Contrast this with the
definition of plain FOLLOW for rule r:
FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
where x in T* and alpha, beta in V*; T is set of terminals and
V is the set of terminals and nonterminals. In other words,
FOLLOW(r) is the set of all tokens that can possibly follow
references to r in *any* sentential form (context). At
runtime, however, we know precisely which context applies as
we have the call chain. We may compute the exact (rather
than covering superset) set of following tokens.
For example, consider grammar:
stat : ID '=' expr ';' // FOLLOW(stat)=={EOF}
| "return" expr '.'
;
expr : atom ('+' atom)* ; // FOLLOW(expr)=={';','.',')'}
atom : INT // FOLLOW(atom)=={'+',')',';','.'}
| '(' expr ')'
;
The FOLLOW sets are all inclusive whereas context-sensitive
FOLLOW sets are precisely what could follow a rule reference.
For input input "i=(3);", here is the derivation:
stat => ID '=' expr ';'
=> ID '=' atom ('+' atom)* ';'
=> ID '=' '(' expr ')' ('+' atom)* ';'
=> ID '=' '(' atom ')' ('+' atom)* ';'
=> ID '=' '(' INT ')' ('+' atom)* ';'
=> ID '=' '(' INT ')' ';'
At the "3" token, you'd have a call chain of
stat -> expr -> atom -> expr -> atom
What can follow that specific nested ref to atom? Exactly ')'
as you can see by looking at the derivation of this specific
input. Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
You want the exact viable token set when recovering from a
token mismatch. Upon token mismatch, if LA(1) is member of
the viable next token set, then you know there is most likely
a missing token in the input stream. "Insert" one by just not
throwing an exception.
"""
return self.combineFollows(True)
def combineFollows(self, exact):
followSet = set()
for idx, localFollowSet in reversed(list(enumerate(self._state.following))):
followSet |= localFollowSet
if exact:
# can we see end of rule?
if EOR_TOKEN_TYPE in localFollowSet:
# Only leave EOR in set if at top (start rule); this lets
# us know if have to include follow(start rule); i.e., EOF
if idx > 0:
followSet.remove(EOR_TOKEN_TYPE)
else:
# can't see end of rule, quit
break
return followSet
def recoverFromMismatchedToken(self, input, ttype, follow):
"""Attempt to recover from a single missing or extra token.
EXTRA TOKEN
LA(1) is not what we are looking for. If LA(2) has the right token,
however, then assume LA(1) is some extra spurious token. Delete it
and LA(2) as if we were doing a normal match(), which advances the
input.
MISSING TOKEN
If current token is consistent with what could come after
ttype then it is ok to 'insert' the missing token, else throw
exception For example, Input 'i=(3;' is clearly missing the
')'. When the parser returns from the nested call to expr, it
will have call chain:
stat -> expr -> atom
and it will be trying to match the ')' at this point in the
derivation:
=> ID '=' '(' INT ')' ('+' atom)* ';'
^
match() will see that ';' doesn't match ')' and report a
mismatched token error. To recover, it sees that LA(1)==';'
is in the set of tokens that can follow the ')' token
reference in rule atom. It can assume that you forgot the ')'.
"""
e = None
# if next token is what we are looking for then "delete" this token
if self.mismatchIsUnwantedToken(input, ttype):
e = UnwantedTokenException(ttype, input)
self.beginResync()
input.consume() # simply delete extra token
self.endResync()
# report after consuming so AW sees the token in the exception
self.reportError(e)
# we want to return the token we're actually matching
matchedSymbol = self.getCurrentInputSymbol(input)
# move past ttype token as if all were ok
input.consume()
return matchedSymbol
# can't recover with single token deletion, try insertion
if self.mismatchIsMissingToken(input, follow):
inserted = self.getMissingSymbol(input, e, ttype, follow)
e = MissingTokenException(ttype, input, inserted)
# report after inserting so AW sees the token in the exception
self.reportError(e)
return inserted
# even that didn't work; must throw the exception
e = MismatchedTokenException(ttype, input)
raise e
def recoverFromMismatchedSet(self, input, e, follow):
"""Not currently used"""
if self.mismatchIsMissingToken(input, follow):
self.reportError(e)
# we don't know how to conjure up a token for sets yet
return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow)
# TODO do single token deletion like above for Token mismatch
raise e
def getCurrentInputSymbol(self, input):
"""
Match needs to return the current input symbol, which gets put
into the label for the associated token ref; e.g., x=ID. Token
and tree parsers need to return different objects. Rather than test
for input stream type or change the IntStream interface, I use
a simple method to ask the recognizer to tell me what the current
input symbol is.
This is ignored for lexers.
"""
return None
def getMissingSymbol(self, input, e, expectedTokenType, follow):
"""Conjure up a missing token during error recovery.
The recognizer attempts to recover from single missing
symbols. But, actions might refer to that missing symbol.
For example, x=ID {f($x);}. The action clearly assumes
that there has been an identifier matched previously and that
$x points at that token. If that token is missing, but
the next token in the stream is what we want we assume that
this token is missing and we keep going. Because we
have to return some token to replace the missing token,
we have to conjure one up. This method gives the user control
over the tokens returned for missing tokens. Mostly,
you will want to create something special for identifier
tokens. For literals such as '{' and ',', the default
action in the parser or tree parser works. It simply creates
a CommonToken of the appropriate type. The text will be the token.
If you change what tokens must be created by the lexer,
override this method to create the appropriate tokens.
"""
return None
## def recoverFromMissingElement(self, input, e, follow):
## """
## This code is factored out from mismatched token and mismatched set
## recovery. It handles "single token insertion" error recovery for
## both. No tokens are consumed to recover from insertions. Return
## true if recovery was possible else return false.
## """
## if self.mismatchIsMissingToken(input, follow):
## self.reportError(e)
## return True
## # nothing to do; throw exception
## return False
def consumeUntil(self, input, tokenTypes):
"""
Consume tokens until one matches the given token or token set
tokenTypes can be a single token type or a set of token types
"""
if not isinstance(tokenTypes, (set, frozenset)):
tokenTypes = frozenset([tokenTypes])
ttype = input.LA(1)
while ttype != EOF and ttype not in tokenTypes:
input.consume()
ttype = input.LA(1)
def getRuleInvocationStack(self):
"""
Return List<String> of the rules in your parser instance
leading up to a call to this method. You could override if
you want more details such as the file/line info of where
in the parser java code a rule is invoked.
This is very useful for error messages and for context-sensitive
error recovery.
You must be careful, if you subclass a generated recognizers.
The default implementation will only search the module of self
for rules, but the subclass will not contain any rules.
You probably want to override this method to look like
def getRuleInvocationStack(self):
return self._getRuleInvocationStack(<class>.__module__)
where <class> is the class of the generated recognizer, e.g.
the superclass of self.
"""
return self._getRuleInvocationStack(self.__module__)
def _getRuleInvocationStack(cls, module):
"""
A more general version of getRuleInvocationStack where you can
pass in, for example, a RecognitionException to get it's rule
stack trace. This routine is shared with all recognizers, hence,
static.
TODO: move to a utility class or something; weird having lexer call
this
"""
# mmmhhh,... perhaps look at the first argument
# (f_locals[co_varnames[0]]?) and test if it's a (sub)class of
# requested recognizer...
rules = []
for frame in reversed(inspect.stack()):
code = frame[0].f_code
codeMod = inspect.getmodule(code)
if codeMod is None:
continue
# skip frames not in requested module
if codeMod.__name__ != module:
continue
# skip some unwanted names
if code.co_name in ('nextToken', '<module>'):
continue
rules.append(code.co_name)
return rules
_getRuleInvocationStack = classmethod(_getRuleInvocationStack)
def getBacktrackingLevel(self):
return self._state.backtracking
def setBacktrackingLevel(self, n):
self._state.backtracking = n
def failed(self):
"""Return whether or not a backtracking attempt failed."""
return self._state.failed
def getGrammarFileName(self):
"""For debugging and other purposes, might want the grammar name.
Have ANTLR generate an implementation for this method.
"""
return self.grammarFileName
def getSourceName(self):
raise NotImplementedError
def toStrings(self, tokens):
"""A convenience method for use most often with template rewrites.
Convert a List<Token> to List<String>
"""
if tokens is None:
return None
return [token.text for token in tokens]
def getRuleMemoization(self, ruleIndex, ruleStartIndex):
"""
Given a rule number and a start token index number, return
MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
start index. If this rule has parsed input starting from the
start index before, then return where the rule stopped parsing.
It returns the index of the last token matched by the rule.
"""
if ruleIndex not in self._state.ruleMemo:
self._state.ruleMemo[ruleIndex] = {}
return self._state.ruleMemo[ruleIndex].get(
ruleStartIndex, self.MEMO_RULE_UNKNOWN
)
def alreadyParsedRule(self, input, ruleIndex):
"""
Has this rule already parsed input at the current index in the
input stream? Return the stop token index or MEMO_RULE_UNKNOWN.
If we attempted but failed to parse properly before, return
MEMO_RULE_FAILED.
This method has a side-effect: if we have seen this input for
this rule and successfully parsed before, then seek ahead to
1 past the stop token matched for this rule last time.
"""
stopIndex = self.getRuleMemoization(ruleIndex, input.index())
if stopIndex == self.MEMO_RULE_UNKNOWN:
return False
if stopIndex == self.MEMO_RULE_FAILED:
raise BacktrackingFailed
else:
input.seek(stopIndex + 1)
return True
def memoize(self, input, ruleIndex, ruleStartIndex, success):
"""
Record whether or not this rule parsed the input at this position
successfully.
"""
if success:
stopTokenIndex = input.index() - 1
else:
stopTokenIndex = self.MEMO_RULE_FAILED
if ruleIndex in self._state.ruleMemo:
self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex
def traceIn(self, ruleName, ruleIndex, inputSymbol):
sys.stdout.write("enter %s %s" % (ruleName, inputSymbol))
if self._state.backtracking > 0:
sys.stdout.write(" backtracking=%s" % self._state.backtracking)
sys.stdout.write('\n')
def traceOut(self, ruleName, ruleIndex, inputSymbol):
sys.stdout.write("exit %s %s" % (ruleName, inputSymbol))
if self._state.backtracking > 0:
sys.stdout.write(" backtracking=%s" % self._state.backtracking)
if self._state.failed:
sys.stdout.write(" failed")
else:
sys.stdout.write(" succeeded")
sys.stdout.write('\n')
class TokenSource(object):
"""
@brief Abstract baseclass for token producers.
A source of tokens must provide a sequence of tokens via nextToken()
and also must reveal it's source of characters; CommonToken's text is
computed from a CharStream; it only store indices into the char stream.
Errors from the lexer are never passed to the parser. Either you want
to keep going or you do not upon token recognition error. If you do not
want to continue lexing then you do not want to continue parsing. Just
throw an exception not under RecognitionException and Java will naturally
toss you all the way out of the recognizers. If you want to continue
lexing then you should not throw an exception to the parser--it has already
requested a token. Keep lexing until you get a valid one. Just report
errors and keep going, looking for a valid token.
"""
def nextToken(self):
"""Return a Token object from your input stream (usually a CharStream).
Do not fail/return upon lexing error; keep chewing on the characters
until you get a good one; errors are not passed through to the parser.
"""
raise NotImplementedError
def __iter__(self):
"""The TokenSource is an interator.
The iteration will not include the final EOF token, see also the note
for the next() method.
"""
return self
def next(self):
"""Return next token or raise StopIteration.
Note that this will raise StopIteration when hitting the EOF token,
so EOF will not be part of the iteration.
"""
token = self.nextToken()
if token is None or token.type == EOF:
raise StopIteration
return token
class Lexer(BaseRecognizer, TokenSource):
"""
@brief Baseclass for generated lexer classes.
A lexer is recognizer that draws input symbols from a character stream.
lexer grammars result in a subclass of this object. A Lexer object
uses simplified match() and error recovery mechanisms in the interest
of speed.
"""
def __init__(self, input, state=None):
BaseRecognizer.__init__(self, state)
TokenSource.__init__(self)
# Where is the lexer drawing characters from?
self.input = input
def reset(self):
BaseRecognizer.reset(self) # reset all recognizer state variables
if self.input is not None:
# rewind the input
self.input.seek(0)
if self._state is None:
# no shared state work to do
return
# wack Lexer state variables
self._state.token = None
self._state.type = INVALID_TOKEN_TYPE
self._state.channel = DEFAULT_CHANNEL
self._state.tokenStartCharIndex = -1
self._state.tokenStartLine = -1
self._state.tokenStartCharPositionInLine = -1
self._state.text = None
def nextToken(self):
"""
Return a token from this source; i.e., match a token on the char
stream.
"""
while 1:
self._state.token = None
self._state.channel = DEFAULT_CHANNEL
self._state.tokenStartCharIndex = self.input.index()
self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
self._state.tokenStartLine = self.input.line
self._state.text = None
if self.input.LA(1) == EOF:
return EOF_TOKEN
try:
self.mTokens()
if self._state.token is None:
self.emit()
elif self._state.token == SKIP_TOKEN:
continue
return self._state.token
except NoViableAltException, re:
self.reportError(re)
self.recover(re) # throw out current char and try again
except RecognitionException, re:
self.reportError(re)
# match() routine has already called recover()
def skip(self):
"""
Instruct the lexer to skip creating a token for current lexer rule
and look for another token. nextToken() knows to keep looking when
a lexer rule finishes with token set to SKIP_TOKEN. Recall that
if token==null at end of any token rule, it creates one for you
and emits it.
"""
self._state.token = SKIP_TOKEN
def mTokens(self):
"""This is the lexer entry point that sets instance var 'token'"""
# abstract method
raise NotImplementedError
def setCharStream(self, input):
"""Set the char stream and reset the lexer"""
self.input = None
self.reset()
self.input = input
def getSourceName(self):
return self.input.getSourceName()
def emit(self, token=None):
"""
The standard method called to automatically emit a token at the
outermost lexical rule. The token object should point into the
char buffer start..stop. If there is a text override in 'text',
use that to set the token's text. Override this method to emit
custom Token objects.
If you are building trees, then you should also override
Parser or TreeParser.getMissingSymbol().
"""
if token is None:
token = CommonToken(
input=self.input,
type=self._state.type,
channel=self._state.channel,
start=self._state.tokenStartCharIndex,
stop=self.getCharIndex()-1
)
token.line = self._state.tokenStartLine
token.text = self._state.text
token.charPositionInLine = self._state.tokenStartCharPositionInLine
self._state.token = token
return token
def match(self, s):
if isinstance(s, basestring):
for c in s:
if self.input.LA(1) != ord(c):
if self._state.backtracking > 0:
raise BacktrackingFailed
mte = MismatchedTokenException(c, self.input)
self.recover(mte)
raise mte
self.input.consume()
else:
if self.input.LA(1) != s:
if self._state.backtracking > 0:
raise BacktrackingFailed
mte = MismatchedTokenException(unichr(s), self.input)
self.recover(mte) # don't really recover; just consume in lexer
raise mte
self.input.consume()
def matchAny(self):
self.input.consume()
def matchRange(self, a, b):
if self.input.LA(1) < a or self.input.LA(1) > b:
if self._state.backtracking > 0:
raise BacktrackingFailed
mre = MismatchedRangeException(unichr(a), unichr(b), self.input)
self.recover(mre)
raise mre
self.input.consume()
def getLine(self):
return self.input.line
def getCharPositionInLine(self):
return self.input.charPositionInLine
def getCharIndex(self):
"""What is the index of the current character of lookahead?"""
return self.input.index()
def getText(self):
"""
Return the text matched so far for the current token or any
text override.
"""
if self._state.text is not None:
return self._state.text
return self.input.substring(
self._state.tokenStartCharIndex,
self.getCharIndex()-1
)
def setText(self, text):
"""
Set the complete text of this token; it wipes any previous
changes to the text.
"""
self._state.text = text
text = property(getText, setText)
def reportError(self, e):
## TODO: not thought about recovery in lexer yet.
## # if we've already reported an error and have not matched a token
## # yet successfully, don't report any errors.
## if self.errorRecovery:
## #System.err.print("[SPURIOUS] ");
## return;
##
## self.errorRecovery = True
self.displayRecognitionError(self.tokenNames, e)
def getErrorMessage(self, e, tokenNames):
msg = None
if isinstance(e, MismatchedTokenException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting " \
+ self.getCharErrorDisplay(e.expecting)
elif isinstance(e, NoViableAltException):
msg = "no viable alternative at character " \
+ self.getCharErrorDisplay(e.c)
elif isinstance(e, EarlyExitException):
msg = "required (...)+ loop did not match anything at character " \
+ self.getCharErrorDisplay(e.c)
elif isinstance(e, MismatchedNotSetException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, MismatchedSetException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting set " \
+ repr(e.expecting)
elif isinstance(e, MismatchedRangeException):
msg = "mismatched character " \
+ self.getCharErrorDisplay(e.c) \
+ " expecting set " \
+ self.getCharErrorDisplay(e.a) \
+ ".." \
+ self.getCharErrorDisplay(e.b)
else:
msg = BaseRecognizer.getErrorMessage(self, e, tokenNames)
return msg
def getCharErrorDisplay(self, c):
if c == EOF:
c = '<EOF>'
return repr(c)
def recover(self, re):
"""
Lexers can normally match any char in it's vocabulary after matching
a token, so do the easy thing and just kill a character and hope
it all works out. You can instead use the rule invocation stack
to do sophisticated error recovery if you are in a fragment rule.
"""
self.input.consume()
def traceIn(self, ruleName, ruleIndex):
inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
self.getLine(),
self.getCharPositionInLine()
)
BaseRecognizer.traceIn(self, ruleName, ruleIndex, inputSymbol)
def traceOut(self, ruleName, ruleIndex):
inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
self.getLine(),
self.getCharPositionInLine()
)
BaseRecognizer.traceOut(self, ruleName, ruleIndex, inputSymbol)
class Parser(BaseRecognizer):
"""
@brief Baseclass for generated parser classes.
"""
def __init__(self, lexer, state=None):
BaseRecognizer.__init__(self, state)
self.setTokenStream(lexer)
def reset(self):
BaseRecognizer.reset(self) # reset all recognizer state variables
if self.input is not None:
self.input.seek(0) # rewind the input
def getCurrentInputSymbol(self, input):
return input.LT(1)
def getMissingSymbol(self, input, e, expectedTokenType, follow):
if expectedTokenType == EOF:
tokenText = "<missing EOF>"
else:
tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
t = CommonToken(type=expectedTokenType, text=tokenText)
current = input.LT(1)
if current.type == EOF:
current = input.LT(-1)
if current is not None:
t.line = current.line
t.charPositionInLine = current.charPositionInLine
t.channel = DEFAULT_CHANNEL
return t
def setTokenStream(self, input):
"""Set the token stream and reset the parser"""
self.input = None
self.reset()
self.input = input
def getTokenStream(self):
return self.input
def getSourceName(self):
return self.input.getSourceName()
def traceIn(self, ruleName, ruleIndex):
BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))
def traceOut(self, ruleName, ruleIndex):
BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
class RuleReturnScope(object):
"""
Rules can return start/stop info as well as possible trees and templates.
"""
def getStart(self):
"""Return the start token or tree."""
return None
def getStop(self):
"""Return the stop token or tree."""
return None
def getTree(self):
"""Has a value potentially if output=AST."""
return None
def getTemplate(self):
"""Has a value potentially if output=template."""
return None
class ParserRuleReturnScope(RuleReturnScope):
"""
Rules that return more than a single value must return an object
containing all the values. Besides the properties defined in
RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
return values. This class simply defines the minimum properties that
are always defined and methods to access the others that might be
available depending on output option such as template and tree.
Note text is not an actual property of the return value, it is computed
from start and stop using the input stream's toString() method. I
could add a ctor to this so that we can pass in and store the input
stream, but I'm not sure we want to do that. It would seem to be undefined
to get the .text property anyway if the rule matches tokens from multiple
input streams.
I do not use getters for fields of objects that are used simply to
group values such as this aggregate. The getters/setters are there to
satisfy the superclass interface.
"""
def __init__(self):
self.start = None
self.stop = None
def getStart(self):
return self.start
def getStop(self):
return self.stop
| 51,730 | Python | .py | 1,091 | 36.936755 | 84 | 0.630046 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,321 | tree.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/tree.py | """ @package antlr3.tree
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
import re
from .constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
from .recognizers import BaseRecognizer, RuleReturnScope
from .streams import IntStream
from .tokens import CommonToken, Token, INVALID_TOKEN
from .exceptions import MismatchedTreeNodeException, \
MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
NoViableAltException
############################################################################
#
# tree related exceptions
#
############################################################################
class RewriteCardinalityException(RuntimeError):
"""
@brief Base class for all exceptions thrown during AST rewrite construction.
This signifies a case where the cardinality of two or more elements
in a subrule are different: (ID INT)+ where |ID|!=|INT|
"""
def __init__(self, elementDescription):
RuntimeError.__init__(self, elementDescription)
self.elementDescription = elementDescription
def getMessage(self):
return self.elementDescription
class RewriteEarlyExitException(RewriteCardinalityException):
"""@brief No elements within a (...)+ in a rewrite rule"""
def __init__(self, elementDescription=None):
RewriteCardinalityException.__init__(self, elementDescription)
class RewriteEmptyStreamException(RewriteCardinalityException):
"""
@brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream
"""
pass
############################################################################
#
# basic Tree and TreeAdaptor interfaces
#
############################################################################
class Tree(object):
"""
@brief Abstract baseclass for tree nodes.
What does a tree look like? ANTLR has a number of support classes
such as CommonTreeNodeStream that work on these kinds of trees. You
don't have to make your trees implement this interface, but if you do,
you'll be able to use more support code.
NOTE: When constructing trees, ANTLR can build any kind of tree; it can
even use Token objects as trees if you add a child list to your tokens.
This is a tree node without any payload; just navigation and factory stuff.
"""
def getChild(self, i):
raise NotImplementedError
def getChildCount(self):
raise NotImplementedError
def getParent(self):
"""Tree tracks parent and child index now > 3.0"""
raise NotImplementedError
def setParent(self, t):
"""Tree tracks parent and child index now > 3.0"""
raise NotImplementedError
def hasAncestor(self, ttype):
"""Walk upwards looking for ancestor with this token type."""
raise NotImplementedError
def getAncestor(self, ttype):
"""Walk upwards and get first ancestor with this token type."""
raise NotImplementedError
def getAncestors(self):
"""Return a list of all ancestors of this node.
The first node of list is the root and the last is the parent of
this node.
"""
raise NotImplementedError
def getChildIndex(self):
"""This node is what child index? 0..n-1"""
raise NotImplementedError
def setChildIndex(self, index):
"""This node is what child index? 0..n-1"""
raise NotImplementedError
def freshenParentAndChildIndexes(self):
"""Set the parent and child index values for all children"""
raise NotImplementedError
def addChild(self, t):
"""
Add t as a child to this node. If t is null, do nothing. If t
is nil, add all children of t to this' children.
"""
raise NotImplementedError
def setChild(self, i, t):
"""Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
raise NotImplementedError
def deleteChild(self, i):
raise NotImplementedError
def replaceChildren(self, startChildIndex, stopChildIndex, t):
"""
Delete children from start to stop and replace with t even if t is
a list (nil-root tree). num of children can increase or decrease.
For huge child lists, inserting children can force walking rest of
children to set their childindex; could be slow.
"""
raise NotImplementedError
def isNil(self):
"""
Indicates the node is a nil node but may still have children, meaning
the tree is a flat list.
"""
raise NotImplementedError
def getTokenStartIndex(self):
"""
What is the smallest token index (indexing from 0) for this node
and its children?
"""
raise NotImplementedError
def setTokenStartIndex(self, index):
raise NotImplementedError
def getTokenStopIndex(self):
"""
What is the largest token index (indexing from 0) for this node
and its children?
"""
raise NotImplementedError
def setTokenStopIndex(self, index):
raise NotImplementedError
def dupNode(self):
raise NotImplementedError
def getType(self):
"""Return a token type; needed for tree parsing."""
raise NotImplementedError
def getText(self):
raise NotImplementedError
def getLine(self):
"""
In case we don't have a token payload, what is the line for errors?
"""
raise NotImplementedError
def getCharPositionInLine(self):
raise NotImplementedError
def toStringTree(self):
raise NotImplementedError
def toString(self):
raise NotImplementedError
class TreeAdaptor(object):
"""
@brief Abstract baseclass for tree adaptors.
How to create and navigate trees. Rather than have a separate factory
and adaptor, I've merged them. Makes sense to encapsulate.
This takes the place of the tree construction code generated in the
generated code in 2.x and the ASTFactory.
I do not need to know the type of a tree at all so they are all
generic Objects. This may increase the amount of typecasting needed. :(
"""
# C o n s t r u c t i o n
def createWithPayload(self, payload):
"""
Create a tree node from Token object; for CommonTree type trees,
then the token just becomes the payload. This is the most
common create call.
Override if you want another kind of node to be built.
"""
raise NotImplementedError
def dupNode(self, treeNode):
"""Duplicate a single tree node.
Override if you want another kind of node to be built."""
raise NotImplementedError
def dupTree(self, tree):
"""Duplicate tree recursively, using dupNode() for each node"""
raise NotImplementedError
def nil(self):
"""
Return a nil node (an empty but non-null node) that can hold
a list of element as the children. If you want a flat tree (a list)
use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
"""
raise NotImplementedError
def errorNode(self, input, start, stop, exc):
"""
Return a tree node representing an error. This node records the
tokens consumed during error recovery. The start token indicates the
input symbol at which the error was detected. The stop token indicates
the last symbol consumed during recovery.
You must specify the input stream so that the erroneous text can
be packaged up in the error node. The exception could be useful
to some applications; default implementation stores ptr to it in
the CommonErrorNode.
This only makes sense during token parsing, not tree parsing.
Tree parsing should happen only when parsing and tree construction
succeed.
"""
raise NotImplementedError
def isNil(self, tree):
"""Is tree considered a nil node used to make lists of child nodes?"""
raise NotImplementedError
def addChild(self, t, child):
"""
Add a child to the tree t. If child is a flat tree (a list), make all
in list children of t. Warning: if t has no children, but child does
and child isNil then you can decide it is ok to move children to t via
t.children = child.children; i.e., without copying the array. Just
make sure that this is consistent with have the user will build
ASTs. Do nothing if t or child is null.
"""
raise NotImplementedError
def becomeRoot(self, newRoot, oldRoot):
"""
If oldRoot is a nil root, just copy or move the children to newRoot.
If not a nil root, make oldRoot a child of newRoot.
old=^(nil a b c), new=r yields ^(r a b c)
old=^(a b c), new=r yields ^(r ^(a b c))
If newRoot is a nil-rooted single child tree, use the single
child as the new root node.
old=^(nil a b c), new=^(nil r) yields ^(r a b c)
old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
If oldRoot was null, it's ok, just return newRoot (even if isNil).
old=null, new=r yields r
old=null, new=^(nil r) yields ^(nil r)
Return newRoot. Throw an exception if newRoot is not a
simple node or nil root with a single child node--it must be a root
node. If newRoot is ^(nil x) return x as newRoot.
Be advised that it's ok for newRoot to point at oldRoot's
children; i.e., you don't have to copy the list. We are
constructing these nodes so we should have this control for
efficiency.
"""
raise NotImplementedError
def rulePostProcessing(self, root):
"""
Given the root of the subtree created for this rule, post process
it to do any simplifications or whatever you want. A required
behavior is to convert ^(nil singleSubtree) to singleSubtree
as the setting of start/stop indexes relies on a single non-nil root
for non-flat trees.
Flat trees such as for lists like "idlist : ID+ ;" are left alone
unless there is only one ID. For a list, the start/stop indexes
are set in the nil node.
This method is executed after all rule tree construction and right
before setTokenBoundaries().
"""
raise NotImplementedError
def getUniqueID(self, node):
"""For identifying trees.
How to identify nodes so we can say "add node to a prior node"?
Even becomeRoot is an issue. Use System.identityHashCode(node)
usually.
"""
raise NotImplementedError
# R e w r i t e R u l e s
def createFromToken(self, tokenType, fromToken, text=None):
"""
Create a new node derived from a token, with a new token type and
(optionally) new text.
This is invoked from an imaginary node ref on right side of a
rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].
This should invoke createToken(Token).
"""
raise NotImplementedError
def createFromType(self, tokenType, text):
"""Create a new node derived from a token, with a new token type.
This is invoked from an imaginary node ref on right side of a
rewrite rule as IMAG["IMAG"].
This should invoke createToken(int,String).
"""
raise NotImplementedError
# C o n t e n t
def getType(self, t):
"""For tree parsing, I need to know the token type of a node"""
raise NotImplementedError
def setType(self, t, type):
"""Node constructors can set the type of a node"""
raise NotImplementedError
def getText(self, t):
raise NotImplementedError
def setText(self, t, text):
"""Node constructors can set the text of a node"""
raise NotImplementedError
def getToken(self, t):
"""Return the token object from which this node was created.
Currently used only for printing an error message.
The error display routine in BaseRecognizer needs to
display where the input the error occurred. If your
tree of limitation does not store information that can
lead you to the token, you can create a token filled with
the appropriate information and pass that back. See
BaseRecognizer.getErrorMessage().
"""
raise NotImplementedError
def setTokenBoundaries(self, t, startToken, stopToken):
"""
Where are the bounds in the input token stream for this node and
all children? Each rule that creates AST nodes will call this
method right before returning. Flat trees (i.e., lists) will
still usually have a nil root node just to hold the children list.
That node would contain the start/stop indexes then.
"""
raise NotImplementedError
def getTokenStartIndex(self, t):
"""
Get the token start index for this subtree; return -1 if no such index
"""
raise NotImplementedError
def getTokenStopIndex(self, t):
"""
Get the token stop index for this subtree; return -1 if no such index
"""
raise NotImplementedError
# N a v i g a t i o n / T r e e P a r s i n g
def getChild(self, t, i):
"""Get a child 0..n-1 node"""
raise NotImplementedError
def setChild(self, t, i, child):
"""Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
raise NotImplementedError
def deleteChild(self, t, i):
"""Remove ith child and shift children down from right."""
raise NotImplementedError
def getChildCount(self, t):
"""How many children? If 0, then this is a leaf node"""
raise NotImplementedError
def getParent(self, t):
"""
Who is the parent node of this node; if null, implies node is root.
If your node type doesn't handle this, it's ok but the tree rewrites
in tree parsers need this functionality.
"""
raise NotImplementedError
def setParent(self, t, parent):
"""
Who is the parent node of this node; if null, implies node is root.
If your node type doesn't handle this, it's ok but the tree rewrites
in tree parsers need this functionality.
"""
raise NotImplementedError
def getChildIndex(self, t):
"""
What index is this node in the child list? Range: 0..n-1
If your node type doesn't handle this, it's ok but the tree rewrites
in tree parsers need this functionality.
"""
raise NotImplementedError
def setChildIndex(self, t, index):
"""
What index is this node in the child list? Range: 0..n-1
If your node type doesn't handle this, it's ok but the tree rewrites
in tree parsers need this functionality.
"""
raise NotImplementedError
def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
"""
Replace from start to stop child index of parent with t, which might
be a list. Number of children may be different
after this call.
If parent is null, don't do anything; must be at root of overall tree.
Can't replace whatever points to the parent externally. Do nothing.
"""
raise NotImplementedError
# Misc
def create(self, *args):
"""
Deprecated, use createWithPayload, createFromToken or createFromType.
This method only exists to mimic the Java interface of TreeAdaptor.
"""
if len(args) == 1 and isinstance(args[0], Token):
# Object create(Token payload);
## warnings.warn(
## "Using create() is deprecated, use createWithPayload()",
## DeprecationWarning,
## stacklevel=2
## )
return self.createWithPayload(args[0])
if (len(args) == 2
and isinstance(args[0], (int, long))
and isinstance(args[1], Token)
):
# Object create(int tokenType, Token fromToken);
## warnings.warn(
## "Using create() is deprecated, use createFromToken()",
## DeprecationWarning,
## stacklevel=2
## )
return self.createFromToken(args[0], args[1])
if (len(args) == 3
and isinstance(args[0], (int, long))
and isinstance(args[1], Token)
and isinstance(args[2], basestring)
):
# Object create(int tokenType, Token fromToken, String text);
## warnings.warn(
## "Using create() is deprecated, use createFromToken()",
## DeprecationWarning,
## stacklevel=2
## )
return self.createFromToken(args[0], args[1], args[2])
if (len(args) == 2
and isinstance(args[0], (int, long))
and isinstance(args[1], basestring)
):
# Object create(int tokenType, String text);
## warnings.warn(
## "Using create() is deprecated, use createFromType()",
## DeprecationWarning,
## stacklevel=2
## )
return self.createFromType(args[0], args[1])
raise TypeError(
"No create method with this signature found: %s"
% (', '.join(type(v).__name__ for v in args))
)
############################################################################
#
# base implementation of Tree and TreeAdaptor
#
# Tree
# \- BaseTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
#
############################################################################
class BaseTree(Tree):
"""
@brief A generic tree implementation with no payload.
You must subclass to
actually have any user data. ANTLR v3 uses a list of children approach
instead of the child-sibling approach in v2. A flat tree (a list) is
an empty node whose children represent the list. An empty, but
non-null node is called "nil".
"""
# BaseTree is abstract, no need to complain about not implemented abstract
# methods
# pylint: disable-msg=W0223
def __init__(self, node=None):
"""
Create a new node from an existing node does nothing for BaseTree
as there are no fields other than the children list, which cannot
be copied as the children are not considered part of this node.
"""
Tree.__init__(self)
self.children = []
self.parent = None
self.childIndex = 0
def getChild(self, i):
try:
return self.children[i]
except IndexError:
return None
def getChildren(self):
"""@brief Get the children internal List
Note that if you directly mess with
the list, do so at your own risk.
"""
# FIXME: mark as deprecated
return self.children
def getFirstChildWithType(self, treeType):
for child in self.children:
if child.getType() == treeType:
return child
return None
def getChildCount(self):
return len(self.children)
def addChild(self, childTree):
"""Add t as child of this node.
Warning: if t has no children, but child does
and child isNil then this routine moves children to t via
t.children = child.children; i.e., without copying the array.
"""
# this implementation is much simpler and probably less efficient
# than the mumbo-jumbo that Ter did for the Java runtime.
if childTree is None:
return
if childTree.isNil():
# t is an empty node possibly with children
if self.children is childTree.children:
raise ValueError("attempt to add child list to itself")
# fix parent pointer and childIndex for new children
for idx, child in enumerate(childTree.children):
child.parent = self
child.childIndex = len(self.children) + idx
self.children += childTree.children
else:
# child is not nil (don't care about children)
self.children.append(childTree)
childTree.parent = self
childTree.childIndex = len(self.children) - 1
def addChildren(self, children):
"""Add all elements of kids list as children of this node"""
self.children += children
def setChild(self, i, t):
if t is None:
return
if t.isNil():
raise ValueError("Can't set single child to a list")
self.children[i] = t
t.parent = self
t.childIndex = i
def deleteChild(self, i):
killed = self.children[i]
del self.children[i]
# walk rest and decrement their child indexes
for idx, child in enumerate(self.children[i:]):
child.childIndex = i + idx
return killed
def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
"""
Delete children from start to stop and replace with t even if t is
a list (nil-root tree). num of children can increase or decrease.
For huge child lists, inserting children can force walking rest of
children to set their childindex; could be slow.
"""
if (startChildIndex >= len(self.children)
or stopChildIndex >= len(self.children)
):
raise IndexError("indexes invalid")
replacingHowMany = stopChildIndex - startChildIndex + 1
# normalize to a list of children to add: newChildren
if newTree.isNil():
newChildren = newTree.children
else:
newChildren = [newTree]
replacingWithHowMany = len(newChildren)
delta = replacingHowMany - replacingWithHowMany
if delta == 0:
# if same number of nodes, do direct replace
for idx, child in enumerate(newChildren):
self.children[idx + startChildIndex] = child
child.parent = self
child.childIndex = idx + startChildIndex
else:
# length of children changes...
# ...delete replaced segment...
del self.children[startChildIndex:stopChildIndex+1]
# ...insert new segment...
self.children[startChildIndex:startChildIndex] = newChildren
# ...and fix indeces
self.freshenParentAndChildIndexes(startChildIndex)
def isNil(self):
return False
def freshenParentAndChildIndexes(self, offset=0):
for idx, child in enumerate(self.children[offset:]):
child.childIndex = idx + offset
child.parent = self
def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
if parent != self.parent:
raise ValueError(
"parents don't match; expected %r found %r"
% (parent, self.parent)
)
if i != self.childIndex:
raise ValueError(
"child indexes don't match; expected %d found %d"
% (i, self.childIndex)
)
for idx, child in enumerate(self.children):
child.sanityCheckParentAndChildIndexes(self, idx)
def getChildIndex(self):
"""BaseTree doesn't track child indexes."""
return 0
def setChildIndex(self, index):
"""BaseTree doesn't track child indexes."""
pass
def getParent(self):
"""BaseTree doesn't track parent pointers."""
return None
def setParent(self, t):
"""BaseTree doesn't track parent pointers."""
pass
def hasAncestor(self, ttype):
"""Walk upwards looking for ancestor with this token type."""
return self.getAncestor(ttype) is not None
def getAncestor(self, ttype):
"""Walk upwards and get first ancestor with this token type."""
t = self.getParent()
while t is not None:
if t.getType() == ttype:
return t
t = t.getParent()
return None
def getAncestors(self):
"""Return a list of all ancestors of this node.
The first node of list is the root and the last is the parent of
this node.
"""
if selfgetParent() is None:
return None
ancestors = []
t = self.getParent()
while t is not None:
ancestors.insert(0, t) # insert at start
t = t.getParent()
return ancestors
def toStringTree(self):
"""Print out a whole tree not just a node"""
if len(self.children) == 0:
return self.toString()
buf = []
if not self.isNil():
buf.append('(')
buf.append(self.toString())
buf.append(' ')
for i, child in enumerate(self.children):
if i > 0:
buf.append(' ')
buf.append(child.toStringTree())
if not self.isNil():
buf.append(')')
return ''.join(buf)
def getLine(self):
return 0
def getCharPositionInLine(self):
return 0
def toString(self):
"""Override to say how a node (not a tree) should look as text"""
raise NotImplementedError
class BaseTreeAdaptor(TreeAdaptor):
"""
@brief A TreeAdaptor that works with any Tree implementation.
"""
# BaseTreeAdaptor is abstract, no need to complain about not implemented
# abstract methods
# pylint: disable-msg=W0223
def nil(self):
return self.createWithPayload(None)
def errorNode(self, input, start, stop, exc):
"""
create tree node that holds the start and stop tokens associated
with an error.
If you specify your own kind of tree nodes, you will likely have to
override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
if no token payload but you might have to set token type for diff
node type.
You don't have to subclass CommonErrorNode; you will likely need to
subclass your own tree node class to avoid class cast exception.
"""
return CommonErrorNode(input, start, stop, exc)
def isNil(self, tree):
return tree.isNil()
def dupTree(self, t, parent=None):
"""
This is generic in the sense that it will work with any kind of
tree (not just Tree interface). It invokes the adaptor routines
not the tree node routines to do the construction.
"""
if t is None:
return None
newTree = self.dupNode(t)
# ensure new subtree root has parent/child index set
# same index in new tree
self.setChildIndex(newTree, self.getChildIndex(t))
self.setParent(newTree, parent)
for i in range(self.getChildCount(t)):
child = self.getChild(t, i)
newSubTree = self.dupTree(child, t)
self.addChild(newTree, newSubTree)
return newTree
def addChild(self, tree, child):
"""
Add a child to the tree t. If child is a flat tree (a list), make all
in list children of t. Warning: if t has no children, but child does
and child isNil then you can decide it is ok to move children to t via
t.children = child.children; i.e., without copying the array. Just
make sure that this is consistent with have the user will build
ASTs.
"""
#if isinstance(child, Token):
# child = self.createWithPayload(child)
if tree is not None and child is not None:
tree.addChild(child)
def becomeRoot(self, newRoot, oldRoot):
"""
If oldRoot is a nil root, just copy or move the children to newRoot.
If not a nil root, make oldRoot a child of newRoot.
old=^(nil a b c), new=r yields ^(r a b c)
old=^(a b c), new=r yields ^(r ^(a b c))
If newRoot is a nil-rooted single child tree, use the single
child as the new root node.
old=^(nil a b c), new=^(nil r) yields ^(r a b c)
old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
If oldRoot was null, it's ok, just return newRoot (even if isNil).
old=null, new=r yields r
old=null, new=^(nil r) yields ^(nil r)
Return newRoot. Throw an exception if newRoot is not a
simple node or nil root with a single child node--it must be a root
node. If newRoot is ^(nil x) return x as newRoot.
Be advised that it's ok for newRoot to point at oldRoot's
children; i.e., you don't have to copy the list. We are
constructing these nodes so we should have this control for
efficiency.
"""
if isinstance(newRoot, Token):
newRoot = self.create(newRoot)
if oldRoot is None:
return newRoot
if not isinstance(newRoot, CommonTree):
newRoot = self.createWithPayload(newRoot)
# handle ^(nil real-node)
if newRoot.isNil():
nc = newRoot.getChildCount()
if nc == 1:
newRoot = newRoot.getChild(0)
elif nc > 1:
# TODO: make tree run time exceptions hierarchy
raise RuntimeError("more than one node as root")
# add oldRoot to newRoot; addChild takes care of case where oldRoot
# is a flat list (i.e., nil-rooted tree). All children of oldRoot
# are added to newRoot.
newRoot.addChild(oldRoot)
return newRoot
def rulePostProcessing(self, root):
"""Transform ^(nil x) to x and nil to null"""
if root is not None and root.isNil():
if root.getChildCount() == 0:
root = None
elif root.getChildCount() == 1:
root = root.getChild(0)
# whoever invokes rule will set parent and child index
root.setParent(None)
root.setChildIndex(-1)
return root
def createFromToken(self, tokenType, fromToken, text=None):
assert isinstance(tokenType, (int, long)), type(tokenType).__name__
assert isinstance(fromToken, Token), type(fromToken).__name__
assert text is None or isinstance(text, basestring), type(text).__name__
fromToken = self.createToken(fromToken)
fromToken.type = tokenType
if text is not None:
fromToken.text = text
t = self.createWithPayload(fromToken)
return t
def createFromType(self, tokenType, text):
assert isinstance(tokenType, (int, long)), type(tokenType).__name__
assert isinstance(text, basestring) or text is None, type(text).__name__
fromToken = self.createToken(tokenType=tokenType, text=text)
t = self.createWithPayload(fromToken)
return t
def getType(self, t):
return t.getType()
def setType(self, t, type):
raise RuntimeError("don't know enough about Tree node")
def getText(self, t):
return t.getText()
def setText(self, t, text):
raise RuntimeError("don't know enough about Tree node")
def getChild(self, t, i):
return t.getChild(i)
def setChild(self, t, i, child):
t.setChild(i, child)
def deleteChild(self, t, i):
return t.deleteChild(i)
def getChildCount(self, t):
return t.getChildCount()
def getUniqueID(self, node):
return hash(node)
def createToken(self, fromToken=None, tokenType=None, text=None):
"""
Tell me how to create a token for use with imaginary token nodes.
For example, there is probably no input symbol associated with imaginary
token DECL, but you need to create it as a payload or whatever for
the DECL node as in ^(DECL type ID).
If you care what the token payload objects' type is, you should
override this method and any other createToken variant.
"""
raise NotImplementedError
############################################################################
#
# common tree implementation
#
# Tree
# \- BaseTree
# \- CommonTree
# \- CommonErrorNode
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class CommonTree(BaseTree):
"""@brief A tree node that is wrapper for a Token object.
After 3.0 release
while building tree rewrite stuff, it became clear that computing
parent and child index is very difficult and cumbersome. Better to
spend the space in every tree node. If you don't want these extra
fields, it's easy to cut them out in your own BaseTree subclass.
"""
def __init__(self, payload):
BaseTree.__init__(self)
# What token indexes bracket all tokens associated with this node
# and below?
self.startIndex = -1
self.stopIndex = -1
# Who is the parent node of this node; if null, implies node is root
self.parent = None
# What index is this node in the child list? Range: 0..n-1
self.childIndex = -1
# A single token is the payload
if payload is None:
self.token = None
elif isinstance(payload, CommonTree):
self.token = payload.token
self.startIndex = payload.startIndex
self.stopIndex = payload.stopIndex
elif payload is None or isinstance(payload, Token):
self.token = payload
else:
raise TypeError(type(payload).__name__)
def getToken(self):
return self.token
def dupNode(self):
return CommonTree(self)
def isNil(self):
return self.token is None
def getType(self):
if self.token is None:
return INVALID_TOKEN_TYPE
return self.token.getType()
type = property(getType)
def getText(self):
if self.token is None:
return None
return self.token.text
text = property(getText)
def getLine(self):
if self.token is None or self.token.getLine() == 0:
if self.getChildCount():
return self.getChild(0).getLine()
else:
return 0
return self.token.getLine()
line = property(getLine)
def getCharPositionInLine(self):
if self.token is None or self.token.getCharPositionInLine() == -1:
if self.getChildCount():
return self.getChild(0).getCharPositionInLine()
else:
return 0
else:
return self.token.getCharPositionInLine()
charPositionInLine = property(getCharPositionInLine)
def getTokenStartIndex(self):
if self.startIndex == -1 and self.token is not None:
return self.token.getTokenIndex()
return self.startIndex
def setTokenStartIndex(self, index):
self.startIndex = index
tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)
def getTokenStopIndex(self):
if self.stopIndex == -1 and self.token is not None:
return self.token.getTokenIndex()
return self.stopIndex
def setTokenStopIndex(self, index):
self.stopIndex = index
tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)
def setUnknownTokenBoundaries(self):
"""For every node in this subtree, make sure it's start/stop token's
are set. Walk depth first, visit bottom up. Only updates nodes
with at least one token index < 0.
"""
if self.children is None:
if self.startIndex < 0 or self.stopIndex < 0:
self.startIndex = self.stopIndex = self.token.getTokenIndex()
return
for child in self.children:
child.setUnknownTokenBoundaries()
if self.startIndex >= 0 and self.stopIndex >= 0:
# already set
return
if self.children:
firstChild = self.children[0]
lastChild = self.children[-1]
self.startIndex = firstChild.getTokenStartIndex()
self.stopIndex = lastChild.getTokenStopIndex()
def getChildIndex(self):
#FIXME: mark as deprecated
return self.childIndex
def setChildIndex(self, idx):
#FIXME: mark as deprecated
self.childIndex = idx
def getParent(self):
#FIXME: mark as deprecated
return self.parent
def setParent(self, t):
#FIXME: mark as deprecated
self.parent = t
def toString(self):
if self.isNil():
return "nil"
if self.getType() == INVALID_TOKEN_TYPE:
return "<errornode>"
return self.token.text
__str__ = toString
def toStringTree(self):
if not self.children:
return self.toString()
ret = ''
if not self.isNil():
ret += '(%s ' % (self.toString())
ret += ' '.join([child.toStringTree() for child in self.children])
if not self.isNil():
ret += ')'
return ret
INVALID_NODE = CommonTree(INVALID_TOKEN)
class CommonErrorNode(CommonTree):
"""A node representing erroneous token range in token stream"""
def __init__(self, input, start, stop, exc):
CommonTree.__init__(self, None)
if (stop is None or
(stop.getTokenIndex() < start.getTokenIndex() and
stop.getType() != EOF
)
):
# sometimes resync does not consume a token (when LT(1) is
# in follow set. So, stop will be 1 to left to start. adjust.
# Also handle case where start is the first token and no token
# is consumed during recovery; LT(-1) will return null.
stop = start
self.input = input
self.start = start
self.stop = stop
self.trappedException = exc
def isNil(self):
return False
def getType(self):
return INVALID_TOKEN_TYPE
def getText(self):
if isinstance(self.start, Token):
i = self.start.getTokenIndex()
j = self.stop.getTokenIndex()
if self.stop.getType() == EOF:
j = self.input.size()
badText = self.input.toString(i, j)
elif isinstance(self.start, Tree):
badText = self.input.toString(self.start, self.stop)
else:
# people should subclass if they alter the tree type so this
# next one is for sure correct.
badText = "<unknown>"
return badText
def toString(self):
if isinstance(self.trappedException, MissingTokenException):
return ("<missing type: "
+ str(self.trappedException.getMissingType())
+ ">")
elif isinstance(self.trappedException, UnwantedTokenException):
return ("<extraneous: "
+ str(self.trappedException.getUnexpectedToken())
+ ", resync=" + self.getText() + ">")
elif isinstance(self.trappedException, MismatchedTokenException):
return ("<mismatched token: "
+ str(self.trappedException.token)
+ ", resync=" + self.getText() + ">")
elif isinstance(self.trappedException, NoViableAltException):
return ("<unexpected: "
+ str(self.trappedException.token)
+ ", resync=" + self.getText() + ">")
return "<error: "+self.getText()+">"
class CommonTreeAdaptor(BaseTreeAdaptor):
"""
@brief A TreeAdaptor that works with any Tree implementation.
It provides
really just factory methods; all the work is done by BaseTreeAdaptor.
If you would like to have different tokens created than ClassicToken
objects, you need to override this and then set the parser tree adaptor to
use your subclass.
To get your parser to build nodes of a different type, override
create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
dupNode is called to duplicate nodes during rewrite operations.
"""
def dupNode(self, treeNode):
"""
Duplicate a node. This is part of the factory;
override if you want another kind of node to be built.
I could use reflection to prevent having to override this
but reflection is slow.
"""
if treeNode is None:
return None
return treeNode.dupNode()
def createWithPayload(self, payload):
return CommonTree(payload)
def createToken(self, fromToken=None, tokenType=None, text=None):
"""
Tell me how to create a token for use with imaginary token nodes.
For example, there is probably no input symbol associated with imaginary
token DECL, but you need to create it as a payload or whatever for
the DECL node as in ^(DECL type ID).
If you care what the token payload objects' type is, you should
override this method and any other createToken variant.
"""
if fromToken is not None:
return CommonToken(oldToken=fromToken)
return CommonToken(type=tokenType, text=text)
def setTokenBoundaries(self, t, startToken, stopToken):
"""
Track start/stop token for subtree root created for a rule.
Only works with Tree nodes. For rules that match nothing,
seems like this will yield start=i and stop=i-1 in a nil node.
Might be useful info so I'll not force to be i..i.
"""
if t is None:
return
start = 0
stop = 0
if startToken is not None:
start = startToken.index
if stopToken is not None:
stop = stopToken.index
t.setTokenStartIndex(start)
t.setTokenStopIndex(stop)
def getTokenStartIndex(self, t):
if t is None:
return -1
return t.getTokenStartIndex()
def getTokenStopIndex(self, t):
if t is None:
return -1
return t.getTokenStopIndex()
def getText(self, t):
if t is None:
return None
return t.getText()
def getType(self, t):
if t is None:
return INVALID_TOKEN_TYPE
return t.getType()
def getToken(self, t):
"""
What is the Token associated with this node? If
you are not using CommonTree, then you must
override this in your own adaptor.
"""
if isinstance(t, CommonTree):
return t.getToken()
return None # no idea what to do
def getChild(self, t, i):
if t is None:
return None
return t.getChild(i)
def getChildCount(self, t):
if t is None:
return 0
return t.getChildCount()
def getParent(self, t):
return t.getParent()
def setParent(self, t, parent):
t.setParent(parent)
def getChildIndex(self, t):
if t is None:
return 0
return t.getChildIndex()
def setChildIndex(self, t, index):
t.setChildIndex(index)
def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
if parent is not None:
parent.replaceChildren(startChildIndex, stopChildIndex, t)
############################################################################
#
# streams
#
# TreeNodeStream
# \- BaseTree
# \- CommonTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class TreeNodeStream(IntStream):
"""@brief A stream of tree nodes
It accessing nodes from a tree of some kind.
"""
# TreeNodeStream is abstract, no need to complain about not implemented
# abstract methods
# pylint: disable-msg=W0223
def get(self, i):
"""Get a tree node at an absolute index i; 0..n-1.
If you don't want to buffer up nodes, then this method makes no
sense for you.
"""
raise NotImplementedError
def LT(self, k):
"""
Get tree node at current input pointer + i ahead where i=1 is next node.
i<0 indicates nodes in the past. So LT(-1) is previous node, but
implementations are not required to provide results for k < -1.
LT(0) is undefined. For i>=n, return null.
Return null for LT(0) and any index that results in an absolute address
that is negative.
This is analogus to the LT() method of the TokenStream, but this
returns a tree node instead of a token. Makes code gen identical
for both parser and tree grammars. :)
"""
raise NotImplementedError
def getTreeSource(self):
"""
Where is this stream pulling nodes from? This is not the name, but
the object that provides node objects.
"""
raise NotImplementedError
def getTokenStream(self):
"""
If the tree associated with this stream was created from a TokenStream,
you can specify it here. Used to do rule $text attribute in tree
parser. Optional unless you use tree parser rule text attribute
or output=template and rewrite=true options.
"""
raise NotImplementedError
def getTreeAdaptor(self):
"""
What adaptor can tell me how to interpret/navigate nodes and
trees. E.g., get text of a node.
"""
raise NotImplementedError
def setUniqueNavigationNodes(self, uniqueNavigationNodes):
"""
As we flatten the tree, we use UP, DOWN nodes to represent
the tree structure. When debugging we need unique nodes
so we have to instantiate new ones. When doing normal tree
parsing, it's slow and a waste of memory to create unique
navigation nodes. Default should be false;
"""
raise NotImplementedError
def toString(self, start, stop):
"""
Return the text of all nodes from start to stop, inclusive.
If the stream does not buffer all the nodes then it can still
walk recursively from start until stop. You can always return
null or "" too, but users should not access $ruleLabel.text in
an action of course in that case.
"""
raise NotImplementedError
# REWRITING TREES (used by tree parser)
def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
"""
Replace from start to stop child index of parent with t, which might
be a list. Number of children may be different
after this call. The stream is notified because it is walking the
tree and might need to know you are monkeying with the underlying
tree. Also, it might be able to modify the node stream to avoid
restreaming for future phases.
If parent is null, don't do anything; must be at root of overall tree.
Can't replace whatever points to the parent externally. Do nothing.
"""
raise NotImplementedError
class CommonTreeNodeStream(TreeNodeStream):
"""@brief A buffered stream of tree nodes.
Nodes can be from a tree of ANY kind.
This node stream sucks all nodes out of the tree specified in
the constructor during construction and makes pointers into
the tree using an array of Object pointers. The stream necessarily
includes pointers to DOWN and UP and EOF nodes.
This stream knows how to mark/release for backtracking.
This stream is most suitable for tree interpreters that need to
jump around a lot or for tree parsers requiring speed (at cost of memory).
There is some duplicated functionality here with UnBufferedTreeNodeStream
but just in bookkeeping, not tree walking etc...
@see UnBufferedTreeNodeStream
"""
def __init__(self, *args):
TreeNodeStream.__init__(self)
if len(args) == 1:
adaptor = CommonTreeAdaptor()
tree = args[0]
nodes = None
down = None
up = None
eof = None
elif len(args) == 2:
adaptor = args[0]
tree = args[1]
nodes = None
down = None
up = None
eof = None
elif len(args) == 3:
parent = args[0]
start = args[1]
stop = args[2]
adaptor = parent.adaptor
tree = parent.root
nodes = parent.nodes[start:stop]
down = parent.down
up = parent.up
eof = parent.eof
else:
raise TypeError("Invalid arguments")
# all these navigation nodes are shared and hence they
# cannot contain any line/column info
if down is not None:
self.down = down
else:
self.down = adaptor.createFromType(DOWN, "DOWN")
if up is not None:
self.up = up
else:
self.up = adaptor.createFromType(UP, "UP")
if eof is not None:
self.eof = eof
else:
self.eof = adaptor.createFromType(EOF, "EOF")
# The complete mapping from stream index to tree node.
# This buffer includes pointers to DOWN, UP, and EOF nodes.
# It is built upon ctor invocation. The elements are type
# Object as we don't what the trees look like.
# Load upon first need of the buffer so we can set token types
# of interest for reverseIndexing. Slows us down a wee bit to
# do all of the if p==-1 testing everywhere though.
if nodes is not None:
self.nodes = nodes
else:
self.nodes = []
# Pull nodes from which tree?
self.root = tree
# IF this tree (root) was created from a token stream, track it.
self.tokens = None
# What tree adaptor was used to build these trees
self.adaptor = adaptor
# Reuse same DOWN, UP navigation nodes unless this is true
self.uniqueNavigationNodes = False
# The index into the nodes list of the current node (next node
# to consume). If -1, nodes array not filled yet.
self.p = -1
# Track the last mark() call result value for use in rewind().
self.lastMarker = None
# Stack of indexes used for push/pop calls
self.calls = []
def fillBuffer(self):
"""Walk tree with depth-first-search and fill nodes buffer.
Don't do DOWN, UP nodes if its a list (t is isNil).
"""
self._fillBuffer(self.root)
self.p = 0 # buffer of nodes intialized now
def _fillBuffer(self, t):
nil = self.adaptor.isNil(t)
if not nil:
self.nodes.append(t) # add this node
# add DOWN node if t has children
n = self.adaptor.getChildCount(t)
if not nil and n > 0:
self.addNavigationNode(DOWN)
# and now add all its children
for c in range(n):
self._fillBuffer(self.adaptor.getChild(t, c))
# add UP node if t has children
if not nil and n > 0:
self.addNavigationNode(UP)
def getNodeIndex(self, node):
"""What is the stream index for node? 0..n-1
Return -1 if node not found.
"""
if self.p == -1:
self.fillBuffer()
for i, t in enumerate(self.nodes):
if t == node:
return i
return -1
def addNavigationNode(self, ttype):
"""
As we flatten the tree, we use UP, DOWN nodes to represent
the tree structure. When debugging we need unique nodes
so instantiate new ones when uniqueNavigationNodes is true.
"""
navNode = None
if ttype == DOWN:
if self.hasUniqueNavigationNodes():
navNode = self.adaptor.createFromType(DOWN, "DOWN")
else:
navNode = self.down
else:
if self.hasUniqueNavigationNodes():
navNode = self.adaptor.createFromType(UP, "UP")
else:
navNode = self.up
self.nodes.append(navNode)
def get(self, i):
if self.p == -1:
self.fillBuffer()
return self.nodes[i]
def LT(self, k):
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if k < 0:
return self.LB(-k)
if self.p + k - 1 >= len(self.nodes):
return self.eof
return self.nodes[self.p + k - 1]
def getCurrentSymbol(self):
return self.LT(1)
def LB(self, k):
"""Look backwards k nodes"""
if k == 0:
return None
if self.p - k < 0:
return None
return self.nodes[self.p - k]
def getTreeSource(self):
return self.root
def getSourceName(self):
return self.getTokenStream().getSourceName()
def getTokenStream(self):
return self.tokens
def setTokenStream(self, tokens):
self.tokens = tokens
def getTreeAdaptor(self):
return self.adaptor
def hasUniqueNavigationNodes(self):
return self.uniqueNavigationNodes
def setUniqueNavigationNodes(self, uniqueNavigationNodes):
self.uniqueNavigationNodes = uniqueNavigationNodes
def consume(self):
if self.p == -1:
self.fillBuffer()
self.p += 1
def LA(self, i):
return self.adaptor.getType(self.LT(i))
def mark(self):
if self.p == -1:
self.fillBuffer()
self.lastMarker = self.index()
return self.lastMarker
def release(self, marker=None):
# no resources to release
pass
def index(self):
return self.p
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
self.seek(marker)
def seek(self, index):
if self.p == -1:
self.fillBuffer()
self.p = index
def push(self, index):
"""
Make stream jump to a new location, saving old location.
Switch back with pop().
"""
self.calls.append(self.p) # save current index
self.seek(index)
def pop(self):
"""
Seek back to previous index saved during last push() call.
Return top of stack (return index).
"""
ret = self.calls.pop(-1)
self.seek(ret)
return ret
def reset(self):
self.p = 0
self.lastMarker = 0
self.calls = []
def size(self):
if self.p == -1:
self.fillBuffer()
return len(self.nodes)
# TREE REWRITE INTERFACE
def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
if parent is not None:
self.adaptor.replaceChildren(
parent, startChildIndex, stopChildIndex, t
)
def __str__(self):
"""Used for testing, just return the token type stream"""
if self.p == -1:
self.fillBuffer()
return ' '.join([str(self.adaptor.getType(node))
for node in self.nodes
])
def toString(self, start, stop):
if start is None or stop is None:
return None
if self.p == -1:
self.fillBuffer()
#System.out.println("stop: "+stop);
#if ( start instanceof CommonTree )
# System.out.print("toString: "+((CommonTree)start).getToken()+", ");
#else
# System.out.println(start);
#if ( stop instanceof CommonTree )
# System.out.println(((CommonTree)stop).getToken());
#else
# System.out.println(stop);
# if we have the token stream, use that to dump text in order
if self.tokens is not None:
beginTokenIndex = self.adaptor.getTokenStartIndex(start)
endTokenIndex = self.adaptor.getTokenStopIndex(stop)
# if it's a tree, use start/stop index from start node
# else use token range from start/stop nodes
if self.adaptor.getType(stop) == UP:
endTokenIndex = self.adaptor.getTokenStopIndex(start)
elif self.adaptor.getType(stop) == EOF:
endTokenIndex = self.size() -2 # don't use EOF
return self.tokens.toString(beginTokenIndex, endTokenIndex)
# walk nodes looking for start
i, t = 0, None
for i, t in enumerate(self.nodes):
if t == start:
break
# now walk until we see stop, filling string buffer with text
buf = []
t = self.nodes[i]
while t != stop:
text = self.adaptor.getText(t)
if text is None:
text = " " + self.adaptor.getType(t)
buf.append(text)
i += 1
t = self.nodes[i]
# include stop node too
text = self.adaptor.getText(stop)
if text is None:
text = " " +self.adaptor.getType(stop)
buf.append(text)
return ''.join(buf)
## iterator interface
def __iter__(self):
if self.p == -1:
self.fillBuffer()
for node in self.nodes:
yield node
#############################################################################
#
# tree parser
#
#############################################################################
class TreeParser(BaseRecognizer):
"""@brief Baseclass for generated tree parsers.
A parser for a stream of tree nodes. "tree grammars" result in a subclass
of this. All the error reporting and recovery is shared with Parser via
the BaseRecognizer superclass.
"""
def __init__(self, input, state=None):
BaseRecognizer.__init__(self, state)
self.input = None
self.setTreeNodeStream(input)
def reset(self):
BaseRecognizer.reset(self) # reset all recognizer state variables
if self.input is not None:
self.input.seek(0) # rewind the input
def setTreeNodeStream(self, input):
"""Set the input stream"""
self.input = input
def getTreeNodeStream(self):
return self.input
def getSourceName(self):
return self.input.getSourceName()
def getCurrentInputSymbol(self, input):
return input.LT(1)
def getMissingSymbol(self, input, e, expectedTokenType, follow):
tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
return CommonTree(CommonToken(type=expectedTokenType, text=tokenText))
# precompiled regex used by inContext
dotdot = ".*[^.]\\.\\.[^.].*"
doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*"
dotdotPattern = re.compile(dotdot)
doubleEtcPattern = re.compile(doubleEtc)
def inContext(self, context, adaptor=None, tokenName=None, t=None):
"""Check if current node in input has a context.
Context means sequence of nodes towards root of tree. For example,
you might say context is "MULT" which means my parent must be MULT.
"CLASS VARDEF" says current node must be child of a VARDEF and whose
parent is a CLASS node. You can use "..." to mean zero-or-more nodes.
"METHOD ... VARDEF" means my parent is VARDEF and somewhere above
that is a METHOD node. The first node in the context is not
necessarily the root. The context matcher stops matching and returns
true when it runs out of context. There is no way to force the first
node to be the root.
"""
return _inContext(
self.input.getTreeAdaptor(), self.getTokenNames(),
self.input.LT(1), context)
@classmethod
def _inContext(cls, adaptor, tokenNames, t, context):
"""The worker for inContext.
It's static and full of parameters for testing purposes.
"""
if cls.dotdotPattern.match(context):
# don't allow "..", must be "..."
raise ValueError("invalid syntax: ..")
if cls.doubleEtcPattern.match(context):
# don't allow double "..."
raise ValueError("invalid syntax: ... ...")
# ensure spaces around ...
context = context.replace("...", " ... ")
context = context.strip()
nodes = context.split()
ni = len(nodes) - 1
t = adaptor.getParent(t)
while ni >= 0 and t is not None:
if nodes[ni] == "...":
# walk upwards until we see nodes[ni-1] then continue walking
if ni == 0:
# ... at start is no-op
return True
goal = nodes[ni-1]
ancestor = cls._getAncestor(adaptor, tokenNames, t, goal)
if ancestor is None:
return False
t = ancestor
ni -= 1
name = tokenNames[adaptor.getType(t)]
if name != nodes[ni]:
return False
# advance to parent and to previous element in context node list
ni -= 1
t = adaptor.getParent(t)
# at root but more nodes to match
if t is None and ni >= 0:
return False
return True
@staticmethod
def _getAncestor(adaptor, tokenNames, t, goal):
"""Helper for static inContext."""
while t is not None:
name = tokenNames[adaptor.getType(t)]
if name == goal:
return t
t = adaptor.getParent(t)
return None
def matchAny(self, ignore): # ignore stream, copy of this.input
"""
Match '.' in tree parser has special meaning. Skip node or
entire tree if node has children. If children, scan until
corresponding UP node.
"""
self._state.errorRecovery = False
look = self.input.LT(1)
if self.input.getTreeAdaptor().getChildCount(look) == 0:
self.input.consume() # not subtree, consume 1 node and return
return
# current node is a subtree, skip to corresponding UP.
# must count nesting level to get right UP
level = 0
tokenType = self.input.getTreeAdaptor().getType(look)
while tokenType != EOF and not (tokenType == UP and level==0):
self.input.consume()
look = self.input.LT(1)
tokenType = self.input.getTreeAdaptor().getType(look)
if tokenType == DOWN:
level += 1
elif tokenType == UP:
level -= 1
self.input.consume() # consume UP
def mismatch(self, input, ttype, follow):
"""
We have DOWN/UP nodes in the stream that have no line info; override.
plus we want to alter the exception type. Don't try to recover
from tree parser errors inline...
"""
raise MismatchedTreeNodeException(ttype, input)
def getErrorHeader(self, e):
"""
Prefix error message with the grammar name because message is
always intended for the programmer because the parser built
the input tree not the user.
"""
return (self.getGrammarFileName() +
": node from %sline %s:%s"
% (['', "after "][e.approximateLineInfo],
e.line,
e.charPositionInLine
)
)
def getErrorMessage(self, e, tokenNames):
"""
Tree parsers parse nodes they usually have a token object as
payload. Set the exception token and do the default behavior.
"""
if isinstance(self, TreeParser):
adaptor = e.input.getTreeAdaptor()
e.token = adaptor.getToken(e.node)
if e.token is not None: # could be an UP/DOWN node
e.token = CommonToken(
type=adaptor.getType(e.node),
text=adaptor.getText(e.node)
)
return BaseRecognizer.getErrorMessage(self, e, tokenNames)
def traceIn(self, ruleName, ruleIndex):
BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))
def traceOut(self, ruleName, ruleIndex):
BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
#############################################################################
#
# tree visitor
#
#############################################################################
class TreeVisitor(object):
"""Do a depth first walk of a tree, applying pre() and post() actions
we go.
"""
def __init__(self, adaptor=None):
if adaptor is not None:
self.adaptor = adaptor
else:
self.adaptor = CommonTreeAdaptor()
def visit(self, t, pre_action=None, post_action=None):
"""Visit every node in tree t and trigger an action for each node
before/after having visited all of its children. Bottom up walk.
Execute both actions even if t has no children. Ignore return
results from transforming children since they will have altered
the child list of this node (their parent). Return result of
applying post action to this node.
The Python version differs from the Java version by taking two
callables 'pre_action' and 'post_action' instead of a class instance
that wraps those methods. Those callables must accept a TreeNode as
their single argument and return the (potentially transformed or
replaced) TreeNode.
"""
isNil = self.adaptor.isNil(t)
if pre_action is not None and not isNil:
# if rewritten, walk children of new t
t = pre_action(t)
for idx in xrange(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, idx)
self.visit(child, pre_action, post_action)
if post_action is not None and not isNil:
t = post_action(t)
return t
#############################################################################
#
# streams for rule rewriting
#
#############################################################################
class RewriteRuleElementStream(object):
"""@brief Internal helper class.
A generic list of elements tracked in an alternative to be used in
a -> rewrite rule. We need to subclass to fill in the next() method,
which returns either an AST node wrapped around a token payload or
an existing subtree.
Once you start next()ing, do not try to add more elements. It will
break the cursor tracking I believe.
@see org.antlr.runtime.tree.RewriteRuleSubtreeStream
@see org.antlr.runtime.tree.RewriteRuleTokenStream
TODO: add mechanism to detect/puke on modification after reading from
stream
"""
def __init__(self, adaptor, elementDescription, elements=None):
# Cursor 0..n-1. If singleElement!=null, cursor is 0 until you next(),
# which bumps it to 1 meaning no more elements.
self.cursor = 0
# Track single elements w/o creating a list. Upon 2nd add, alloc list
self.singleElement = None
# The list of tokens or subtrees we are tracking
self.elements = None
# Once a node / subtree has been used in a stream, it must be dup'd
# from then on. Streams are reset after subrules so that the streams
# can be reused in future subrules. So, reset must set a dirty bit.
# If dirty, then next() always returns a dup.
self.dirty = False
# The element or stream description; usually has name of the token or
# rule reference that this list tracks. Can include rulename too, but
# the exception would track that info.
self.elementDescription = elementDescription
self.adaptor = adaptor
if isinstance(elements, (list, tuple)):
# Create a stream, but feed off an existing list
self.singleElement = None
self.elements = elements
else:
# Create a stream with one element
self.add(elements)
def reset(self):
"""
Reset the condition of this stream so that it appears we have
not consumed any of its elements. Elements themselves are untouched.
Once we reset the stream, any future use will need duplicates. Set
the dirty bit.
"""
self.cursor = 0
self.dirty = True
def add(self, el):
if el is None:
return
if self.elements is not None: # if in list, just add
self.elements.append(el)
return
if self.singleElement is None: # no elements yet, track w/o list
self.singleElement = el
return
# adding 2nd element, move to list
self.elements = []
self.elements.append(self.singleElement)
self.singleElement = None
self.elements.append(el)
def nextTree(self):
"""
Return the next element in the stream. If out of elements, throw
an exception unless size()==1. If size is 1, then return elements[0].
Return a duplicate node/subtree if stream is out of elements and
size==1. If we've already used the element, dup (dirty bit set).
"""
if (self.dirty
or (self.cursor >= len(self) and len(self) == 1)
):
# if out of elements and size is 1, dup
el = self._next()
return self.dup(el)
# test size above then fetch
el = self._next()
return el
def _next(self):
"""
do the work of getting the next element, making sure that it's
a tree node or subtree. Deal with the optimization of single-
element list versus list of size > 1. Throw an exception
if the stream is empty or we're out of elements and size>1.
protected so you can override in a subclass if necessary.
"""
if len(self) == 0:
raise RewriteEmptyStreamException(self.elementDescription)
if self.cursor >= len(self): # out of elements?
if len(self) == 1: # if size is 1, it's ok; return and we'll dup
return self.toTree(self.singleElement)
# out of elements and size was not 1, so we can't dup
raise RewriteCardinalityException(self.elementDescription)
# we have elements
if self.singleElement is not None:
self.cursor += 1 # move cursor even for single element list
return self.toTree(self.singleElement)
# must have more than one in list, pull from elements
o = self.toTree(self.elements[self.cursor])
self.cursor += 1
return o
def dup(self, el):
"""
When constructing trees, sometimes we need to dup a token or AST
subtree. Dup'ing a token means just creating another AST node
around it. For trees, you must call the adaptor.dupTree() unless
the element is for a tree root; then it must be a node dup.
"""
raise NotImplementedError
def toTree(self, el):
"""
Ensure stream emits trees; tokens must be converted to AST nodes.
AST nodes can be passed through unmolested.
"""
return el
def hasNext(self):
return ( (self.singleElement is not None and self.cursor < 1)
or (self.elements is not None
and self.cursor < len(self.elements)
)
)
def size(self):
if self.singleElement is not None:
return 1
if self.elements is not None:
return len(self.elements)
return 0
__len__ = size
def getDescription(self):
"""Deprecated. Directly access elementDescription attribute"""
return self.elementDescription
class RewriteRuleTokenStream(RewriteRuleElementStream):
"""@brief Internal helper class."""
def toTree(self, el):
# Don't convert to a tree unless they explicitly call nextTree.
# This way we can do hetero tree nodes in rewrite.
return el
def nextNode(self):
t = self._next()
return self.adaptor.createWithPayload(t)
def nextToken(self):
return self._next()
def dup(self, el):
raise TypeError("dup can't be called for a token stream.")
class RewriteRuleSubtreeStream(RewriteRuleElementStream):
"""@brief Internal helper class."""
def nextNode(self):
"""
Treat next element as a single node even if it's a subtree.
This is used instead of next() when the result has to be a
tree root node. Also prevents us from duplicating recently-added
children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
must dup the type node, but ID has been added.
Referencing a rule result twice is ok; dup entire tree as
we can't be adding trees as root; e.g., expr expr.
Hideous code duplication here with super.next(). Can't think of
a proper way to refactor. This needs to always call dup node
and super.next() doesn't know which to call: dup node or dup tree.
"""
if (self.dirty
or (self.cursor >= len(self) and len(self) == 1)
):
# if out of elements and size is 1, dup (at most a single node
# since this is for making root nodes).
el = self._next()
return self.adaptor.dupNode(el)
# test size above then fetch
el = self._next()
return el
def dup(self, el):
return self.adaptor.dupTree(el)
class RewriteRuleNodeStream(RewriteRuleElementStream):
"""
Queues up nodes matched on left side of -> in a tree parser. This is
the analog of RewriteRuleTokenStream for normal parsers.
"""
def nextNode(self):
return self._next()
def toTree(self, el):
return self.adaptor.dupNode(el)
def dup(self, el):
# we dup every node, so don't have to worry about calling dup; short-
#circuited next() so it doesn't call.
raise TypeError("dup can't be called for a node stream.")
class TreeRuleReturnScope(RuleReturnScope):
"""
This is identical to the ParserRuleReturnScope except that
the start property is a tree nodes not Token object
when you are parsing trees. To be generic the tree node types
have to be Object.
"""
def __init__(self):
self.start = None
self.tree = None
def getStart(self):
return self.start
def getTree(self):
return self.tree
| 78,338 | Python | .py | 1,854 | 32.673679 | 82 | 0.614011 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,322 | streams.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/streams.py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import codecs
from StringIO import StringIO
from .constants import DEFAULT_CHANNEL, EOF
from .tokens import Token, EOF_TOKEN
############################################################################
#
# basic interfaces
# IntStream
# +- CharStream
# \- TokenStream
#
# subclasses must implemented all methods
#
############################################################################
class IntStream(object):
"""
@brief Base interface for streams of integer values.
A simple stream of integers used when all I care about is the char
or token type sequence (such as interpretation).
"""
def consume(self):
raise NotImplementedError
def LA(self, i):
"""Get int at current input pointer + i ahead where i=1 is next int.
Negative indexes are allowed. LA(-1) is previous token (token
just matched). LA(-i) where i is before first token should
yield -1, invalid char / EOF.
"""
raise NotImplementedError
def mark(self):
"""
Tell the stream to start buffering if it hasn't already. Return
current input position, index(), or some other marker so that
when passed to rewind() you get back to the same spot.
rewind(mark()) should not affect the input cursor. The Lexer
track line/col info as well as input index so its markers are
not pure input indexes. Same for tree node streams.
"""
raise NotImplementedError
def index(self):
"""
Return the current input symbol index 0..n where n indicates the
last symbol has been read. The index is the symbol about to be
read not the most recently read symbol.
"""
raise NotImplementedError
def rewind(self, marker=None):
"""
Reset the stream so that next call to index would return marker.
The marker will usually be index() but it doesn't have to be. It's
just a marker to indicate what state the stream was in. This is
essentially calling release() and seek(). If there are markers
created after this marker argument, this routine must unroll them
like a stack. Assume the state the stream was in when this marker
was created.
If marker is None:
Rewind to the input position of the last marker.
Used currently only after a cyclic DFA and just
before starting a sem/syn predicate to get the
input position back to the start of the decision.
Do not "pop" the marker off the state. mark(i)
and rewind(i) should balance still. It is
like invoking rewind(last marker) but it should not "pop"
the marker off. It's like seek(last marker's input position).
"""
raise NotImplementedError
def release(self, marker=None):
"""
You may want to commit to a backtrack but don't want to force the
stream to keep bookkeeping objects around for a marker that is
no longer necessary. This will have the same behavior as
rewind() except it releases resources without the backward seek.
This must throw away resources for all markers back to the marker
argument. So if you're nested 5 levels of mark(), and then release(2)
you have to release resources for depths 2..5.
"""
raise NotImplementedError
def seek(self, index):
"""
Set the input cursor to the position indicated by index. This is
normally used to seek ahead in the input stream. No buffering is
required to do this unless you know your stream will use seek to
move backwards such as when backtracking.
This is different from rewind in its multi-directional
requirement and in that its argument is strictly an input cursor
(index).
For char streams, seeking forward must update the stream state such
as line number. For seeking backwards, you will be presumably
backtracking using the mark/rewind mechanism that restores state and
so this method does not need to update state when seeking backwards.
Currently, this method is only used for efficient backtracking using
memoization, but in the future it may be used for incremental parsing.
The index is 0..n-1. A seek to position i means that LA(1) will
return the ith symbol. So, seeking to 0 means LA(1) will return the
first element in the stream.
"""
raise NotImplementedError
def size(self):
"""
Only makes sense for streams that buffer everything up probably, but
might be useful to display the entire stream or for testing. This
value includes a single EOF.
"""
raise NotImplementedError
def getSourceName(self):
"""
Where are you getting symbols from? Normally, implementations will
pass the buck all the way to the lexer who can ask its input stream
for the file name or whatever.
"""
raise NotImplementedError
class CharStream(IntStream):
"""
@brief A source of characters for an ANTLR lexer.
This is an abstract class that must be implemented by a subclass.
"""
# pylint does not realize that this is an interface, too
#pylint: disable-msg=W0223
EOF = -1
def substring(self, start, stop):
"""
For infinite streams, you don't need this; primarily I'm providing
a useful interface for action code. Just make sure actions don't
use this on streams that don't support it.
"""
raise NotImplementedError
def LT(self, i):
"""
Get the ith character of lookahead. This is the same usually as
LA(i). This will be used for labels in the generated
lexer code. I'd prefer to return a char here type-wise, but it's
probably better to be 32-bit clean and be consistent with LA.
"""
raise NotImplementedError
def getLine(self):
"""ANTLR tracks the line information automatically"""
raise NotImplementedError
def setLine(self, line):
"""
Because this stream can rewind, we need to be able to reset the line
"""
raise NotImplementedError
def getCharPositionInLine(self):
"""
The index of the character relative to the beginning of the line 0..n-1
"""
raise NotImplementedError
def setCharPositionInLine(self, pos):
raise NotImplementedError
class TokenStream(IntStream):
"""
@brief A stream of tokens accessing tokens from a TokenSource
This is an abstract class that must be implemented by a subclass.
"""
# pylint does not realize that this is an interface, too
#pylint: disable-msg=W0223
def LT(self, k):
"""
Get Token at current input pointer + i ahead where i=1 is next Token.
i<0 indicates tokens in the past. So -1 is previous token and -2 is
two tokens ago. LT(0) is undefined. For i>=n, return Token.EOFToken.
Return null for LT(0) and any index that results in an absolute address
that is negative.
"""
raise NotImplementedError
def get(self, i):
"""
Get a token at an absolute index i; 0..n-1. This is really only
needed for profiling and debugging and token stream rewriting.
If you don't want to buffer up tokens, then this method makes no
sense for you. Naturally you can't use the rewrite stream feature.
I believe DebugTokenStream can easily be altered to not use
this method, removing the dependency.
"""
raise NotImplementedError
def getTokenSource(self):
"""
Where is this stream pulling tokens from? This is not the name, but
the object that provides Token objects.
"""
raise NotImplementedError
def toString(self, start=None, stop=None):
"""
Return the text of all tokens from start to stop, inclusive.
If the stream does not buffer all the tokens then it can just
return "" or null; Users should not access $ruleLabel.text in
an action of course in that case.
Because the user is not required to use a token with an index stored
in it, we must provide a means for two token objects themselves to
indicate the start/end location. Most often this will just delegate
to the other toString(int,int). This is also parallel with
the TreeNodeStream.toString(Object,Object).
"""
raise NotImplementedError
############################################################################
#
# character streams for use in lexers
# CharStream
# \- ANTLRStringStream
#
############################################################################
class ANTLRStringStream(CharStream):
"""
@brief CharStream that pull data from a unicode string.
A pretty quick CharStream that pulls all data from an array
directly. Every method call counts in the lexer.
"""
def __init__(self, data):
"""
@param data This should be a unicode string holding the data you want
to parse. If you pass in a byte string, the Lexer will choke on
non-ascii data.
"""
CharStream.__init__(self)
# The data being scanned
self.strdata = unicode(data)
self.data = [ord(c) for c in self.strdata]
# How many characters are actually in the buffer
self.n = len(data)
# 0..n-1 index into string of next char
self.p = 0
# line number 1..n within the input
self.line = 1
# The index of the character relative to the beginning of the
# line 0..n-1
self.charPositionInLine = 0
# A list of CharStreamState objects that tracks the stream state
# values line, charPositionInLine, and p that can change as you
# move through the input stream. Indexed from 0..markDepth-1.
self._markers = [ ]
self.lastMarker = None
self.markDepth = 0
# What is name or source of this char stream?
self.name = None
def reset(self):
"""
Reset the stream so that it's in the same state it was
when the object was created *except* the data array is not
touched.
"""
self.p = 0
self.line = 1
self.charPositionInLine = 0
self._markers = [ ]
def consume(self):
try:
if self.data[self.p] == 10: # \n
self.line += 1
self.charPositionInLine = 0
else:
self.charPositionInLine += 1
self.p += 1
except IndexError:
# happend when we reached EOF and self.data[self.p] fails
# just do nothing
pass
def LA(self, i):
if i == 0:
return 0 # undefined
if i < 0:
i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
try:
return self.data[self.p+i-1]
except IndexError:
return EOF
def LT(self, i):
if i == 0:
return 0 # undefined
if i < 0:
i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
try:
return self.strdata[self.p+i-1]
except IndexError:
return EOF
def index(self):
"""
Return the current input symbol index 0..n where n indicates the
last symbol has been read. The index is the index of char to
be returned from LA(1).
"""
return self.p
def size(self):
return self.n
def mark(self):
state = (self.p, self.line, self.charPositionInLine)
try:
self._markers[self.markDepth] = state
except IndexError:
self._markers.append(state)
self.markDepth += 1
self.lastMarker = self.markDepth
return self.lastMarker
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
p, line, charPositionInLine = self._markers[marker-1]
self.seek(p)
self.line = line
self.charPositionInLine = charPositionInLine
self.release(marker)
def release(self, marker=None):
if marker is None:
marker = self.lastMarker
self.markDepth = marker-1
def seek(self, index):
"""
consume() ahead until p==index; can't just set p=index as we must
update line and charPositionInLine.
"""
if index <= self.p:
self.p = index # just jump; don't update stream state (line, ...)
return
# seek forward, consume until p hits index
while self.p < index:
self.consume()
def substring(self, start, stop):
return self.strdata[start:stop+1]
def getLine(self):
"""Using setter/getter methods is deprecated. Use o.line instead."""
return self.line
def getCharPositionInLine(self):
"""
Using setter/getter methods is deprecated. Use o.charPositionInLine
instead.
"""
return self.charPositionInLine
def setLine(self, line):
"""Using setter/getter methods is deprecated. Use o.line instead."""
self.line = line
def setCharPositionInLine(self, pos):
"""
Using setter/getter methods is deprecated. Use o.charPositionInLine
instead.
"""
self.charPositionInLine = pos
def getSourceName(self):
return self.name
class ANTLRFileStream(ANTLRStringStream):
"""
@brief CharStream that opens a file to read the data.
This is a char buffer stream that is loaded from a file
all at once when you construct the object.
"""
def __init__(self, fileName, encoding=None):
"""
@param fileName The path to the file to be opened. The file will be
opened with mode 'rb'.
@param encoding If you set the optional encoding argument, then the
data will be decoded on the fly.
"""
self.fileName = fileName
fp = codecs.open(fileName, 'rb', encoding)
try:
data = fp.read()
finally:
fp.close()
ANTLRStringStream.__init__(self, data)
def getSourceName(self):
"""Deprecated, access o.fileName directly."""
return self.fileName
class ANTLRInputStream(ANTLRStringStream):
"""
@brief CharStream that reads data from a file-like object.
This is a char buffer stream that is loaded from a file like object
all at once when you construct the object.
All input is consumed from the file, but it is not closed.
"""
def __init__(self, file, encoding=None):
"""
@param file A file-like object holding your input. Only the read()
method must be implemented.
@param encoding If you set the optional encoding argument, then the
data will be decoded on the fly.
"""
if encoding is not None:
# wrap input in a decoding reader
reader = codecs.lookup(encoding)[2]
file = reader(file)
data = file.read()
ANTLRStringStream.__init__(self, data)
# I guess the ANTLR prefix exists only to avoid a name clash with some Java
# mumbojumbo. A plain "StringStream" looks better to me, which should be
# the preferred name in Python.
StringStream = ANTLRStringStream
FileStream = ANTLRFileStream
InputStream = ANTLRInputStream
############################################################################
#
# Token streams
# TokenStream
# +- CommonTokenStream
# \- TokenRewriteStream
#
############################################################################
class CommonTokenStream(TokenStream):
"""
@brief The most common stream of tokens
The most common stream of tokens is one where every token is buffered up
and tokens are prefiltered for a certain channel (the parser will only
see these tokens and cannot change the filter channel number during the
parse).
"""
def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
"""
@param tokenSource A TokenSource instance (usually a Lexer) to pull
the tokens from.
@param channel Skip tokens on any channel but this one; this is how we
skip whitespace...
"""
TokenStream.__init__(self)
self.tokenSource = tokenSource
# Record every single token pulled from the source so we can reproduce
# chunks of it later.
self.tokens = []
# Map<tokentype, channel> to override some Tokens' channel numbers
self.channelOverrideMap = {}
# Set<tokentype>; discard any tokens with this type
self.discardSet = set()
# Skip tokens on any channel but this one; this is how we skip whitespace...
self.channel = channel
# By default, track all incoming tokens
self.discardOffChannelTokens = False
# The index into the tokens list of the current token (next token
# to consume). p==-1 indicates that the tokens list is empty
self.p = -1
# Remember last marked position
self.lastMarker = None
def setTokenSource(self, tokenSource):
"""Reset this token stream by setting its token source."""
self.tokenSource = tokenSource
self.tokens = []
self.p = -1
self.channel = DEFAULT_CHANNEL
def reset(self):
self.p = 0
self.lastMarker = None
def fillBuffer(self):
"""
Load all tokens from the token source and put in tokens.
This is done upon first LT request because you might want to
set some token type / channel overrides before filling buffer.
"""
index = 0
t = self.tokenSource.nextToken()
while t is not None and t.type != EOF:
discard = False
if self.discardSet is not None and t.type in self.discardSet:
discard = True
elif self.discardOffChannelTokens and t.channel != self.channel:
discard = True
# is there a channel override for token type?
if t.type in self.channelOverrideMap:
overrideChannel = self.channelOverrideMap[t.type]
if overrideChannel == self.channel:
t.channel = overrideChannel
else:
discard = True
if not discard:
t.index = index
self.tokens.append(t)
index += 1
t = self.tokenSource.nextToken()
# leave p pointing at first token on channel
self.p = 0
self.p = self.skipOffTokenChannels(self.p)
def consume(self):
"""
Move the input pointer to the next incoming token. The stream
must become active with LT(1) available. consume() simply
moves the input pointer so that LT(1) points at the next
input symbol. Consume at least one token.
Walk past any token not on the channel the parser is listening to.
"""
if self.p < len(self.tokens):
self.p += 1
self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
def skipOffTokenChannels(self, i):
"""
Given a starting index, return the index of the first on-channel
token.
"""
try:
while self.tokens[i].channel != self.channel:
i += 1
except IndexError:
# hit the end of token stream
pass
return i
def skipOffTokenChannelsReverse(self, i):
while i >= 0 and self.tokens[i].channel != self.channel:
i -= 1
return i
def setTokenTypeChannel(self, ttype, channel):
"""
A simple filter mechanism whereby you can tell this token stream
to force all tokens of type ttype to be on channel. For example,
when interpreting, we cannot exec actions so we need to tell
the stream to force all WS and NEWLINE to be a different, ignored
channel.
"""
self.channelOverrideMap[ttype] = channel
def discardTokenType(self, ttype):
self.discardSet.add(ttype)
def getTokens(self, start=None, stop=None, types=None):
"""
Given a start and stop index, return a list of all tokens in
the token type set. Return None if no tokens were found. This
method looks at both on and off channel tokens.
"""
if self.p == -1:
self.fillBuffer()
if stop is None or stop >= len(self.tokens):
stop = len(self.tokens) - 1
if start is None or stop < 0:
start = 0
if start > stop:
return None
if isinstance(types, (int, long)):
# called with a single type, wrap into set
types = set([types])
filteredTokens = [
token for token in self.tokens[start:stop]
if types is None or token.type in types
]
if len(filteredTokens) == 0:
return None
return filteredTokens
def LT(self, k):
"""
Get the ith token from the current position 1..n where k=1 is the
first symbol of lookahead.
"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if k < 0:
return self.LB(-k)
i = self.p
n = 1
# find k good tokens
while n < k:
# skip off-channel tokens
i = self.skipOffTokenChannels(i+1) # leave p on valid token
n += 1
try:
return self.tokens[i]
except IndexError:
return EOF_TOKEN
def LB(self, k):
"""Look backwards k tokens on-channel tokens"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if self.p - k < 0:
return None
i = self.p
n = 1
# find k good tokens looking backwards
while n <= k:
# skip off-channel tokens
i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token
n += 1
if i < 0:
return None
return self.tokens[i]
def get(self, i):
"""
Return absolute token i; ignore which channel the tokens are on;
that is, count all tokens not just on-channel tokens.
"""
return self.tokens[i]
def LA(self, i):
return self.LT(i).type
def mark(self):
self.lastMarker = self.index()
return self.lastMarker
def release(self, marker=None):
# no resources to release
pass
def size(self):
return len(self.tokens)
def index(self):
return self.p
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
self.seek(marker)
def seek(self, index):
self.p = index
def getTokenSource(self):
return self.tokenSource
def getSourceName(self):
return self.tokenSource.getSourceName()
def toString(self, start=None, stop=None):
if self.p == -1:
self.fillBuffer()
if start is None:
start = 0
elif not isinstance(start, int):
start = start.index
if stop is None:
stop = len(self.tokens) - 1
elif not isinstance(stop, int):
stop = stop.index
if stop >= len(self.tokens):
stop = len(self.tokens) - 1
return ''.join([t.text for t in self.tokens[start:stop+1]])
class RewriteOperation(object):
"""@brief Internal helper class."""
def __init__(self, stream, index, text):
self.stream = stream
self.index = index
self.text = text
def execute(self, buf):
"""Execute the rewrite operation by possibly adding to the buffer.
Return the index of the next token to operate on.
"""
return self.index
def toString(self):
opName = self.__class__.__name__
return '<%s@%d:"%s">' % (opName, self.index, self.text)
__str__ = toString
__repr__ = toString
class InsertBeforeOp(RewriteOperation):
"""@brief Internal helper class."""
def execute(self, buf):
buf.write(self.text)
buf.write(self.stream.tokens[self.index].text)
return self.index + 1
class ReplaceOp(RewriteOperation):
"""
@brief Internal helper class.
I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
instructions.
"""
def __init__(self, stream, first, last, text):
RewriteOperation.__init__(self, stream, first, text)
self.lastIndex = last
def execute(self, buf):
if self.text is not None:
buf.write(self.text)
return self.lastIndex + 1
def toString(self):
return '<ReplaceOp@%d..%d:"%s">' % (
self.index, self.lastIndex, self.text)
__str__ = toString
__repr__ = toString
class DeleteOp(ReplaceOp):
"""
@brief Internal helper class.
"""
def __init__(self, stream, first, last):
ReplaceOp.__init__(self, stream, first, last, None)
def toString(self):
return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)
__str__ = toString
__repr__ = toString
class TokenRewriteStream(CommonTokenStream):
"""@brief CommonTokenStream that can be modified.
Useful for dumping out the input stream after doing some
augmentation or other manipulations.
You can insert stuff, replace, and delete chunks. Note that the
operations are done lazily--only if you convert the buffer to a
String. This is very efficient because you are not moving data around
all the time. As the buffer of tokens is converted to strings, the
toString() method(s) check to see if there is an operation at the
current index. If so, the operation is done and then normal String
rendering continues on the buffer. This is like having multiple Turing
machine instruction streams (programs) operating on a single input tape. :)
Since the operations are done lazily at toString-time, operations do not
screw up the token index values. That is, an insert operation at token
index i does not change the index values for tokens i+1..n-1.
Because operations never actually alter the buffer, you may always get
the original token stream back without undoing anything. Since
the instructions are queued up, you can easily simulate transactions and
roll back any changes if there is an error just by removing instructions.
For example,
CharStream input = new ANTLRFileStream("input");
TLexer lex = new TLexer(input);
TokenRewriteStream tokens = new TokenRewriteStream(lex);
T parser = new T(tokens);
parser.startRule();
Then in the rules, you can execute
Token t,u;
...
input.insertAfter(t, "text to put after t");}
input.insertAfter(u, "text after u");}
System.out.println(tokens.toString());
Actually, you have to cast the 'input' to a TokenRewriteStream. :(
You can also have multiple "instruction streams" and get multiple
rewrites from a single pass over the input. Just name the instruction
streams and use that name again when printing the buffer. This could be
useful for generating a C file and also its header file--all from the
same buffer:
tokens.insertAfter("pass1", t, "text to put after t");}
tokens.insertAfter("pass2", u, "text after u");}
System.out.println(tokens.toString("pass1"));
System.out.println(tokens.toString("pass2"));
If you don't use named rewrite streams, a "default" stream is used as
the first example shows.
"""
DEFAULT_PROGRAM_NAME = "default"
MIN_TOKEN_INDEX = 0
def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
CommonTokenStream.__init__(self, tokenSource, channel)
# You may have multiple, named streams of rewrite operations.
# I'm calling these things "programs."
# Maps String (name) -> rewrite (List)
self.programs = {}
self.programs[self.DEFAULT_PROGRAM_NAME] = []
# Map String (program name) -> Integer index
self.lastRewriteTokenIndexes = {}
def rollback(self, *args):
"""
Rollback the instruction stream for a program so that
the indicated instruction (via instructionIndex) is no
longer in the stream. UNTESTED!
"""
if len(args) == 2:
programName = args[0]
instructionIndex = args[1]
elif len(args) == 1:
programName = self.DEFAULT_PROGRAM_NAME
instructionIndex = args[0]
else:
raise TypeError("Invalid arguments")
p = self.programs.get(programName, None)
if p is not None:
self.programs[programName] = (
p[self.MIN_TOKEN_INDEX:instructionIndex])
def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
"""Reset the program so that no instructions exist"""
self.rollback(programName, self.MIN_TOKEN_INDEX)
def insertAfter(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
index = args[0]
text = args[1]
elif len(args) == 3:
programName = args[0]
index = args[1]
text = args[2]
else:
raise TypeError("Invalid arguments")
if isinstance(index, Token):
# index is a Token, grap the stream index from it
index = index.index
# to insert after, just insert before next index (even if past end)
self.insertBefore(programName, index+1, text)
def insertBefore(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
index = args[0]
text = args[1]
elif len(args) == 3:
programName = args[0]
index = args[1]
text = args[2]
else:
raise TypeError("Invalid arguments")
if isinstance(index, Token):
# index is a Token, grap the stream index from it
index = index.index
op = InsertBeforeOp(self, index, text)
rewrites = self.getProgram(programName)
rewrites.append(op)
def replace(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
first = args[0]
last = args[0]
text = args[1]
elif len(args) == 3:
programName = self.DEFAULT_PROGRAM_NAME
first = args[0]
last = args[1]
text = args[2]
elif len(args) == 4:
programName = args[0]
first = args[1]
last = args[2]
text = args[3]
else:
raise TypeError("Invalid arguments")
if isinstance(first, Token):
# first is a Token, grap the stream index from it
first = first.index
if isinstance(last, Token):
# last is a Token, grap the stream index from it
last = last.index
if first > last or first < 0 or last < 0 or last >= len(self.tokens):
raise ValueError(
"replace: range invalid: "+first+".."+last+
"(size="+len(self.tokens)+")")
op = ReplaceOp(self, first, last, text)
rewrites = self.getProgram(programName)
rewrites.append(op)
def delete(self, *args):
self.replace(*(list(args) + [None]))
def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
return self.lastRewriteTokenIndexes.get(programName, -1)
def setLastRewriteTokenIndex(self, programName, i):
self.lastRewriteTokenIndexes[programName] = i
def getProgram(self, name):
p = self.programs.get(name, None)
if p is None:
p = self.initializeProgram(name)
return p
def initializeProgram(self, name):
p = []
self.programs[name] = p
return p
def toOriginalString(self, start=None, end=None):
if start is None:
start = self.MIN_TOKEN_INDEX
if end is None:
end = self.size() - 1
buf = StringIO()
i = start
while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
buf.write(self.get(i).text)
i += 1
return buf.getvalue()
def toString(self, *args):
if len(args) == 0:
programName = self.DEFAULT_PROGRAM_NAME
start = self.MIN_TOKEN_INDEX
end = self.size() - 1
elif len(args) == 1:
programName = args[0]
start = self.MIN_TOKEN_INDEX
end = self.size() - 1
elif len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
start = args[0]
end = args[1]
if start is None:
start = self.MIN_TOKEN_INDEX
elif not isinstance(start, int):
start = start.index
if end is None:
end = len(self.tokens) - 1
elif not isinstance(end, int):
end = end.index
# ensure start/end are in range
if end >= len(self.tokens):
end = len(self.tokens) - 1
if start < 0:
start = 0
rewrites = self.programs.get(programName)
if rewrites is None or len(rewrites) == 0:
# no instructions to execute
return self.toOriginalString(start, end)
buf = StringIO()
# First, optimize instruction stream
indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
# Walk buffer, executing instructions and emitting tokens
i = start
while i <= end and i < len(self.tokens):
op = indexToOp.get(i)
# remove so any left have index size-1
try:
del indexToOp[i]
except KeyError:
pass
t = self.tokens[i]
if op is None:
# no operation at that index, just dump token
buf.write(t.text)
i += 1 # move to next token
else:
i = op.execute(buf) # execute operation and skip
# include stuff after end if it's last index in buffer
# So, if they did an insertAfter(lastValidIndex, "foo"), include
# foo if end==lastValidIndex.
if end == len(self.tokens) - 1:
# Scan any remaining operations after last token
# should be included (they will be inserts).
for i in sorted(indexToOp.keys()):
op = indexToOp[i]
if op.index >= len(self.tokens)-1:
buf.write(op.text)
return buf.getvalue()
__str__ = toString
def reduceToSingleOperationPerIndex(self, rewrites):
"""
We need to combine operations and report invalid operations (like
overlapping replaces that are not completed nested). Inserts to
same index need to be combined etc... Here are the cases:
I.i.u I.j.v leave alone, nonoverlapping
I.i.u I.i.v combine: Iivu
R.i-j.u R.x-y.v | i-j in x-y delete first R
R.i-j.u R.i-j.v delete first R
R.i-j.u R.x-y.v | x-y in i-j ERROR
R.i-j.u R.x-y.v | boundaries overlap ERROR
I.i.u R.x-y.v | i in x-y delete I
I.i.u R.x-y.v | i not in x-y leave alone, nonoverlapping
R.x-y.v I.i.u | i in x-y ERROR
R.x-y.v I.x.u R.x-y.uv (combine, delete I)
R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
I.i.u = insert u before op @ index i
R.x-y.u = replace x-y indexed tokens with u
First we need to examine replaces. For any replace op:
1. wipe out any insertions before op within that range.
2. Drop any replace op before that is contained completely within
that range.
3. Throw exception upon boundary overlap with any previous replace.
Then we can deal with inserts:
1. for any inserts to same index, combine even if not adjacent.
2. for any prior replace with same left boundary, combine this
insert with replace and delete this replace.
3. throw exception if index in same range as previous replace
Don't actually delete; make op null in list. Easier to walk list.
Later we can throw as we add to index -> op map.
Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
inserted stuff would be before the replace range. But, if you
add tokens in front of a method body '{' and then delete the method
body, I think the stuff before the '{' you added should disappear too.
Return a map from token index to operation.
"""
# WALK REPLACES
for i, rop in enumerate(rewrites):
if rop is None:
continue
if not isinstance(rop, ReplaceOp):
continue
# Wipe prior inserts within range
for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
if iop.index >= rop.index and iop.index <= rop.lastIndex:
rewrites[j] = None # delete insert as it's a no-op.
# Drop any prior replaces contained within
for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
if (prevRop.index >= rop.index
and prevRop.lastIndex <= rop.lastIndex):
rewrites[j] = None # delete replace as it's a no-op.
continue
# throw exception unless disjoint or identical
disjoint = (prevRop.lastIndex < rop.index
or prevRop.index > rop.lastIndex)
same = (prevRop.index == rop.index
and prevRop.lastIndex == rop.lastIndex)
if not disjoint and not same:
raise ValueError(
"replace op boundaries of %s overlap with previous %s"
% (rop, prevRop))
# WALK INSERTS
for i, iop in enumerate(rewrites):
if iop is None:
continue
if not isinstance(iop, InsertBeforeOp):
continue
# combine current insert with prior if any at same index
for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
if prevIop.index == iop.index: # combine objects
# convert to strings...we're in process of toString'ing
# whole token buffer so no lazy eval issue with any
# templates
iop.text = self.catOpText(iop.text, prevIop.text)
rewrites[j] = None # delete redundant prior insert
# look for replaces where iop.index is in range; error
for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
if iop.index == rop.index:
rop.text = self.catOpText(iop.text, rop.text)
rewrites[i] = None # delete current insert
continue
if iop.index >= rop.index and iop.index <= rop.lastIndex:
raise ValueError(
"insert op %s within boundaries of previous %s"
% (iop, rop))
m = {}
for i, op in enumerate(rewrites):
if op is None:
continue # ignore deleted ops
assert op.index not in m, "should only be one op per index"
m[op.index] = op
return m
def catOpText(self, a, b):
x = ""
y = ""
if a is not None:
x = a
if b is not None:
y = b
return x + y
def getKindOfOps(self, rewrites, kind, before=None):
if before is None:
before = len(rewrites)
elif before > len(rewrites):
before = len(rewrites)
for i, op in enumerate(rewrites[:before]):
if op is None:
# ignore deleted
continue
if op.__class__ == kind:
yield i, op
def toDebugString(self, start=None, end=None):
if start is None:
start = self.MIN_TOKEN_INDEX
if end is None:
end = self.size() - 1
buf = StringIO()
i = start
while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
buf.write(self.get(i))
i += 1
return buf.getvalue()
| 43,486 | Python | .py | 1,038 | 32.065511 | 79 | 0.607213 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,323 | dfa.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/dfa.py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licensc]
from .constants import EOF
from .exceptions import NoViableAltException, BacktrackingFailed
class DFA(object):
"""@brief A DFA implemented as a set of transition tables.
Any state that has a semantic predicate edge is special; those states
are generated with if-then-else structures in a specialStateTransition()
which is generated by cyclicDFA template.
"""
def __init__(
self,
recognizer, decisionNumber,
eot, eof, min, max, accept, special, transition
):
## Which recognizer encloses this DFA? Needed to check backtracking
self.recognizer = recognizer
self.decisionNumber = decisionNumber
self.eot = eot
self.eof = eof
self.min = min
self.max = max
self.accept = accept
self.special = special
self.transition = transition
def predict(self, input):
"""
From the input stream, predict what alternative will succeed
using this DFA (representing the covering regular approximation
to the underlying CFL). Return an alternative number 1..n. Throw
an exception upon error.
"""
mark = input.mark()
s = 0 # we always start at s0
try:
for _ in xrange(50000):
#print "***Current state = %d" % s
specialState = self.special[s]
if specialState >= 0:
#print "is special"
s = self.specialStateTransition(specialState, input)
if s == -1:
self.noViableAlt(s, input)
return 0
input.consume()
continue
if self.accept[s] >= 1:
#print "accept state for alt %d" % self.accept[s]
return self.accept[s]
# look for a normal char transition
c = input.LA(1)
#print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF')
#print "range = %d..%d" % (self.min[s], self.max[s])
if c >= self.min[s] and c <= self.max[s]:
# move to next state
snext = self.transition[s][c-self.min[s]]
#print "in range, next state = %d" % snext
if snext < 0:
#print "not a normal transition"
# was in range but not a normal transition
# must check EOT, which is like the else clause.
# eot[s]>=0 indicates that an EOT edge goes to another
# state.
if self.eot[s] >= 0: # EOT Transition to accept state?
#print "EOT trans to accept state %d" % self.eot[s]
s = self.eot[s]
input.consume()
# TODO: I had this as return accept[eot[s]]
# which assumed here that the EOT edge always
# went to an accept...faster to do this, but
# what about predicated edges coming from EOT
# target?
continue
#print "no viable alt"
self.noViableAlt(s, input)
return 0
s = snext
input.consume()
continue
if self.eot[s] >= 0:
#print "EOT to %d" % self.eot[s]
s = self.eot[s]
input.consume()
continue
# EOF Transition to accept state?
if c == EOF and self.eof[s] >= 0:
#print "EOF Transition to accept state %d" \
# % self.accept[self.eof[s]]
return self.accept[self.eof[s]]
# not in range and not EOF/EOT, must be invalid symbol
self.noViableAlt(s, input)
return 0
else:
raise RuntimeError("DFA bang!")
finally:
input.rewind(mark)
def noViableAlt(self, s, input):
if self.recognizer._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException(
self.getDescription(),
self.decisionNumber,
s,
input
)
self.error(nvae)
raise nvae
def error(self, nvae):
"""A hook for debugging interface"""
pass
def specialStateTransition(self, s, input):
return -1
def getDescription(self):
return "n/a"
## def specialTransition(self, state, symbol):
## return 0
def unpack(cls, string):
"""@brief Unpack the runlength encoded table data.
Terence implemented packed table initializers, because Java has a
size restriction on .class files and the lookup tables can grow
pretty large. The generated JavaLexer.java of the Java.g example
would be about 15MB with uncompressed array initializers.
Python does not have any size restrictions, but the compilation of
such large source files seems to be pretty memory hungry. The memory
consumption of the python process grew to >1.5GB when importing a
15MB lexer, eating all my swap space and I was to impacient to see,
if it could finish at all. With packed initializers that are unpacked
at import time of the lexer module, everything works like a charm.
"""
ret = []
for i in range(len(string) / 2):
(n, v) = ord(string[i*2]), ord(string[i*2+1])
# Is there a bitwise operation to do this?
if v == 0xFFFF:
v = -1
ret += [v] * n
return ret
unpack = classmethod(unpack)
| 7,619 | Python | .py | 166 | 33.23494 | 79 | 0.579786 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,324 | tokens.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/tokens.py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE
############################################################################
#
# basic token interface
#
############################################################################
class Token(object):
"""@brief Abstract token baseclass."""
def getText(self):
"""@brief Get the text of the token.
Using setter/getter methods is deprecated. Use o.text instead.
"""
raise NotImplementedError
def setText(self, text):
"""@brief Set the text of the token.
Using setter/getter methods is deprecated. Use o.text instead.
"""
raise NotImplementedError
def getType(self):
"""@brief Get the type of the token.
Using setter/getter methods is deprecated. Use o.type instead."""
raise NotImplementedError
def setType(self, ttype):
"""@brief Get the type of the token.
Using setter/getter methods is deprecated. Use o.type instead."""
raise NotImplementedError
def getLine(self):
"""@brief Get the line number on which this token was matched
Lines are numbered 1..n
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
def setLine(self, line):
"""@brief Set the line number on which this token was matched
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
def getCharPositionInLine(self):
"""@brief Get the column of the tokens first character,
Columns are numbered 0..n-1
Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
raise NotImplementedError
def setCharPositionInLine(self, pos):
"""@brief Set the column of the tokens first character,
Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
raise NotImplementedError
def getChannel(self):
"""@brief Get the channel of the token
Using setter/getter methods is deprecated. Use o.channel instead."""
raise NotImplementedError
def setChannel(self, channel):
"""@brief Set the channel of the token
Using setter/getter methods is deprecated. Use o.channel instead."""
raise NotImplementedError
def getTokenIndex(self):
"""@brief Get the index in the input stream.
An index from 0..n-1 of the token object in the input stream.
This must be valid in order to use the ANTLRWorks debugger.
Using setter/getter methods is deprecated. Use o.index instead."""
raise NotImplementedError
def setTokenIndex(self, index):
"""@brief Set the index in the input stream.
Using setter/getter methods is deprecated. Use o.index instead."""
raise NotImplementedError
def getInputStream(self):
"""@brief From what character stream was this token created.
You don't have to implement but it's nice to know where a Token
comes from if you have include files etc... on the input."""
raise NotImplementedError
def setInputStream(self, input):
"""@brief From what character stream was this token created.
You don't have to implement but it's nice to know where a Token
comes from if you have include files etc... on the input."""
raise NotImplementedError
############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################
class CommonToken(Token):
"""@brief Basic token implementation.
This implementation does not copy the text from the input stream upon
creation, but keeps start/stop pointers into the stream to avoid
unnecessary copy operations.
"""
def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
input=None, start=None, stop=None, oldToken=None):
Token.__init__(self)
if oldToken is not None:
self.type = oldToken.type
self.line = oldToken.line
self.charPositionInLine = oldToken.charPositionInLine
self.channel = oldToken.channel
self.index = oldToken.index
self._text = oldToken._text
if isinstance(oldToken, CommonToken):
self.input = oldToken.input
self.start = oldToken.start
self.stop = oldToken.stop
else:
self.type = type
self.input = input
self.charPositionInLine = -1 # set to invalid position
self.line = 0
self.channel = channel
#What token number is this from 0..n-1 tokens; < 0 implies invalid index
self.index = -1
# We need to be able to change the text once in a while. If
# this is non-null, then getText should return this. Note that
# start/stop are not affected by changing this.
self._text = text
# The char position into the input buffer where this token starts
self.start = start
# The char position into the input buffer where this token stops
# This is the index of the last char, *not* the index after it!
self.stop = stop
def getText(self):
if self._text is not None:
return self._text
if self.input is None:
return None
return self.input.substring(self.start, self.stop)
def setText(self, text):
"""
Override the text for this token. getText() will return this text
rather than pulling from the buffer. Note that this does not mean
that start/stop indexes are not valid. It means that that input
was converted to a new string in the token object.
"""
self._text = text
text = property(getText, setText)
def getType(self):
return self.type
def setType(self, ttype):
self.type = ttype
def getLine(self):
return self.line
def setLine(self, line):
self.line = line
def getCharPositionInLine(self):
return self.charPositionInLine
def setCharPositionInLine(self, pos):
self.charPositionInLine = pos
def getChannel(self):
return self.channel
def setChannel(self, channel):
self.channel = channel
def getTokenIndex(self):
return self.index
def setTokenIndex(self, index):
self.index = index
def getInputStream(self):
return self.input
def setInputStream(self, input):
self.input = input
def __str__(self):
if self.type == EOF:
return "<EOF>"
channelStr = ""
if self.channel > 0:
channelStr = ",channel=" + str(self.channel)
txt = self.text
if txt is not None:
txt = txt.replace("\n","\\\\n")
txt = txt.replace("\r","\\\\r")
txt = txt.replace("\t","\\\\t")
else:
txt = "<no text>"
return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % (
self.index,
self.start, self.stop,
txt,
self.type, channelStr,
self.line, self.charPositionInLine
)
class ClassicToken(Token):
"""@brief Alternative token implementation.
A Token object like we'd use in ANTLR 2.x; has an actual string created
and associated with this object. These objects are needed for imaginary
tree nodes that have payload objects. We need to create a Token object
that has a string; the tree node will point at this token. CommonToken
has indexes into a char stream and hence cannot be used to introduce
new strings.
"""
def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
oldToken=None
):
Token.__init__(self)
if oldToken is not None:
self.text = oldToken.text
self.type = oldToken.type
self.line = oldToken.line
self.charPositionInLine = oldToken.charPositionInLine
self.channel = oldToken.channel
self.text = text
self.type = type
self.line = None
self.charPositionInLine = None
self.channel = channel
self.index = None
def getText(self):
return self.text
def setText(self, text):
self.text = text
def getType(self):
return self.type
def setType(self, ttype):
self.type = ttype
def getLine(self):
return self.line
def setLine(self, line):
self.line = line
def getCharPositionInLine(self):
return self.charPositionInLine
def setCharPositionInLine(self, pos):
self.charPositionInLine = pos
def getChannel(self):
return self.channel
def setChannel(self, channel):
self.channel = channel
def getTokenIndex(self):
return self.index
def setTokenIndex(self, index):
self.index = index
def getInputStream(self):
return None
def setInputStream(self, input):
pass
def toString(self):
channelStr = ""
if self.channel > 0:
channelStr = ",channel=" + str(self.channel)
txt = self.text
if txt is None:
txt = "<no text>"
return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
txt,
self.type,
channelStr,
self.line,
self.charPositionInLine
)
__str__ = toString
__repr__ = toString
EOF_TOKEN = CommonToken(type=EOF)
INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
| 12,010 | Python | .py | 281 | 33.359431 | 87 | 0.627277 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,325 | constants.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/constants.py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
EOF = -1
## All tokens go to the parser (unless skip() is called in that rule)
# on a particular "channel". The parser tunes to a particular channel
# so that whitespace etc... can go to the parser on a "hidden" channel.
DEFAULT_CHANNEL = 0
## Anything on different channel than DEFAULT_CHANNEL is not parsed
# by parser.
HIDDEN_CHANNEL = 99
# Predefined token types
EOR_TOKEN_TYPE = 1
##
# imaginary tree navigation type; traverse "get child" link
DOWN = 2
##
#imaginary tree navigation type; finish with a child list
UP = 3
MIN_TOKEN_TYPE = UP+1
INVALID_TOKEN_TYPE = 0
| 2,111 | Python | .py | 48 | 42.770833 | 75 | 0.779834 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,326 | __init__.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/__init__.py | """ @package antlr3
@brief ANTLR3 runtime package
This module contains all support classes, which are needed to use recognizers
generated by ANTLR3.
@mainpage
\note Please be warned that the line numbers in the API documentation do not
match the real locations in the source code of the package. This is an
unintended artifact of doxygen, which I could only convince to use the
correct module names by concatenating all files from the package into a single
module file...
Here is a little overview over the most commonly used classes provided by
this runtime:
@section recognizers Recognizers
These recognizers are baseclasses for the code which is generated by ANTLR3.
- BaseRecognizer: Base class with common recognizer functionality.
- Lexer: Base class for lexers.
- Parser: Base class for parsers.
- tree.TreeParser: Base class for %tree parser.
@section streams Streams
Each recognizer pulls its input from one of the stream classes below. Streams
handle stuff like buffering, look-ahead and seeking.
A character stream is usually the first element in the pipeline of a typical
ANTLR3 application. It is used as the input for a Lexer.
- ANTLRStringStream: Reads from a string objects. The input should be a unicode
object, or ANTLR3 will have trouble decoding non-ascii data.
- ANTLRFileStream: Opens a file and read the contents, with optional character
decoding.
- ANTLRInputStream: Reads the date from a file-like object, with optional
character decoding.
A Parser needs a TokenStream as input (which in turn is usually fed by a
Lexer):
- CommonTokenStream: A basic and most commonly used TokenStream
implementation.
- TokenRewriteStream: A modification of CommonTokenStream that allows the
stream to be altered (by the Parser). See the 'tweak' example for a usecase.
And tree.TreeParser finally fetches its input from a tree.TreeNodeStream:
- tree.CommonTreeNodeStream: A basic and most commonly used tree.TreeNodeStream
implementation.
@section tokenstrees Tokens and Trees
A Lexer emits Token objects which are usually buffered by a TokenStream. A
Parser can build a Tree, if the output=AST option has been set in the grammar.
The runtime provides these Token implementations:
- CommonToken: A basic and most commonly used Token implementation.
- ClassicToken: A Token object as used in ANTLR 2.x, used to %tree
construction.
Tree objects are wrapper for Token objects.
- tree.CommonTree: A basic and most commonly used Tree implementation.
A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the
input Token objects.
- tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor
implementation.
@section Exceptions
RecognitionException are generated, when a recognizer encounters incorrect
or unexpected input.
- RecognitionException
- MismatchedRangeException
- MismatchedSetException
- MismatchedNotSetException
.
- MismatchedTokenException
- MismatchedTreeNodeException
- NoViableAltException
- EarlyExitException
- FailedPredicateException
.
.
A tree.RewriteCardinalityException is raised, when the parsers hits a
cardinality mismatch during AST construction. Although this is basically a
bug in your grammar, it can only be detected at runtime.
- tree.RewriteCardinalityException
- tree.RewriteEarlyExitException
- tree.RewriteEmptyStreamException
.
.
"""
# tree.RewriteRuleElementStream
# tree.RewriteRuleSubtreeStream
# tree.RewriteRuleTokenStream
# CharStream
# DFA
# TokenSource
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '3.1.3'
def version_str_to_tuple(version_str):
import re
import sys
if version_str == 'HEAD':
return (sys.maxint, sys.maxint, sys.maxint, sys.maxint)
m = re.match(r'(\d+)\.(\d+)(\.(\d+))?(b(\d+))?', version_str)
if m is None:
raise ValueError("Bad version string %r" % version_str)
major = int(m.group(1))
minor = int(m.group(2))
patch = int(m.group(4) or 0)
beta = int(m.group(6) or sys.maxint)
return (major, minor, patch, beta)
runtime_version_str = __version__
runtime_version = version_str_to_tuple(runtime_version_str)
from .constants import *
from .dfa import *
from .exceptions import *
from .recognizers import *
from .streams import *
from .tokens import *
| 5,724 | Python | .py | 128 | 42.585938 | 79 | 0.793911 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,327 | debug.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/debug.py | # begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2009 Terence Parr
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import socket
from . import Parser, TokenStream, RecognitionException, Token
from .tree import CommonTreeAdaptor, TreeAdaptor, Tree
class DebugParser(Parser):
def __init__(self, stream, state=None, dbg=None, *args, **kwargs):
# wrap token stream in DebugTokenStream (unless user already did so).
if not isinstance(stream, DebugTokenStream):
stream = DebugTokenStream(stream, dbg)
super(DebugParser, self).__init__(stream, state, *args, **kwargs)
# Who to notify when events in the parser occur.
self._dbg = None
self.setDebugListener(dbg)
def setDebugListener(self, dbg):
"""Provide a new debug event listener for this parser. Notify the
input stream too that it should send events to this listener.
"""
if hasattr(self.input, 'dbg'):
self.input.dbg = dbg
self._dbg = dbg
def getDebugListener(self):
return self._dbg
dbg = property(getDebugListener, setDebugListener)
def beginResync(self):
self._dbg.beginResync()
def endResync(self):
self._dbg.endResync()
def beginBacktrack(self, level):
self._dbg.beginBacktrack(level)
def endBacktrack(self, level, successful):
self._dbg.endBacktrack(level,successful)
def reportError(self, exc):
if isinstance(exc, RecognitionException):
self._dbg.recognitionException(exc)
else:
traceback.print_exc(exc)
class DebugTokenStream(TokenStream):
def __init__(self, input, dbg=None):
self.input = input
self.initialStreamState = True
# Track the last mark() call result value for use in rewind().
self.lastMarker = None
self._dbg = None
self.setDebugListener(dbg)
# force TokenStream to get at least first valid token
# so we know if there are any hidden tokens first in the stream
self.input.LT(1)
def getDebugListener(self):
return self._dbg
def setDebugListener(self, dbg):
self._dbg = dbg
dbg = property(getDebugListener, setDebugListener)
def consume(self):
if self.initialStreamState:
self.consumeInitialHiddenTokens()
a = self.input.index()
t = self.input.LT(1)
self.input.consume()
b = self.input.index()
self._dbg.consumeToken(t)
if b > a+1:
# then we consumed more than one token; must be off channel tokens
for idx in range(a+1, b):
self._dbg.consumeHiddenToken(self.input.get(idx));
def consumeInitialHiddenTokens(self):
"""consume all initial off-channel tokens"""
firstOnChannelTokenIndex = self.input.index()
for idx in range(firstOnChannelTokenIndex):
self._dbg.consumeHiddenToken(self.input.get(idx))
self.initialStreamState = False
def LT(self, i):
if self.initialStreamState:
self.consumeInitialHiddenTokens()
t = self.input.LT(i)
self._dbg.LT(i, t)
return t
def LA(self, i):
if self.initialStreamState:
self.consumeInitialHiddenTokens()
t = self.input.LT(i)
self._dbg.LT(i, t)
return t.type
def get(self, i):
return self.input.get(i)
def index(self):
return self.input.index()
def mark(self):
self.lastMarker = self.input.mark()
self._dbg.mark(self.lastMarker)
return self.lastMarker
def rewind(self, marker=None):
self._dbg.rewind(marker)
self.input.rewind(marker)
def release(self, marker):
pass
def seek(self, index):
# TODO: implement seek in dbg interface
# self._dbg.seek(index);
self.input.seek(index)
def size(self):
return self.input.size()
def getTokenSource(self):
return self.input.getTokenSource()
def getSourceName(self):
return self.getTokenSource().getSourceName()
def toString(self, start=None, stop=None):
return self.input.toString(start, stop)
class DebugTreeAdaptor(TreeAdaptor):
"""A TreeAdaptor proxy that fires debugging events to a DebugEventListener
delegate and uses the TreeAdaptor delegate to do the actual work. All
AST events are triggered by this adaptor; no code gen changes are needed
in generated rules. Debugging events are triggered *after* invoking
tree adaptor routines.
Trees created with actions in rewrite actions like "-> ^(ADD {foo} {bar})"
cannot be tracked as they might not use the adaptor to create foo, bar.
The debug listener has to deal with tree node IDs for which it did
not see a createNode event. A single <unknown> node is sufficient even
if it represents a whole tree.
"""
def __init__(self, dbg, adaptor):
self.dbg = dbg
self.adaptor = adaptor
def createWithPayload(self, payload):
if payload.getTokenIndex() < 0:
# could be token conjured up during error recovery
return self.createFromType(payload.getType(), payload.getText())
node = self.adaptor.createWithPayload(payload)
self.dbg.createNode(node, payload)
return node
def createFromToken(self, tokenType, fromToken, text=None):
node = self.adaptor.createFromToken(tokenType, fromToken, text)
self.dbg.createNode(node)
return node
def createFromType(self, tokenType, text):
node = self.adaptor.createFromType(tokenType, text)
self.dbg.createNode(node)
return node
def errorNode(self, input, start, stop, exc):
node = selfadaptor.errorNode(input, start, stop, exc)
if node is not None:
dbg.errorNode(node)
return node
def dupTree(self, tree):
t = self.adaptor.dupTree(tree)
# walk the tree and emit create and add child events
# to simulate what dupTree has done. dupTree does not call this debug
# adapter so I must simulate.
self.simulateTreeConstruction(t)
return t
def simulateTreeConstruction(self, t):
"""^(A B C): emit create A, create B, add child, ..."""
self.dbg.createNode(t)
for i in range(self.adaptor.getChildCount(t)):
child = self.adaptor.getChild(t, i)
self.simulateTreeConstruction(child)
self.dbg.addChild(t, child)
def dupNode(self, treeNode):
d = self.adaptor.dupNode(treeNode)
self.dbg.createNode(d)
return d
def nil(self):
node = self.adaptor.nil()
self.dbg.nilNode(node)
return node
def isNil(self, tree):
return self.adaptor.isNil(tree)
def addChild(self, t, child):
if isinstance(child, Token):
n = self.createWithPayload(child)
self.addChild(t, n)
else:
if t is None or child is None:
return
self.adaptor.addChild(t, child)
self.dbg.addChild(t, child)
def becomeRoot(self, newRoot, oldRoot):
if isinstance(newRoot, Token):
n = self.createWithPayload(newRoot)
self.adaptor.becomeRoot(n, oldRoot)
else:
n = self.adaptor.becomeRoot(newRoot, oldRoot)
self.dbg.becomeRoot(newRoot, oldRoot)
return n
def rulePostProcessing(self, root):
return self.adaptor.rulePostProcessing(root)
def getType(self, t):
return self.adaptor.getType(t)
def setType(self, t, type):
self.adaptor.setType(t, type)
def getText(self, t):
return self.adaptor.getText(t)
def setText(self, t, text):
self.adaptor.setText(t, text)
def getToken(self, t):
return self.adaptor.getToken(t)
def setTokenBoundaries(self, t, startToken, stopToken):
self.adaptor.setTokenBoundaries(t, startToken, stopToken)
if t is not None and startToken is not None and stopToken is not None:
self.dbg.setTokenBoundaries(
t, startToken.getTokenIndex(),
stopToken.getTokenIndex())
def getTokenStartIndex(self, t):
return self.adaptor.getTokenStartIndex(t)
def getTokenStopIndex(self, t):
return self.adaptor.getTokenStopIndex(t)
def getChild(self, t, i):
return self.adaptor.getChild(t, i)
def setChild(self, t, i, child):
self.adaptor.setChild(t, i, child)
def deleteChild(self, t, i):
return self.adaptor.deleteChild(t, i)
def getChildCount(self, t):
return self.adaptor.getChildCount(t)
def getUniqueID(self, node):
return self.adaptor.getUniqueID(node)
def getParent(self, t):
return self.adaptor.getParent(t)
def getChildIndex(self, t):
return self.adaptor.getChildIndex(t)
def setParent(self, t, parent):
self.adaptor.setParent(t, parent)
def setChildIndex(self, t, index):
self.adaptor.setChildIndex(t, index)
def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
self.adaptor.replaceChildren(parent, startChildIndex, stopChildIndex, t)
## support
def getDebugListener(self):
return dbg
def setDebugListener(self, dbg):
self.dbg = dbg
def getTreeAdaptor(self):
return self.adaptor
class DebugEventListener(object):
"""All debugging events that a recognizer can trigger.
I did not create a separate AST debugging interface as it would create
lots of extra classes and DebugParser has a dbg var defined, which makes
it hard to change to ASTDebugEventListener. I looked hard at this issue
and it is easier to understand as one monolithic event interface for all
possible events. Hopefully, adding ST debugging stuff won't be bad. Leave
for future. 4/26/2006.
"""
# Moved to version 2 for v3.1: added grammar name to enter/exit Rule
PROTOCOL_VERSION = "2"
def enterRule(self, grammarFileName, ruleName):
"""The parser has just entered a rule. No decision has been made about
which alt is predicted. This is fired AFTER init actions have been
executed. Attributes are defined and available etc...
The grammarFileName allows composite grammars to jump around among
multiple grammar files.
"""
pass
def enterAlt(self, alt):
"""Because rules can have lots of alternatives, it is very useful to
know which alt you are entering. This is 1..n for n alts.
"""
pass
def exitRule(self, grammarFileName, ruleName):
"""This is the last thing executed before leaving a rule. It is
executed even if an exception is thrown. This is triggered after
error reporting and recovery have occurred (unless the exception is
not caught in this rule). This implies an "exitAlt" event.
The grammarFileName allows composite grammars to jump around among
multiple grammar files.
"""
pass
def enterSubRule(self, decisionNumber):
"""Track entry into any (...) subrule other EBNF construct"""
pass
def exitSubRule(self, decisionNumber):
pass
def enterDecision(self, decisionNumber):
"""Every decision, fixed k or arbitrary, has an enter/exit event
so that a GUI can easily track what LT/consume events are
associated with prediction. You will see a single enter/exit
subrule but multiple enter/exit decision events, one for each
loop iteration.
"""
pass
def exitDecision(self, decisionNumber):
pass
def consumeToken(self, t):
"""An input token was consumed; matched by any kind of element.
Trigger after the token was matched by things like match(), matchAny().
"""
pass
def consumeHiddenToken(self, t):
"""An off-channel input token was consumed.
Trigger after the token was matched by things like match(), matchAny().
(unless of course the hidden token is first stuff in the input stream).
"""
pass
def LT(self, i, t):
"""Somebody (anybody) looked ahead. Note that this actually gets
triggered by both LA and LT calls. The debugger will want to know
which Token object was examined. Like consumeToken, this indicates
what token was seen at that depth. A remote debugger cannot look
ahead into a file it doesn't have so LT events must pass the token
even if the info is redundant.
"""
pass
def mark(self, marker):
"""The parser is going to look arbitrarily ahead; mark this location,
the token stream's marker is sent in case you need it.
"""
pass
def rewind(self, marker=None):
"""After an arbitrairly long lookahead as with a cyclic DFA (or with
any backtrack), this informs the debugger that stream should be
rewound to the position associated with marker.
"""
pass
def beginBacktrack(self, level):
pass
def endBacktrack(self, level, successful):
pass
def location(self, line, pos):
"""To watch a parser move through the grammar, the parser needs to
inform the debugger what line/charPos it is passing in the grammar.
For now, this does not know how to switch from one grammar to the
other and back for island grammars etc...
This should also allow breakpoints because the debugger can stop
the parser whenever it hits this line/pos.
"""
pass
def recognitionException(self, e):
"""A recognition exception occurred such as NoViableAltException. I made
this a generic event so that I can alter the exception hierachy later
without having to alter all the debug objects.
Upon error, the stack of enter rule/subrule must be properly unwound.
If no viable alt occurs it is within an enter/exit decision, which
also must be rewound. Even the rewind for each mark must be unwount.
In the Java target this is pretty easy using try/finally, if a bit
ugly in the generated code. The rewind is generated in DFA.predict()
actually so no code needs to be generated for that. For languages
w/o this "finally" feature (C++?), the target implementor will have
to build an event stack or something.
Across a socket for remote debugging, only the RecognitionException
data fields are transmitted. The token object or whatever that
caused the problem was the last object referenced by LT. The
immediately preceding LT event should hold the unexpected Token or
char.
Here is a sample event trace for grammar:
b : C ({;}A|B) // {;} is there to prevent A|B becoming a set
| D
;
The sequence for this rule (with no viable alt in the subrule) for
input 'c c' (there are 3 tokens) is:
commence
LT(1)
enterRule b
location 7 1
enter decision 3
LT(1)
exit decision 3
enterAlt1
location 7 5
LT(1)
consumeToken [c/<4>,1:0]
location 7 7
enterSubRule 2
enter decision 2
LT(1)
LT(1)
recognitionException NoViableAltException 2 1 2
exit decision 2
exitSubRule 2
beginResync
LT(1)
consumeToken [c/<4>,1:1]
LT(1)
endResync
LT(-1)
exitRule b
terminate
"""
pass
def beginResync(self):
"""Indicates the recognizer is about to consume tokens to resynchronize
the parser. Any consume events from here until the recovered event
are not part of the parse--they are dead tokens.
"""
pass
def endResync(self):
"""Indicates that the recognizer has finished consuming tokens in order
to resychronize. There may be multiple beginResync/endResync pairs
before the recognizer comes out of errorRecovery mode (in which
multiple errors are suppressed). This will be useful
in a gui where you want to probably grey out tokens that are consumed
but not matched to anything in grammar. Anything between
a beginResync/endResync pair was tossed out by the parser.
"""
pass
def semanticPredicate(self, result, predicate):
"""A semantic predicate was evaluate with this result and action text"""
pass
def commence(self):
"""Announce that parsing has begun. Not technically useful except for
sending events over a socket. A GUI for example will launch a thread
to connect and communicate with a remote parser. The thread will want
to notify the GUI when a connection is made. ANTLR parsers
trigger this upon entry to the first rule (the ruleLevel is used to
figure this out).
"""
pass
def terminate(self):
"""Parsing is over; successfully or not. Mostly useful for telling
remote debugging listeners that it's time to quit. When the rule
invocation level goes to zero at the end of a rule, we are done
parsing.
"""
pass
## T r e e P a r s i n g
def consumeNode(self, t):
"""Input for a tree parser is an AST, but we know nothing for sure
about a node except its type and text (obtained from the adaptor).
This is the analog of the consumeToken method. Again, the ID is
the hashCode usually of the node so it only works if hashCode is
not implemented. If the type is UP or DOWN, then
the ID is not really meaningful as it's fixed--there is
just one UP node and one DOWN navigation node.
"""
pass
def LT(self, i, t):
"""The tree parser lookedahead. If the type is UP or DOWN,
then the ID is not really meaningful as it's fixed--there is
just one UP node and one DOWN navigation node.
"""
pass
## A S T E v e n t s
def nilNode(self, t):
"""A nil was created (even nil nodes have a unique ID...
they are not "null" per se). As of 4/28/2006, this
seems to be uniquely triggered when starting a new subtree
such as when entering a subrule in automatic mode and when
building a tree in rewrite mode.
If you are receiving this event over a socket via
RemoteDebugEventSocketListener then only t.ID is set.
"""
pass
def errorNode(self, t):
"""Upon syntax error, recognizers bracket the error with an error node
if they are building ASTs.
"""
pass
def createNode(self, node, token=None):
"""Announce a new node built from token elements such as type etc...
If you are receiving this event over a socket via
RemoteDebugEventSocketListener then only t.ID, type, text are
set.
"""
pass
def becomeRoot(self, newRoot, oldRoot):
"""Make a node the new root of an existing root.
Note: the newRootID parameter is possibly different
than the TreeAdaptor.becomeRoot() newRoot parameter.
In our case, it will always be the result of calling
TreeAdaptor.becomeRoot() and not root_n or whatever.
The listener should assume that this event occurs
only when the current subrule (or rule) subtree is
being reset to newRootID.
If you are receiving this event over a socket via
RemoteDebugEventSocketListener then only IDs are set.
@see antlr3.tree.TreeAdaptor.becomeRoot()
"""
pass
def addChild(self, root, child):
"""Make childID a child of rootID.
If you are receiving this event over a socket via
RemoteDebugEventSocketListener then only IDs are set.
@see antlr3.tree.TreeAdaptor.addChild()
"""
pass
def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex):
"""Set the token start/stop token index for a subtree root or node.
If you are receiving this event over a socket via
RemoteDebugEventSocketListener then only t.ID is set.
"""
pass
class BlankDebugEventListener(DebugEventListener):
"""A blank listener that does nothing; useful for real classes so
they don't have to have lots of blank methods and are less
sensitive to updates to debug interface.
Note: this class is identical to DebugEventListener and exists purely
for compatibility with Java.
"""
pass
class TraceDebugEventListener(DebugEventListener):
"""A listener that simply records text representations of the events.
Useful for debugging the debugging facility ;)
Subclasses can override the record() method (which defaults to printing to
stdout) to record the events in a different way.
"""
def __init__(self, adaptor=None):
super(TraceDebugEventListener, self).__init__()
if adaptor is None:
adaptor = CommonTreeAdaptor()
self.adaptor = adaptor
def record(self, event):
sys.stdout.write(event + '\n')
def enterRule(self, grammarFileName, ruleName):
self.record("enterRule "+ruleName)
def exitRule(self, grammarFileName, ruleName):
self.record("exitRule "+ruleName)
def enterSubRule(self, decisionNumber):
self.record("enterSubRule")
def exitSubRule(self, decisionNumber):
self.record("exitSubRule")
def location(self, line, pos):
self.record("location %s:%s" % (line, pos))
## Tree parsing stuff
def consumeNode(self, t):
self.record("consumeNode %s %s %s" % (
self.adaptor.getUniqueID(t),
self.adaptor.getText(t),
self.adaptor.getType(t)))
def LT(self, i, t):
self.record("LT %s %s %s %s" % (
i,
self.adaptor.getUniqueID(t),
self.adaptor.getText(t),
self.adaptor.getType(t)))
## AST stuff
def nilNode(self, t):
self.record("nilNode %s" % self.adaptor.getUniqueID(t))
def createNode(self, t, token=None):
if token is None:
self.record("create %s: %s, %s" % (
self.adaptor.getUniqueID(t),
self.adaptor.getText(t),
self.adaptor.getType(t)))
else:
self.record("create %s: %s" % (
self.adaptor.getUniqueID(t),
token.getTokenIndex()))
def becomeRoot(self, newRoot, oldRoot):
self.record("becomeRoot %s, %s" % (
self.adaptor.getUniqueID(newRoot),
self.adaptor.getUniqueID(oldRoot)))
def addChild(self, root, child):
self.record("addChild %s, %s" % (
self.adaptor.getUniqueID(root),
self.adaptor.getUniqueID(child)))
def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex):
self.record("setTokenBoundaries %s, %s, %s" % (
self.adaptor.getUniqueID(t),
tokenStartIndex, tokenStopIndex))
class RecordDebugEventListener(TraceDebugEventListener):
"""A listener that records events as strings in an array."""
def __init__(self, adaptor=None):
super(RecordDebugEventListener, self).__init__(adaptor)
self.events = []
def record(self, event):
self.events.append(event)
class DebugEventSocketProxy(DebugEventListener):
"""A proxy debug event listener that forwards events over a socket to
a debugger (or any other listener) using a simple text-based protocol;
one event per line. ANTLRWorks listens on server socket with a
RemoteDebugEventSocketListener instance. These two objects must therefore
be kept in sync. New events must be handled on both sides of socket.
"""
DEFAULT_DEBUGGER_PORT = 49100
def __init__(self, recognizer, adaptor=None, port=None,
debug=None):
super(DebugEventSocketProxy, self).__init__()
self.grammarFileName = recognizer.getGrammarFileName()
# Almost certainly the recognizer will have adaptor set, but
# we don't know how to cast it (Parser or TreeParser) to get
# the adaptor field. Must be set with a constructor. :(
self.adaptor = adaptor
self.port = port or self.DEFAULT_DEBUGGER_PORT
self.debug = debug
self.socket = None
self.connection = None
self.input = None
self.output = None
def log(self, msg):
if self.debug is not None:
self.debug.write(msg + '\n')
def handshake(self):
if self.socket is None:
# create listening socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(('', self.port))
self.socket.listen(1)
self.log("Waiting for incoming connection on port %d" % self.port)
# wait for an incoming connection
self.connection, addr = self.socket.accept()
self.log("Accepted connection from %s:%d" % addr)
self.connection.setblocking(1)
self.connection.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
# FIXME(pink): wrap into utf8 encoding stream
self.output = self.connection.makefile('w', 0)
self.input = self.connection.makefile('r', 0)
self.write("ANTLR %s" % self.PROTOCOL_VERSION)
self.write("grammar \"%s" % self.grammarFileName)
self.ack()
def write(self, msg):
self.log("> %s" % msg)
self.output.write("%s\n" % msg)
self.output.flush()
def ack(self):
t = self.input.readline()
self.log("< %s" % t.rstrip())
def transmit(self, event):
self.write(event);
self.ack();
def commence(self):
# don't bother sending event; listener will trigger upon connection
pass
def terminate(self):
self.transmit("terminate")
self.output.close()
self.input.close()
self.connection.close()
self.socket.close()
def enterRule(self, grammarFileName, ruleName):
self.transmit("enterRule\t%s\t%s" % (grammarFileName, ruleName))
def enterAlt(self, alt):
self.transmit("enterAlt\t%d" % alt)
def exitRule(self, grammarFileName, ruleName):
self.transmit("exitRule\t%s\t%s" % (grammarFileName, ruleName))
def enterSubRule(self, decisionNumber):
self.transmit("enterSubRule\t%d" % decisionNumber)
def exitSubRule(self, decisionNumber):
self.transmit("exitSubRule\t%d" % decisionNumber)
def enterDecision(self, decisionNumber):
self.transmit("enterDecision\t%d" % decisionNumber)
def exitDecision(self, decisionNumber):
self.transmit("exitDecision\t%d" % decisionNumber)
def consumeToken(self, t):
self.transmit("consumeToken\t%s" % self.serializeToken(t))
def consumeHiddenToken(self, t):
self.transmit("consumeHiddenToken\t%s" % self.serializeToken(t))
def LT(self, i, o):
if isinstance(o, Tree):
return self.LT_tree(i, o)
return self.LT_token(i, o)
def LT_token(self, i, t):
if t is not None:
self.transmit("LT\t%d\t%s" % (i, self.serializeToken(t)))
def mark(self, i):
self.transmit("mark\t%d" % i)
def rewind(self, i=None):
if i is not None:
self.transmit("rewind\t%d" % i)
else:
self.transmit("rewind")
def beginBacktrack(self, level):
self.transmit("beginBacktrack\t%d" % level)
def endBacktrack(self, level, successful):
self.transmit("endBacktrack\t%d\t%s" % (
level, ['0', '1'][bool(successful)]))
def location(self, line, pos):
self.transmit("location\t%d\t%d" % (line, pos))
def recognitionException(self, exc):
self.transmit('\t'.join([
"exception",
exc.__class__.__name__,
str(int(exc.index)),
str(int(exc.line)),
str(int(exc.charPositionInLine))]))
def beginResync(self):
self.transmit("beginResync")
def endResync(self):
self.transmit("endResync")
def semanticPredicate(self, result, predicate):
self.transmit('\t'.join([
"semanticPredicate",
str(int(result)),
self.escapeNewlines(predicate)]))
## A S T P a r s i n g E v e n t s
def consumeNode(self, t):
FIXME(31)
# StringBuffer buf = new StringBuffer(50);
# buf.append("consumeNode");
# serializeNode(buf, t);
# transmit(buf.toString());
def LT_tree(self, i, t):
FIXME(34)
# int ID = adaptor.getUniqueID(t);
# String text = adaptor.getText(t);
# int type = adaptor.getType(t);
# StringBuffer buf = new StringBuffer(50);
# buf.append("LN\t"); // lookahead node; distinguish from LT in protocol
# buf.append(i);
# serializeNode(buf, t);
# transmit(buf.toString());
def serializeNode(self, buf, t):
FIXME(33)
# int ID = adaptor.getUniqueID(t);
# String text = adaptor.getText(t);
# int type = adaptor.getType(t);
# buf.append("\t");
# buf.append(ID);
# buf.append("\t");
# buf.append(type);
# Token token = adaptor.getToken(t);
# int line = -1;
# int pos = -1;
# if ( token!=null ) {
# line = token.getLine();
# pos = token.getCharPositionInLine();
# }
# buf.append("\t");
# buf.append(line);
# buf.append("\t");
# buf.append(pos);
# int tokenIndex = adaptor.getTokenStartIndex(t);
# buf.append("\t");
# buf.append(tokenIndex);
# serializeText(buf, text);
## A S T E v e n t s
def nilNode(self, t):
self.transmit("nilNode\t%d" % self.adaptor.getUniqueID(t))
def errorNode(self, t):
self.transmit("errorNode\t%d\t%d\t\"%s" % (
self.adaptor.getUniqueID(t),
Token.INVALID_TOKEN_TYPE,
self.escapeNewlines(t.toString())))
def createNode(self, node, token=None):
if token is not None:
self.transmit("createNode\t%d\t%d" % (
self.adaptor.getUniqueID(node),
token.getTokenIndex()))
else:
self.transmit("createNodeFromTokenElements\t%d\t%d\t\"%s" % (
self.adaptor.getUniqueID(node),
self.adaptor.getType(node),
self.adaptor.getText(node)))
def becomeRoot(self, newRoot, oldRoot):
self.transmit("becomeRoot\t%d\t%d" % (
self.adaptor.getUniqueID(newRoot),
self.adaptor.getUniqueID(oldRoot)))
def addChild(self, root, child):
self.transmit("addChild\t%d\t%d" % (
self.adaptor.getUniqueID(root),
self.adaptor.getUniqueID(child)))
def setTokenBoundaries(self, t, tokenStartIndex, tokenStopIndex):
self.transmit("setTokenBoundaries\t%d\t%d\t%d" % (
self.adaptor.getUniqueID(t),
tokenStartIndex, tokenStopIndex))
## support
def setTreeAdaptor(self, adaptor):
self.adaptor = adaptor
def getTreeAdaptor(self):
return self.adaptor
def serializeToken(self, t):
buf = [str(int(t.getTokenIndex())),
str(int(t.getType())),
str(int(t.getChannel())),
str(int(t.getLine() or 0)),
str(int(t.getCharPositionInLine() or 0)),
'\"' + self.escapeNewlines(t.getText())]
return '\t'.join(buf)
def escapeNewlines(self, txt):
if txt is None:
return ''
txt = txt.replace("%","%25") # escape all escape char ;)
txt = txt.replace("\n","%0A") # escape \n
txt = txt.replace("\r","%0D") # escape \r
return txt
| 33,654 | Python | .py | 788 | 34.685279 | 80 | 0.650094 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,328 | compat.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/compat.py | """Compatibility stuff"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
try:
set = set
frozenset = frozenset
except NameError:
from sets import Set as set, ImmutableSet as frozenset
try:
reversed = reversed
except NameError:
def reversed(l):
l = l[:]
l.reverse()
return l
| 1,776 | Python | .py | 42 | 40.095238 | 75 | 0.769676 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,329 | exceptions.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/exceptions.py | """ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
"""@brief Raised to signal failed backtrack attempt"""
pass
class RecognitionException(Exception):
"""@brief The root of the ANTLR exception hierarchy.
To avoid English-only error messages and to generally make things
as flexible as possible, these exceptions are not created with strings,
but rather the information necessary to generate an error. Then
the various reporting methods in Parser and Lexer can be overridden
to generate a localized error message. For example, MismatchedToken
exceptions are built with the expected token type.
So, don't expect getMessage() to return anything.
Note that as of Java 1.4, you can access the stack trace, which means
that you can compute the complete trace of rules from the start symbol.
This gives you considerable context information with which to generate
useful error messages.
ANTLR generates code that throws exceptions upon recognition error and
also generates code to catch these exceptions in each rule. If you
want to quit upon first error, you can turn off the automatic error
handling mechanism using rulecatch action, but you still need to
override methods mismatch and recoverFromMismatchSet.
In general, the recognition exceptions can track where in a grammar a
problem occurred and/or what was the expected input. While the parser
knows its state (such as current input symbol and line info) that
state can change before the exception is reported so current token index
is computed and stored at exception time. From this info, you can
perhaps print an entire line of input not just a single token, for example.
Better to just say the recognizer had a problem and then let the parser
figure out a fancy report.
"""
def __init__(self, input=None):
Exception.__init__(self)
# What input stream did the error occur in?
self.input = None
# What is index of token/char were we looking at when the error
# occurred?
self.index = None
# The current Token when an error occurred. Since not all streams
# can retrieve the ith Token, we have to track the Token object.
# For parsers. Even when it's a tree parser, token might be set.
self.token = None
# If this is a tree parser exception, node is set to the node with
# the problem.
self.node = None
# The current char when an error occurred. For lexers.
self.c = None
# Track the line at which the error occurred in case this is
# generated from a lexer. We need to track this since the
# unexpected char doesn't carry the line info.
self.line = None
self.charPositionInLine = None
# If you are parsing a tree node stream, you will encounter som
# imaginary nodes w/o line/col info. We now search backwards looking
# for most recent token with line/col info, but notify getErrorHeader()
# that info is approximate.
self.approximateLineInfo = False
if input is not None:
self.input = input
self.index = input.index()
# late import to avoid cyclic dependencies
from .streams import TokenStream, CharStream
from .tree import TreeNodeStream
if isinstance(self.input, TokenStream):
self.token = self.input.LT(1)
self.line = self.token.line
self.charPositionInLine = self.token.charPositionInLine
if isinstance(self.input, TreeNodeStream):
self.extractInformationFromTreeNodeStream(self.input)
else:
if isinstance(self.input, CharStream):
self.c = self.input.LT(1)
self.line = self.input.line
self.charPositionInLine = self.input.charPositionInLine
else:
self.c = self.input.LA(1)
def extractInformationFromTreeNodeStream(self, nodes):
from .tree import Tree, CommonTree
from .tokens import CommonToken
self.node = nodes.LT(1)
adaptor = nodes.adaptor
payload = adaptor.getToken(self.node)
if payload is not None:
self.token = payload
if payload.line <= 0:
# imaginary node; no line/pos info; scan backwards
i = -1
priorNode = nodes.LT(i)
while priorNode is not None:
priorPayload = adaptor.getToken(priorNode)
if priorPayload is not None and priorPayload.line > 0:
# we found the most recent real line / pos info
self.line = priorPayload.line
self.charPositionInLine = priorPayload.charPositionInLine
self.approximateLineInfo = True
break
i -= 1
priorNode = nodes.LT(i)
else: # node created from real token
self.line = payload.line
self.charPositionInLine = payload.charPositionInLine
elif isinstance(self.node, Tree):
self.line = self.node.line
self.charPositionInLine = self.node.charPositionInLine
if isinstance(self.node, CommonTree):
self.token = self.node.token
else:
type = adaptor.getType(self.node)
text = adaptor.getText(self.node)
self.token = CommonToken(type=type, text=text)
def getUnexpectedType(self):
"""Return the token type or char of the unexpected input element"""
from .streams import TokenStream
from .tree import TreeNodeStream
if isinstance(self.input, TokenStream):
return self.token.type if self.token is not None else None
elif isinstance(self.input, TreeNodeStream):
adaptor = self.input.treeAdaptor
return adaptor.getType(self.node)
else:
return self.c
unexpectedType = property(getUnexpectedType)
class MismatchedTokenException(RecognitionException):
"""@brief A mismatched char or Token or tree node."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
#return "MismatchedTokenException("+self.expecting+")"
return "MismatchedTokenException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
"""An extra token while parsing a TokenStream"""
def getUnexpectedToken(self):
return self.token
def __str__(self):
exp = ", expected %s" % self.expecting
if self.expecting == INVALID_TOKEN_TYPE:
exp = ""
if self.token is None:
return "UnwantedTokenException(found=%s%s)" % (None, exp)
return "UnwantedTokenException(found=%s%s)" % (self.token.text, exp)
__repr__ = __str__
class MissingTokenException(MismatchedTokenException):
"""
We were expecting a token but it's not found. The current token
is actually what we wanted next.
"""
def __init__(self, expecting, input, inserted):
MismatchedTokenException.__init__(self, expecting, input)
self.inserted = inserted
def getMissingType(self):
return self.expecting
def __str__(self):
if self.inserted is not None and self.token is not None:
return "MissingTokenException(inserted %r at %r)" % (
self.inserted, self.token.text)
if self.token is not None:
return "MissingTokenException(at %r)" % self.token.text
return "MissingTokenException"
__repr__ = __str__
class MismatchedRangeException(RecognitionException):
"""@brief The next token does not match a range of expected types."""
def __init__(self, a, b, input):
RecognitionException.__init__(self, input)
self.a = a
self.b = b
def __str__(self):
return "MismatchedRangeException(%r not in [%r..%r])" % (
self.getUnexpectedType(), self.a, self.b
)
__repr__ = __str__
class MismatchedSetException(RecognitionException):
"""@brief The next token does not match a set of expected types."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
return "MismatchedSetException(%r not in %r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
"""@brief Used for remote debugger deserialization"""
def __str__(self):
return "MismatchedNotSetException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class NoViableAltException(RecognitionException):
"""@brief Unable to decide which alternative to choose."""
def __init__(
self, grammarDecisionDescription, decisionNumber, stateNumber, input
):
RecognitionException.__init__(self, input)
self.grammarDecisionDescription = grammarDecisionDescription
self.decisionNumber = decisionNumber
self.stateNumber = stateNumber
def __str__(self):
return "NoViableAltException(%r!=[%r])" % (
self.unexpectedType, self.grammarDecisionDescription
)
__repr__ = __str__
class EarlyExitException(RecognitionException):
"""@brief The recognizer did not match anything for a (..)+ loop."""
def __init__(self, decisionNumber, input):
RecognitionException.__init__(self, input)
self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
"""@brief A semantic predicate failed during validation.
Validation of predicates
occurs when normally parsing the alternative just like matching a token.
Disambiguating predicate evaluation occurs when we hoist a predicate into
a prediction decision.
"""
def __init__(self, input, ruleName, predicateText):
RecognitionException.__init__(self, input)
self.ruleName = ruleName
self.predicateText = predicateText
def __str__(self):
return "FailedPredicateException("+self.ruleName+",{"+self.predicateText+"}?)"
__repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
"""@brief The next tree mode does not match the expected type."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
return "MismatchedTreeNodeException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
| 12,725 | Python | .py | 264 | 39.359848 | 86 | 0.672586 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,330 | main.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/main.py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
import sys
import optparse
import antlr3
class _Main(object):
def __init__(self):
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
def parseOptions(self, argv):
optParser = optparse.OptionParser()
optParser.add_option(
"--encoding",
action="store",
type="string",
dest="encoding"
)
optParser.add_option(
"--input",
action="store",
type="string",
dest="input"
)
optParser.add_option(
"--interactive", "-i",
action="store_true",
dest="interactive"
)
optParser.add_option(
"--no-output",
action="store_true",
dest="no_output"
)
optParser.add_option(
"--profile",
action="store_true",
dest="profile"
)
optParser.add_option(
"--hotshot",
action="store_true",
dest="hotshot"
)
optParser.add_option(
"--port",
type="int",
dest="port",
default=None
)
optParser.add_option(
"--debug-socket",
action='store_true',
dest="debug_socket",
default=None
)
self.setupOptions(optParser)
return optParser.parse_args(argv[1:])
def setupOptions(self, optParser):
pass
def execute(self, argv):
options, args = self.parseOptions(argv)
self.setUp(options)
if options.interactive:
while True:
try:
input = raw_input(">>> ")
except (EOFError, KeyboardInterrupt):
self.stdout.write("\nBye.\n")
break
inStream = antlr3.ANTLRStringStream(input)
self.parseStream(options, inStream)
else:
if options.input is not None:
inStream = antlr3.ANTLRStringStream(options.input)
elif len(args) == 1 and args[0] != '-':
inStream = antlr3.ANTLRFileStream(
args[0], encoding=options.encoding
)
else:
inStream = antlr3.ANTLRInputStream(
self.stdin, encoding=options.encoding
)
if options.profile:
try:
import cProfile as profile
except ImportError:
import profile
profile.runctx(
'self.parseStream(options, inStream)',
globals(),
locals(),
'profile.dat'
)
import pstats
stats = pstats.Stats('profile.dat')
stats.strip_dirs()
stats.sort_stats('time')
stats.print_stats(100)
elif options.hotshot:
import hotshot
profiler = hotshot.Profile('hotshot.dat')
profiler.runctx(
'self.parseStream(options, inStream)',
globals(),
locals()
)
else:
self.parseStream(options, inStream)
def setUp(self, options):
pass
def parseStream(self, options, inStream):
raise NotImplementedError
def write(self, options, text):
if not options.no_output:
self.stdout.write(text)
def writeln(self, options, text):
self.write(options, text + '\n')
class LexerMain(_Main):
def __init__(self, lexerClass):
_Main.__init__(self)
self.lexerClass = lexerClass
def parseStream(self, options, inStream):
lexer = self.lexerClass(inStream)
for token in lexer:
self.writeln(options, str(token))
class ParserMain(_Main):
def __init__(self, lexerClassName, parserClass):
_Main.__init__(self)
self.lexerClassName = lexerClassName
self.lexerClass = None
self.parserClass = parserClass
def setupOptions(self, optParser):
optParser.add_option(
"--lexer",
action="store",
type="string",
dest="lexerClass",
default=self.lexerClassName
)
optParser.add_option(
"--rule",
action="store",
type="string",
dest="parserRule"
)
def setUp(self, options):
lexerMod = __import__(options.lexerClass)
self.lexerClass = getattr(lexerMod, options.lexerClass)
def parseStream(self, options, inStream):
kwargs = {}
if options.port is not None:
kwargs['port'] = options.port
if options.debug_socket is not None:
kwargs['debug_socket'] = sys.stderr
lexer = self.lexerClass(inStream)
tokenStream = antlr3.CommonTokenStream(lexer)
parser = self.parserClass(tokenStream, **kwargs)
result = getattr(parser, options.parserRule)()
if result is not None:
if hasattr(result, 'tree'):
if result.tree is not None:
self.writeln(options, result.tree.toStringTree())
else:
self.writeln(options, repr(result))
class WalkerMain(_Main):
def __init__(self, walkerClass):
_Main.__init__(self)
self.lexerClass = None
self.parserClass = None
self.walkerClass = walkerClass
def setupOptions(self, optParser):
optParser.add_option(
"--lexer",
action="store",
type="string",
dest="lexerClass",
default=None
)
optParser.add_option(
"--parser",
action="store",
type="string",
dest="parserClass",
default=None
)
optParser.add_option(
"--parser-rule",
action="store",
type="string",
dest="parserRule",
default=None
)
optParser.add_option(
"--rule",
action="store",
type="string",
dest="walkerRule"
)
def setUp(self, options):
lexerMod = __import__(options.lexerClass)
self.lexerClass = getattr(lexerMod, options.lexerClass)
parserMod = __import__(options.parserClass)
self.parserClass = getattr(parserMod, options.parserClass)
def parseStream(self, options, inStream):
lexer = self.lexerClass(inStream)
tokenStream = antlr3.CommonTokenStream(lexer)
parser = self.parserClass(tokenStream)
result = getattr(parser, options.parserRule)()
if result is not None:
assert hasattr(result, 'tree'), "Parser did not return an AST"
nodeStream = antlr3.tree.CommonTreeNodeStream(result.tree)
nodeStream.setTokenStream(tokenStream)
walker = self.walkerClass(nodeStream)
result = getattr(walker, options.walkerRule)()
if result is not None:
if hasattr(result, 'tree'):
self.writeln(options, result.tree.toStringTree())
else:
self.writeln(options, repr(result))
| 9,134 | Python | .py | 248 | 25.46371 | 75 | 0.5693 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,331 | dottreegen.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/dottreegen.py | """ @package antlr3.dottreegenerator
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from .tree import CommonTreeAdaptor
import stringtemplate3
class DOTTreeGenerator(object):
"""
A utility class to generate DOT diagrams (graphviz) from
arbitrary trees. You can pass in your own templates and
can pass in any kind of tree or use Tree interface method.
"""
_treeST = stringtemplate3.StringTemplate(
template=(
"digraph {\n" +
" ordering=out;\n" +
" ranksep=.4;\n" +
" node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
" width=.25, height=.25];\n" +
" edge [arrowsize=.5]\n" +
" $nodes$\n" +
" $edges$\n" +
"}\n")
)
_nodeST = stringtemplate3.StringTemplate(
template="$name$ [label=\"$text$\"];\n"
)
_edgeST = stringtemplate3.StringTemplate(
template="$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n"
)
def __init__(self):
## Track node to number mapping so we can get proper node name back
self.nodeToNumberMap = {}
## Track node number so we can get unique node names
self.nodeNumber = 0
def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST):
if adaptor is None:
adaptor = CommonTreeAdaptor()
treeST = treeST.getInstanceOf()
self.nodeNumber = 0
self.toDOTDefineNodes(tree, adaptor, treeST)
self.nodeNumber = 0
self.toDOTDefineEdges(tree, adaptor, treeST, edgeST)
return treeST
def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None):
if knownNodes is None:
knownNodes = set()
if tree is None:
return
n = adaptor.getChildCount(tree)
if n == 0:
# must have already dumped as child from previous
# invocation; do nothing
return
# define parent node
number = self.getNodeNumber(tree)
if number not in knownNodes:
parentNodeST = self.getNodeST(adaptor, tree)
treeST.setAttribute("nodes", parentNodeST)
knownNodes.add(number)
# for each child, do a "<unique-name> [label=text]" node def
for i in range(n):
child = adaptor.getChild(tree, i)
number = self.getNodeNumber(child)
if number not in knownNodes:
nodeST = self.getNodeST(adaptor, child)
treeST.setAttribute("nodes", nodeST)
knownNodes.add(number)
self.toDOTDefineNodes(child, adaptor, treeST, knownNodes)
def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST):
if tree is None:
return
n = adaptor.getChildCount(tree)
if n == 0:
# must have already dumped as child from previous
# invocation; do nothing
return
parentName = "n%d" % self.getNodeNumber(tree)
# for each child, do a parent -> child edge using unique node names
parentText = adaptor.getText(tree)
for i in range(n):
child = adaptor.getChild(tree, i)
childText = adaptor.getText(child)
childName = "n%d" % self.getNodeNumber(child)
edgeST = edgeST.getInstanceOf()
edgeST.setAttribute("parent", parentName)
edgeST.setAttribute("child", childName)
edgeST.setAttribute("parentText", parentText)
edgeST.setAttribute("childText", childText)
treeST.setAttribute("edges", edgeST)
self.toDOTDefineEdges(child, adaptor, treeST, edgeST)
def getNodeST(self, adaptor, t):
text = adaptor.getText(t)
nodeST = self._nodeST.getInstanceOf()
uniqueName = "n%d" % self.getNodeNumber(t)
nodeST.setAttribute("name", uniqueName)
if text is not None:
text = text.replace('"', r'\\"')
nodeST.setAttribute("text", text)
return nodeST
def getNodeNumber(self, t):
try:
return self.nodeToNumberMap[t]
except KeyError:
self.nodeToNumberMap[t] = self.nodeNumber
self.nodeNumber += 1
return self.nodeNumber - 1
def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST):
"""
Generate DOT (graphviz) for a whole tree not just a node.
For example, 3+4*5 should generate:
digraph {
node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
width=.4, height=.2];
edge [arrowsize=.7]
"+"->3
"+"->"*"
"*"->4
"*"->5
}
Return the ST not a string in case people want to alter.
Takes a Tree interface object.
Example of invokation:
import antlr3
import antlr3.extras
input = antlr3.ANTLRInputStream(sys.stdin)
lex = TLexer(input)
tokens = antlr3.CommonTokenStream(lex)
parser = TParser(tokens)
tree = parser.e().tree
print tree.toStringTree()
st = antlr3.extras.toDOT(t)
print st
"""
gen = DOTTreeGenerator()
return gen.toDOT(tree, adaptor, treeST, edgeST)
| 6,974 | Python | .py | 167 | 33.916168 | 96 | 0.649911 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,332 | extras.py | simonwagner_mergepbx/src/plist/antlr/runtime/antlr3/extras.py | """ @package antlr3.dottreegenerator
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from treewizard import TreeWizard
try:
from .dottreegen import toDOT
except ImportError, exc:
def toDOT(*args, **kwargs):
raise exc
| 1,907 | Python | .py | 41 | 44.97561 | 79 | 0.786022 | simonwagner/mergepbx | 1,037 | 46 | 14 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,333 | setup.py | devsnd_cherrymusic/setup.py | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os
import sys
import codecs
try:
import py2exe
except ImportError:
pass
import re
here = os.path.abspath(os.path.dirname(__file__))
def get_global_str_from_file(rel_filepath, var):
prog = re.compile(r'^{0} = ("|\')(.*?)("|\')'.format(var))
with open(os.path.join(here, rel_filepath), 'r') as f:
for line in f.readlines():
res = prog.match(line)
if res is not None:
return str(res.group(2))
initFile = 'cherrymusicserver/__init__.py'
VERSION = get_global_str_from_file(initFile, 'VERSION')
DESCRIPTION = get_global_str_from_file(initFile, 'DESCRIPTION')
import gzip
def gzipManPages():
localManPagePath = 'doc/man'
for manpage in os.listdir(localManPagePath):
#man pages end in numbers
if manpage.endswith(tuple(map(str,range(10)))):
manpagefn = os.path.join(localManPagePath, manpage)
with open(manpagefn, 'rb') as manfile:
manfilegz = gzip.open(manpagefn+'.gz', 'wb')
manfilegz.writelines(manfile)
manfilegz.close()
def list_files_in_dir(crawlpath, installpath, filterfunc=None, excluded_paths=None):
all_files = []
for dirpath, dirnames, filenames in os.walk(crawlpath):
if excluded_paths and any(dirpath.startswith(path) for path in excluded_paths):
continue
files = []
for filename in filenames:
if filterfunc:
if not filterfunc(filename):
continue
files += [os.path.join(dirpath, filename)]
all_files += [(os.path.join(installpath, dirpath), files)]
return all_files
def module(foldername):
ret = [foldername]
for i in os.listdir(foldername):
if i == '__pycache__':
continue
subfolder = os.path.join(foldername, i)
if os.path.isdir(subfolder) and _ispackage(subfolder):
ret += module(subfolder)
ret += [subfolder.replace(os.sep,'.')]
return ret
def _ispackage(foldername):
return '__init__.py' in os.listdir(foldername)
def read(*parts):
return codecs.open(os.path.join(here, *parts), 'r').read()
def packagedata(pkgfolder, childpath=''):
ret = []
for n in os.listdir(os.path.join(pkgfolder, childpath)):
if n == '__pycache__':
continue
child = os.path.join(childpath, n)
fullchild = os.path.join(pkgfolder, child)
if os.path.isdir(fullchild):
if not _ispackage(fullchild):
ret += packagedata(pkgfolder, child)
elif os.path.isfile(fullchild):
if not os.path.splitext(n)[1].startswith('.py'):
ret += [child]
return ret
#setup preparations:
gzipManPages()
pathproviderFile = os.path.join('cherrymusicserver/pathprovider.py')
shareFolder = os.path.join(
'share', get_global_str_from_file(pathproviderFile, 'sharedFolderName')
)
# files to put in /usr/share
data_files = list_files_in_dir(
'res',
shareFolder,
excluded_paths=['res/react-client/node_modules']
)
long_description = None
if 'upload' in sys.argv or 'register' in sys.argv:
readmemd = "\n" + "\n".join([read('README.md')])
print("converting markdown to reStucturedText for upload to pypi.")
from urllib.request import urlopen
from urllib.parse import quote
import json
url = 'http://pandoc.org/cgi-bin/trypandoc?from=markdown&to=rst&text=%s'
urlhandler = urlopen(url % quote(readmemd))
result = json.loads(codecs.decode(urlhandler.read(), 'utf-8'))
long_description = result['html']
else:
long_description = "\n" + "\n".join([read('README.md')])
setup_options = {
'name': 'CherryMusic',
'version': VERSION,
'description': DESCRIPTION,
'long_description': long_description,
'author': 'Tom Wallroth & Tilman Boerner',
'author_email': 'tomwallroth@gmail.com, tilman.boerner@gmx.net',
'url': 'http://www.fomori.org/cherrymusic/',
'license': 'GPLv3',
'install_requires': ['CherryPy >= 3.2.2'],
'packages': module('cherrymusicserver')+module('tinytag')+module('audiotranscode')+module('cmbootstrap')+module('backport'),
'package_data': {
'cherrymusicserver.database.defs': packagedata('cherrymusicserver/database/defs'),
},
#startup script
'scripts': ['cherrymusic','cherrymusicd','cherrymusic-tray'],
'classifiers': [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Framework :: CherryPy',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Natural Language :: English',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
'data_files': data_files
}
if os.name == 'nt':
#py2exe specific
setup_options['console'] = [
{
'icon_resources': [(1, 'res/favicon.ico')],
'script':'cherrymusic'
}
]
setup(**setup_options)
| 5,647 | Python | .py | 149 | 31.248322 | 128 | 0.634307 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,334 | __init__.py | devsnd_cherrymusic/cmbootstrap/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""
This script is used to download cherrymusic dependencies on first startup.
"""
try:
import urllib.request
except ImportError:
from backport import urllib
from backport import input
import tempfile
import tarfile
import shutil
import os
class DependencyInstaller:
def __init__(self):
self.cherrymusicfolder = os.path.dirname(os.path.dirname(__file__))
def install_cherrypy(self):
"""
cherrypy releases: https://bitbucket.org/cherrypy/cherrypy/downloads
"""
cherrypygitcommit = "586bee7ac445"
cherrypyurl = "https://bitbucket.org/cherrypy/cherrypy/get/%s.tar.gz" % cherrypygitcommit
cherrypysubfolder = os.path.join('cherrypy-cherrypy-%s'%cherrypygitcommit,'cherrypy')
cherrypytempfile = os.path.join(tempfile.gettempdir(),'cherrypy.tar.gz')
cherrypytempdir = self.tmpdir('cherrypy')
print('Downloading cherrypy...')
self.dl(cherrypyurl, cherrypytempfile)
print('Extracting %s ' % cherrypytempfile)
tarc = tarfile.open(cherrypytempfile,'r:gz')
tarc.extractall(cherrypytempdir)
tarc.close()
print('Copying cherrypy module inside cherrymusic folder (%s)...' % self.cherrymusicfolder)
moduledir = os.path.join(cherrypytempdir,cherrypysubfolder)
shutil.copytree(moduledir,os.path.join(self.cherrymusicfolder,'cherrypy'))
print('Cleaning up temporary files...')
shutil.rmtree(cherrypytempdir)
os.remove(cherrypytempfile)
def tmpdir(self, name):
tempdirpath = os.path.join(tempfile.gettempdir(),name)
if os.path.exists(tempdirpath):
print("Directory %s already exists." % tempdirpath)
if 'y' == input('Do you want to delete its contents and proceed? [y/n]'):
shutil.rmtree(tempdirpath)
else:
print("Cannot install dependency.")
exit(1)
os.mkdir(tempdirpath)
return tempdirpath
def dl(self,url,target):
with open(target, 'wb') as f:
urlhandler = urllib.request.urlopen(urllib.request.Request(url))
f.write(urlhandler.read())
def bootstrap():
import sys
try:
import cherrypy
except ImportError:
print('''
CherryMusic needs the module "cherrypy" to run. You should install it
using the package manager of your OS. Alternatively cherrymusic can
download it for you and put it in the folder in which currently
CherryMusic resides.
''')
if input("Download cherrypy now? (y/N)\n") in ('y', 'yes'):
inst = DependencyInstaller()
inst.install_cherrypy()
print('Successfully installed cherrymusic dependencies! You can now start cherrymusic.')
else:
sys.exit(1)
| 3,910 | Python | .py | 98 | 34 | 100 | 0.68883 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,335 | tinytag.py | devsnd_cherrymusic/tinytag/tinytag.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# tinytag - an audio meta info reader
# Copyright (c) 2014-2015 Tom Wallroth
#
# Sources on github:
# http://github.com/devsnd/tinytag/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import codecs
from functools import reduce
import struct
import os
import io
from io import BytesIO
DEBUG = False # some of the parsers will print some debug info when set to True
class TinyTagException(Exception):
pass
def _read(fh, nbytes): # helper function to check if we haven't reached EOF
b = fh.read(nbytes)
if len(b) < nbytes:
raise TinyTagException('Unexpected end of file')
return b
class TinyTag(object):
"""Base class for all tag types"""
def __init__(self, filehandler, filesize):
self._filehandler = filehandler
self.filesize = filesize
self.album = None
self.albumartist = None
self.artist = None
self.audio_offset = 0
self.bitrate = 0.0 # must be float for later VBR calculations
self.channels = None
self.disc = None
self.disc_total = None
self.duration = 0
self.genre = None
self.samplerate = None
self.title = None
self.track = None
self.track_total = None
self.year = None
self._load_image = False
self._image_data = None
def get_image(self):
return self._image_data
def has_all_tags(self):
"""check if all tags are already defined. Useful for ID3 tags
since multiple kinds of tags can be in one audio file
"""
return all((self.track, self.track_total, self.title,
self.artist, self.album, self.albumartist,
self.year, self.genre))
@classmethod
def get(cls, filename, tags=True, duration=True, image=False):
parser_class = None
size = os.path.getsize(filename)
if not size > 0:
return TinyTag(None, 0)
if cls == TinyTag:
"""choose which tag reader should be used by file extension"""
mapping = {
('.mp3',): ID3,
('.oga', '.ogg', '.opus'): Ogg,
('.wav'): Wave,
('.flac'): Flac,
('.wma'): Wma,
('.m4a', '.mp4'): MP4,
}
for fileextension, tagclass in mapping.items():
if filename.lower().endswith(fileextension):
parser_class = tagclass
else:
# use class on which the method was invoked as parser
parser_class = cls
if parser_class is None:
raise LookupError('No tag reader found to support filetype! ')
with io.open(filename, 'rb') as af:
tag = parser_class(af, size)
tag.load(tags=tags, duration=duration, image=image)
return tag
def __str__(self):
public_attrs = ((k, v) for k, v in self.__dict__.items() if not k.startswith('_'))
return str(dict(public_attrs))
def __repr__(self):
return str(self)
def load(self, tags, duration, image=False):
"""default behavior of all tags. This method is called in the
constructors of all tag readers
"""
if image:
self._load_image = True
if tags:
self._parse_tag(self._filehandler)
if duration:
if tags: # rewind file if the tags were already parsed
self._filehandler.seek(0)
self._determine_duration(self._filehandler)
def _set_field(self, fieldname, bytestring, transfunc=None, overwrite=True):
"""convienience function to set fields of the tinytag by name.
the payload (bytestring) can be changed using the transfunc"""
if getattr(self, fieldname):
return
value = bytestring if transfunc is None else transfunc(bytestring)
if fieldname == 'genre' and value.isdigit() and int(value) < len(ID3.ID3V1_GENRES):
# funky: id3v1 genre hidden in a id3v2 field
value = ID3.ID3V1_GENRES[int(value)]
if fieldname in ("track", "disc"):
current = total = None
if type(value).__name__ in ('str', 'unicode') and '/' in value:
current, total = value.split('/')[:2]
setattr(self, "%s_total" % fieldname, total)
else:
current = value
setattr(self, fieldname, current)
else:
setattr(self, fieldname, value)
def _determine_duration(self, fh):
raise NotImplementedError()
def _parse_tag(self, fh):
raise NotImplementedError()
def update(self, other):
"""update the values of this tag with the values from another tag"""
for key in ['track', 'track_total', 'title', 'artist',
'album', 'albumartist', 'year', 'duration',
'genre', 'disc', 'disc_total']:
if not getattr(self, key) and getattr(other, key):
setattr(self, key, getattr(other, key))
def _bytes_to_int(self, b):
return reduce(lambda accu, elem: (accu << 8) + elem, b, 0)
def _bytes_to_int_le(self, b):
fmt = {1: '<B', 2: '<H', 4: '<I', 8: '<Q'}
if len(b) not in fmt:
return 0
return struct.unpack(fmt[len(b)], b)[0] if len(b) else 0
def _unpad(self, s):
# strings in mp3 and asf _can_ be terminated with a zero byte at the end
return s[:s.index('\x00')] if '\x00' in s else s
class MP4(TinyTag):
# see: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/Metadata/Metadata.html
# and: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html
class Parser:
ATOM_DECODER_BY_TYPE = {
0: lambda x: x, # 'reserved',
1: lambda x: codecs.decode(x, 'utf-8'), # UTF-8
2: lambda x: codecs.decode(x, 'utf-16'), # UTF-16
3: lambda x: codecs.decode(x, 's/jis'), # S/JIS
# 16: duration in millis
13: lambda x: x, # JPEG
14: lambda x: x, # PNG
21: lambda x: struct.unpack('>b', x)[0], # BE Signed Integer
22: lambda x: struct.unpack('>B', x)[0], # BE Unsigned Integer
23: lambda x: struct.unpack('>f', x)[0], # BE Float32
24: lambda x: struct.unpack('>d', x)[0], # BE Float64
# 27: lambda x: x, # BMP
# 28: lambda x: x, # QuickTime Metadata atom
65: lambda x: struct.unpack('b', x)[0], # 8-bit Signed Integer
66: lambda x: struct.unpack('>h', x)[0], # BE 16-bit Signed Integer
67: lambda x: struct.unpack('>i', x)[0], # BE 32-bit Signed Integer
74: lambda x: struct.unpack('>q', x)[0], # BE 64-bit Signed Integer
75: lambda x: struct.unpack('B', x)[0], # 8-bit Unsigned Integer
76: lambda x: struct.unpack('>H', x)[0], # BE 16-bit Unsigned Integer
77: lambda x: struct.unpack('>I', x)[0], # BE 32-bit Unsigned Integer
78: lambda x: struct.unpack('>Q', x)[0], # BE 64-bit Unsigned Integer
}
@classmethod
def make_data_atom_parser(cls, fieldname):
def parse_data_atom(data_atom):
data_type = struct.unpack('>I', data_atom[:4])[0]
conversion = cls.ATOM_DECODER_BY_TYPE.get(data_type)
if conversion is None:
print('Cannot convert data type: %s' % data_type)
return {} # don't know how to convert data atom
# skip header & null-bytes, convert rest
return {fieldname: conversion(data_atom[8:])}
return parse_data_atom
@classmethod
def make_number_parser(cls, fieldname1, fieldname2):
def _(data_atom):
number_data = data_atom[8:14]
numbers = struct.unpack('>HHH', number_data)
# for some reason the first number is always irrelevant.
return {fieldname1: numbers[1], fieldname2: numbers[2]}
return _
@classmethod
def parse_id3v1_genre(cls, data_atom):
# dunno why the genre is offset by -1 but this is how mutagen does it
idx = struct.unpack('>H', data_atom[8:])[0] - 1
if idx < len(ID3.ID3V1_GENRES):
return {'genre': ID3.ID3V1_GENRES[idx]}
return {'genre': None}
@classmethod
def parse_audio_sample_entry(cls, data):
# this atom also contains the esds atom:
# https://ffmpeg.org/doxygen/0.6/mov_8c-source.html
# http://xhelmboyx.tripod.com/formats/mp4-layout.txt
datafh = BytesIO(data)
datafh.seek(16, os.SEEK_CUR) # jump over version and flags
channels = struct.unpack('>H', datafh.read(2))[0]
bit_depth = struct.unpack('>H', datafh.read(2))[0]
datafh.seek(2, os.SEEK_CUR) # jump over QT compr id & pkt size
sr = struct.unpack('>I', datafh.read(4))[0]
esds_atom_size = struct.unpack('>I', data[28:32])[0]
esds_atom = BytesIO(data[36:36 + esds_atom_size])
# http://sasperger.tistory.com/103
esds_atom.seek(22, os.SEEK_CUR) # jump over most data...
max_br = struct.unpack('>I', esds_atom.read(4))[0] / 1000 # use
avg_br = struct.unpack('>I', esds_atom.read(4))[0] / 1000 # kbit/s
return {'channels': channels, 'samplerate': sr, 'bitrate': avg_br}
@classmethod
def parse_mvhd(cls, data):
# http://stackoverflow.com/a/3639993/1191373
walker = BytesIO(data)
version = struct.unpack('b', walker.read(1))[0]
flags = walker.read(3)
if version == 0: # uses 32 bit integers for timestamps
walker.seek(8, os.SEEK_CUR) # jump over create & mod times
time_scale = struct.unpack('>I', walker.read(4))[0]
duration = struct.unpack('>I', walker.read(4))[0]
else: # version == 1: # uses 64 bit integers for timestamps
walker.seek(16, os.SEEK_CUR) # jump over create & mod times
time_scale = struct.unpack('>I', walker.read(4))[0]
duration = struct.unpack('>q', walker.read(8))[0]
return {'duration': float(duration) / time_scale}
@classmethod
def debug_atom(cls, data):
print(data) # use this function to inspect atoms in an atom tree
return {}
# The parser tree: Each key is an atom branch which is traversed if existing.
# Leaves of the parser tree are callables which receive the atom data.
# callables return {fieldname: value} which is applied to the tinytag instance.
META_DATA_TREE = {b'moov': { b'udta': {b'meta': {b'ilst': {
# see: http://atomicparsley.sourceforge.net/mpeg-4files.html
b'\xa9alb': {b'data': Parser.make_data_atom_parser('album')},
b'\xa9ART': {b'data': Parser.make_data_atom_parser('artist')},
b'aART': {b'data': Parser.make_data_atom_parser('albumartist')},
# b'cpil': {b'data': Parser.make_data_atom_parser('compilation')},
b'disk': {b'data': Parser.make_number_parser('disc', 'disc_total')},
# b'\xa9wrt': {b'data': Parser.make_data_atom_parser('composer')},
b'\xa9day': {b'data': Parser.make_data_atom_parser('year')},
b'\xa9gen': {b'data': Parser.make_data_atom_parser('genre')},
b'gnre': {b'data': Parser.parse_id3v1_genre},
b'\xa9nam': {b'data': Parser.make_data_atom_parser('title')},
b'trkn': {b'data': Parser.make_number_parser('track', 'track_total')},
# b'covr': {b'data': Parser.make_data_atom_parser('_image_data')},
}}}}}
# see: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
AUDIO_DATA_TREE = {
b'moov': {
b'mvhd': Parser.parse_mvhd,
b'trak': {b'mdia': {b"minf": {b"stbl": {b"stsd": {b'mp4a':
Parser.parse_audio_sample_entry
}}}}}
}
}
VERSIONED_ATOMS = set((b'meta', b'stsd')) # those have an extra 4 byte header
FLAGGED_ATOMS = set((b'stsd',)) # these also have an extra 4 byte header
def _determine_duration(self, fh):
return self._traverse_atoms(fh, path=self.AUDIO_DATA_TREE)
def _parse_tag(self, fh):
return self._traverse_atoms(fh, path=self.META_DATA_TREE)
def _traverse_atoms(self, fh, path, indent=0, stop_pos=None, curr_path=None):
header_size = 8
atom_header = fh.read(header_size)
while len(atom_header) == header_size:
atom_size = struct.unpack('>I', atom_header[:4])[0] - header_size
atom_type = atom_header[4:]
if curr_path is None: # keep track how we traversed in the tree
curr_path = [atom_type]
if atom_size <= 0: # empty atom, jump to next one
atom_header = fh.read(header_size)
continue
if DEBUG:
print('%s pos: %d atom: %s len: %d' % (' ' * 4 * len(curr_path), fh.tell() - header_size, atom_type, atom_size + header_size))
if atom_type in self.VERSIONED_ATOMS: # jump atom version for now
fh.seek(4, os.SEEK_CUR)
if atom_type in self.FLAGGED_ATOMS: # jump atom flags for now
fh.seek(4, os.SEEK_CUR)
sub_path = path.get(atom_type, None)
# if the path leaf is a dict, traverse deeper into the tree:
if issubclass(type(sub_path), MutableMapping):
atom_end_pos = fh.tell() + atom_size
self._traverse_atoms(fh, path=sub_path, stop_pos=atom_end_pos,
curr_path=curr_path + [atom_type])
# if the path-leaf is a callable, call it on the atom data
elif callable(sub_path):
for fieldname, value in sub_path(fh.read(atom_size)).items():
if DEBUG:
print(' ' * 4 * len(curr_path), 'FIELDNAME: ', fieldname)
if fieldname:
self._set_field(fieldname, value)
# if no action was specified using dict or callable, jump over atom
else:
fh.seek(atom_size, os.SEEK_CUR)
# check if we have reached the end of this branch:
if stop_pos and fh.tell() >= stop_pos:
return # return to parent (next parent node in tree)
atom_header = fh.read(header_size) # read next atom
class ID3(TinyTag):
FRAME_ID_TO_FIELD = { # Mapping from Frame ID to a field of the TinyTag
'TRCK': 'track', 'TRK': 'track',
'TYER': 'year', 'TYE': 'year',
'TALB': 'album', 'TAL': 'album',
'TPE1': 'artist', 'TP1': 'artist',
'TIT2': 'title', 'TT2': 'title',
'TCON': 'genre', 'TPOS': 'disc',
'TPE2': 'albumartist',
}
_MAX_ESTIMATION_SEC = 30
_CBR_DETECTION_FRAME_COUNT = 5
_USE_XING_HEADER = True # much faster, but can be deactivated for testing
ID3V1_GENRES = [
'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco',
'Funk', 'Grunge', 'Hip-Hop', 'Jazz', 'Metal', 'New Age', 'Oldies',
'Other', 'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack',
'Euro-Techno', 'Ambient', 'Trip-Hop', 'Vocal', 'Jazz+Funk', 'Fusion',
'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game',
'Sound Clip', 'Gospel', 'Noise', 'AlternRock', 'Bass', 'Soul', 'Punk',
'Space', 'Meditative', 'Instrumental Pop', 'Instrumental Rock',
'Ethnic', 'Gothic','Darkwave', 'Techno-Industrial', 'Electronic',
'Pop-Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy', 'Cult',
'Gangsta', 'Top 40', 'Christian Rap', 'Pop/Funk', 'Jungle',
'Native American', 'Cabaret', 'New Wave', 'Psychadelic', 'Rave',
'Showtunes', 'Trailer', 'Lo-Fi', 'Tribal', 'Acid Punk', 'Acid Jazz',
'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock',
# Wimamp Extended Genres
'Folk', 'Folk-Rock', 'National Folk', 'Swing', 'Fast Fusion', 'Bebob',
'Latin', 'Revival', 'Celtic', 'Bluegrass', 'Avantgarde', 'Gothic Rock',
'Progressive Rock', 'Psychedelic Rock', 'Symphonic Rock', 'Slow Rock',
'Big Band', 'Chorus', 'Easy Listening', 'Acoustic', 'Humour', 'Speech',
'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony', 'Booty Bass',
'Primus', 'Porn Groove', 'Satire', 'Slow Jam', 'Club', 'Tango', 'Samba',
'Folklore', 'Ballad', 'Power Ballad', 'Rhythmic Soul', 'Freestyle',
'Duet', 'Punk Rock', 'Drum Solo', 'A capella', 'Euro-House', 'Dance Hall',
'Goa', 'Drum & Bass',
# according to https://de.wikipedia.org/wiki/Liste_der_ID3v1-Genres:
'Club-House', 'Hardcore Techno', 'Terror', 'Indie', 'BritPop',
# don't use ethnic slur ("Negerpunk", WTF!)
'',
'Polsk Punk', 'Beat', 'Christian Gangsta Rap',
'Heavy Metal', 'Black Metal', 'Contemporary Christian',
'Christian Rock',
# WinAmp 1.91
'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'Jpop', 'Synthpop',
# WinAmp 5.6
'Abstract', 'Art Rock', 'Baroque', 'Bhangra', 'Big Beat', 'Breakbeat',
'Chillout', 'Downtempo', 'Dub', 'EBM', 'Eclectic', 'Electro',
'Electroclash', 'Emo', 'Experimental', 'Garage', 'Illbient',
'Industro-Goth', 'Jam Band', 'Krautrock', 'Leftfield', 'Lounge',
'Math Rock', 'New Romantic', 'Nu-Breakz', 'Post-Punk', 'Post-Rock',
'Psytrance', 'Shoegaze', 'Space Rock', 'Trop Rock', 'World Music',
'Neoclassical', 'Audiobook', 'Audio Theatre', 'Neue Deutsche Welle',
'Podcast', 'Indie Rock', 'G-Funk', 'Dubstep', 'Garage Rock', 'Psybient',
]
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
# save position after the ID3 tag for duration mesurement speedup
self._bytepos_after_id3v2 = 0
@classmethod
def set_estimation_precision(cls, estimation_in_seconds):
cls._MAX_ESTIMATION_SEC = estimation_in_seconds
# see this page for the magic values used in mp3:
# http://www.mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm
samplerates = [
[11025, 12000, 8000], # MPEG 2.5
[], # reserved
[22050, 24000, 16000], # MPEG 2
[44100, 48000, 32000], # MPEG 1
]
v1l1 = [0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0]
v1l2 = [0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0]
v1l3 = [0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0]
v2l1 = [0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0]
v2l2 = [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0]
v2l3 = v2l2
bitrate_by_version_by_layer = [
[None, v2l3, v2l2, v2l1], # MPEG Version 2.5 # note that the layers go
None, # reserved # from 3 to 1 by design.
[None, v2l3, v2l2, v2l1], # MPEG Version 2 # the first layer id is
[None, v1l3, v1l2, v1l1], # MPEG Version 1 # reserved
]
samples_per_frame = 1152 # the default frame size for mp3
channels_per_channel_mode = [
2, # 00 Stereo
2, # 01 Joint stereo (Stereo)
2, # 10 Dual channel (2 mono channels)
1, # 11 Single channel (Mono)
]
def _parse_xing_header(self, fh):
# see: http://www.mp3-tech.org/programmer/sources/vbrheadersdk.zip
fh.seek(4, os.SEEK_CUR) # read over Xing header
header_flags = struct.unpack('>i', fh.read(4))[0]
frames = byte_count = toc = vbr_scale = None
if header_flags & 1: # FRAMES FLAG
frames = struct.unpack('>i', fh.read(4))[0]
if header_flags & 2: # BYTES FLAG
byte_count = struct.unpack('>i', fh.read(4))[0]
if header_flags & 4: # TOC FLAG
toc = [struct.unpack('>i', fh.read(4))[0] for _ in range(100)]
if header_flags & 8: # VBR SCALE FLAG
vbr_scale = struct.unpack('>i', fh.read(4))[0]
return frames, byte_count, toc, vbr_scale
def _determine_duration(self, fh):
max_estimation_frames = (ID3._MAX_ESTIMATION_SEC*44100) // ID3.samples_per_frame
frame_size_accu = 0
header_bytes = 4
frames = 0 # count frames for determining mp3 duration
bitrate_accu = 0 # add up bitrates to find average bitrate
last_bitrates = [] # to detect CBR mp3s (multiple frames with same bitrates)
# seek to first position after id3 tag (speedup for large header)
fh.seek(self._bytepos_after_id3v2)
while True:
# reading through garbage until 11 '1' sync-bits are found
b = fh.peek(4)
if len(b) < 4:
break # EOF
sync, conf, bitrate_freq, rest = struct.unpack('BBBB', b[0:4])
br_id = (bitrate_freq >> 4) & 0x0F # biterate id
sr_id = (bitrate_freq >> 2) & 0x03 # sample rate id
padding = 1 if bitrate_freq & 0x02 > 0 else 0
mpeg_id = (conf >> 3) & 0x03
layer_id = (conf >> 1) & 0x03
channel_mode = (rest >> 6) & 0x03
self.channels = self.channels_per_channel_mode[channel_mode]
# check for eleven 1s, validate bitrate and sample rate
if not b[:2] > b'\xFF\xE0' or br_id > 14 or br_id == 0 or sr_id == 3:
idx = b.find(b'\xFF', 1) # invalid frame, find next sync header
if idx == -1:
idx = len(b) # not found: jump over the current peek buffer
fh.seek(max(idx, 1), os.SEEK_CUR)
continue
try:
self.samplerate = ID3.samplerates[mpeg_id][sr_id]
frame_bitrate = ID3.bitrate_by_version_by_layer[mpeg_id][layer_id][br_id]
except (IndexError, TypeError):
raise TinyTagException('mp3 parsing failed')
# There might be a xing header in the first frame that contains
# all the info we need, otherwise parse multiple frames to find the
# accurate average bitrate
if frames == 0 and ID3._USE_XING_HEADER:
xing_header_offset = b.find(b'Xing')
if xing_header_offset != -1:
fh.seek(xing_header_offset, os.SEEK_CUR)
xframes, byte_count, toc, vbr_scale = self._parse_xing_header(fh)
if xframes is not None and byte_count is not None:
self.duration = xframes * ID3.samples_per_frame / float(self.samplerate)
self.bitrate = byte_count * 8 / self.duration
self.audio_offset = fh.tell()
return
continue
frames += 1 # it's most probably an mp3 frame
bitrate_accu += frame_bitrate
if frames == 1:
self.audio_offset = fh.tell()
if frames <= ID3._CBR_DETECTION_FRAME_COUNT:
last_bitrates.append(frame_bitrate)
fh.seek(4, os.SEEK_CUR) # jump over peeked bytes
frame_length = (144000 * frame_bitrate) // self.samplerate + padding
frame_size_accu += frame_length
# if bitrate does not change over time its CBR
is_cbr = frames == ID3._CBR_DETECTION_FRAME_COUNT and len(set(last_bitrates)) == 1
if frames == max_estimation_frames or is_cbr:
# try to estimate duration
fh.seek(-128, 2) # jump to last byte (leaving out id3v1 tag)
audio_stream_size = fh.tell() - self.audio_offset
est_frame_count = audio_stream_size / (frame_size_accu / float(frames))
samples = est_frame_count * ID3.samples_per_frame
self.duration = samples / float(self.samplerate)
self.bitrate = bitrate_accu / frames
return
if frame_length > 1: # jump over current frame body
fh.seek(frame_length - header_bytes, os.SEEK_CUR)
if self.samplerate:
self.duration = frames * ID3.samples_per_frame / float(self.samplerate)
def _parse_tag(self, fh):
self._parse_id3v2(fh)
if not self.has_all_tags() and self.filesize > 128:
fh.seek(-128, os.SEEK_END) # try parsing id3v1 in the last 128 bytes
self._parse_id3v1(fh)
def _parse_id3v2(self, fh):
# for info on the specs, see: http://id3.org/Developer%20Information
header = struct.unpack('3sBBB4B', _read(fh, 10))
tag = codecs.decode(header[0], 'ISO-8859-1')
# check if there is an ID3v2 tag at the beginning of the file
if tag == 'ID3':
major, rev = header[1:3]
unsync = (header[3] & 0x80) > 0
extended = (header[3] & 0x40) > 0
experimental = (header[3] & 0x20) > 0
footer = (header[3] & 0x10) > 0
size = self._calc_size(header[4:8], 7)
self._bytepos_after_id3v2 = size
parsed_size = 0
if extended: # just read over the extended header.
size_bytes = struct.unpack('4B', _read(fh, 6)[0:4])
extd_size = self._calc_size(size_bytes, 7)
fh.seek(extd_size - 6, os.SEEK_CUR) # jump over extended_header
while parsed_size < size:
frame_size = self._parse_frame(fh, id3version=major)
if frame_size == 0:
break
parsed_size += frame_size
def _parse_id3v1(self, fh):
if fh.read(3) == b'TAG': # check if this is an ID3 v1 tag
asciidecode = lambda x: self._unpad(codecs.decode(x, 'latin1'))
fields = fh.read(30 + 30 + 30 + 4 + 30 + 1)
self._set_field('title', fields[:30], transfunc=asciidecode)
self._set_field('artist', fields[30:60], transfunc=asciidecode)
self._set_field('album', fields[60:90], transfunc=asciidecode)
self._set_field('year', fields[90:94], transfunc=asciidecode)
comment = fields[94:124]
if b'\x00\x00' < comment[-2:] < b'\x01\x00':
self._set_field('track', str(ord(comment[-1:])))
genre_id = ord(fields[124:125])
if genre_id < len(ID3.ID3V1_GENRES):
self.genre = ID3.ID3V1_GENRES[genre_id]
def _parse_frame(self, fh, id3version=False):
encoding = 'ISO-8859-1' # default encoding used in most mp3 tags
# ID3v2.2 especially ugly. see: http://id3.org/id3v2-00
frame_header_size = 6 if id3version == 2 else 10
frame_size_bytes = 3 if id3version == 2 else 4
binformat = '3s3B' if id3version == 2 else '4s4B2B'
bits_per_byte = 7 if id3version == 4 else 8 # only id3v2.4 is synchsafe
frame_header_data = fh.read(frame_header_size)
if len(frame_header_data) == 0:
return 0
frame = struct.unpack(binformat, frame_header_data)
frame_id = self._decode_string(frame[0])
frame_size = self._calc_size(frame[1:1+frame_size_bytes], bits_per_byte)
parsable = frame_id in ID3.FRAME_ID_TO_FIELD or frame_id == 'APIC'
if frame_size > 0:
# flags = frame[1+frame_size_bytes:] # dont care about flags.
if not parsable: # jump over unparsable frames
fh.seek(frame_size, os.SEEK_CUR)
return frame_size
content = fh.read(frame_size)
fieldname = ID3.FRAME_ID_TO_FIELD.get(frame_id)
if fieldname:
self._set_field(fieldname, content, self._decode_string)
elif frame_id == 'APIC' and self._load_image:
# See section 4.14: http://id3.org/id3v2.4.0-frames
mimetype_end_pos = content[1:].index(b'\x00')+1
desc_start_pos = mimetype_end_pos + 2
desc_end_pos = desc_start_pos + content[desc_start_pos:].index(b'\x00')
if content[desc_end_pos:desc_end_pos+1] == b'\x00':
desc_end_pos += 1 # the description ends with 1 or 2 null bytes
self._image_data = content[desc_end_pos:]
return frame_size
return 0
def _decode_string(self, b):
# it's not my fault, this is the spec.
try:
first_byte = b[:1]
if first_byte == b'\x00':
return self._unpad(codecs.decode(b[1:], 'ISO-8859-1'))
elif first_byte == b'\x01':
# read byte order mark to determine endianess
encoding = 'UTF-16be' if b[1:3] == b'\xfe\xff' else 'UTF-16le'
# strip the bom and optional null bytes
bytestr = b[3:-1] if len(b) % 2 == 0 else b[3:]
return self._unpad(codecs.decode(bytestr, encoding))
elif first_byte == b'\x02':
# strip optional null byte
bytestr = b[1:-1] if len(b) % 2 == 0 else b[1:]
return self._unpad(codecs.decode(bytestr, 'UTF-16le'))
elif first_byte == b'\x03':
return codecs.decode(b[1:], 'UTF-8')
return self._unpad(codecs.decode(b, 'ISO-8859-1'))
except UnicodeDecodeError:
raise TinyTagException('Error decoding ID3 Tag!')
def _calc_size(self, bytestr, bits_per_byte):
# length of some mp3 header fields is described
# by "7-bit-bytes" or sometimes 8-bit bytes...
ret = 0
for b in bytestr:
ret <<= bits_per_byte
ret += b
return ret
class Ogg(TinyTag):
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
self._tags_parsed = False
self._max_samplenum = 0 # maximum sample position ever read
def _determine_duration(self, fh):
MAX_PAGE_SIZE = 65536 # https://xiph.org/ogg/doc/libogg/ogg_page.html
if not self._tags_parsed:
self._parse_tag(fh) # determine sample rate
fh.seek(0) # and rewind to start
if self.filesize > MAX_PAGE_SIZE:
fh.seek(-MAX_PAGE_SIZE, 2) # go to last possible page position
while True:
b = fh.peek(4)
if len(b) == 0:
return # EOF
if b[:4] == b'OggS': # look for an ogg header
for packet in self._parse_pages(fh):
pass # parse all remaining pages
self.duration = self._max_samplenum / float(self.samplerate)
else:
idx = b.find(b'OggS') # try to find header in peeked data
seekpos = idx if idx != -1 else len(b) - 3
fh.seek(max(seekpos, 1), os.SEEK_CUR)
def _parse_tag(self, fh):
page_start_pos = fh.tell() # set audio_offest later if its audio data
for packet in self._parse_pages(fh):
walker = BytesIO(packet)
if packet[0:7] == b"\x01vorbis":
(channels, self.samplerate, max_bitrate, bitrate,
min_bitrate) = struct.unpack("<B4i", packet[11:28])
if not self.audio_offset:
self.bitrate = bitrate / 1024
self.audio_offset = page_start_pos
elif packet[0:7] == b"\x03vorbis":
walker.seek(7, os.SEEK_CUR) # jump over header name
self._parse_vorbis_comment(walker)
elif packet[0:8] == b'OpusHead': # parse opus header
# https://www.videolan.org/developers/vlc/modules/codec/opus_header.c
# https://mf4.xiph.org/jenkins/view/opus/job/opusfile-unix/ws/doc/html/structOpusHead.html
walker.seek(8, os.SEEK_CUR) # jump over header name
(version, ch, _, sr, _, _) = struct.unpack("<BBHIHB", walker.read(11))
if (version & 0xF0) == 0: # only major version 0 supported
self.channels = ch
self.samplerate = sr
elif packet[0:8] == b'OpusTags': # parse opus metadata:
walker.seek(8, os.SEEK_CUR) # jump over header name
self._parse_vorbis_comment(walker)
else:
break
page_start_pos = fh.tell()
def _parse_vorbis_comment(self, fh):
# for the spec, see: http://xiph.org/vorbis/doc/v-comment.html
# discnumber tag based on: https://en.wikipedia.org/wiki/Vorbis_comment
comment_type_to_attr_mapping = {
'album': 'album',
'albumartist': 'albumartist',
'title': 'title',
'artist': 'artist',
'date': 'year',
'tracknumber': 'track',
'discnumber': 'disc',
'genre': 'genre'
}
vendor_length = struct.unpack('I', fh.read(4))[0]
fh.seek(vendor_length, os.SEEK_CUR) # jump over vendor
elements = struct.unpack('I', fh.read(4))[0]
for i in range(elements):
length = struct.unpack('I', fh.read(4))[0]
keyvalpair = codecs.decode(fh.read(length), 'UTF-8')
if '=' in keyvalpair:
splitidx = keyvalpair.index('=')
key, value = keyvalpair[:splitidx], keyvalpair[splitidx+1:]
fieldname = comment_type_to_attr_mapping.get(key.lower())
if fieldname:
self._set_field(fieldname, value)
def _parse_pages(self, fh):
# for the spec, see: https://wiki.xiph.org/Ogg
previous_page = b'' # contains data from previous (continuing) pages
header_data = fh.read(27) # read ogg page header
while len(header_data) != 0:
header = struct.unpack('<4sBBqIIiB', header_data)
oggs, version, flags, pos, serial, pageseq, crc, segments = header
self._max_samplenum = max(self._max_samplenum, pos)
if oggs != b'OggS' or version != 0:
raise TinyTagException('Not a valid ogg file!')
segsizes = struct.unpack('B'*segments, fh.read(segments))
total = 0
for segsize in segsizes: # read all segments
total += segsize
if total < 255: # less than 255 bytes means end of page
yield previous_page + fh.read(total)
previous_page = b''
total = 0
if total != 0:
if total % 255 == 0:
previous_page += fh.read(total)
else:
yield previous_page + fh.read(total)
previous_page = b''
header_data = fh.read(27)
class Wave(TinyTag):
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
self._duration_parsed = False
def _determine_duration(self, fh):
# see: https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
# and: https://en.wikipedia.org/wiki/WAV
riff, size, fformat = struct.unpack('4sI4s', fh.read(12))
if riff != b'RIFF' or fformat != b'WAVE':
raise TinyTagException('not a wave file!')
channels, bitdepth = 2, 16 # assume CD quality
chunk_header = fh.read(8)
while len(chunk_header) == 8:
subchunkid, subchunksize = struct.unpack('4sI', chunk_header)
if subchunkid == b'fmt ':
_, channels, self.samplerate = struct.unpack('HHI', fh.read(8))
_, _, bitdepth = struct.unpack('<IHH', fh.read(8))
self.bitrate = self.samplerate * channels * bitdepth / 1024
elif subchunkid == b'data':
self.duration = float(subchunksize)/channels/self.samplerate/(bitdepth/8)
self.audio_offest = fh.tell() - 8 # rewind to data header
fh.seek(subchunksize, 1)
elif subchunkid == b'id3 ' or subchunkid == b'ID3 ':
id3 = ID3(fh, 0)
id3._parse_id3v2(fh)
self.update(id3)
else: # some other chunk, just skip the data
fh.seek(subchunksize, 1)
chunk_header = fh.read(8)
self._duration_parsed = True
def _parse_tag(self, fh):
if not self._duration_parsed:
self._determine_duration(fh) # parse_whole file to determine tags :(
class Flac(TinyTag):
METADATA_STREAMINFO = 0
METADATA_VORBIS_COMMENT = 4
def load(self, tags, duration, image=False):
if self._filehandler.read(4) != b'fLaC':
raise TinyTagException('Invalid flac header')
self._determine_duration(self._filehandler, skip_tags=not tags)
def _determine_duration(self, fh, skip_tags=False):
# for spec, see https://xiph.org/flac/ogg_mapping.html
header_data = fh.read(4)
while len(header_data):
meta_header = struct.unpack('B3B', header_data)
block_type = meta_header[0] & 0x7f
is_last_block = meta_header[0] & 0x80
size = self._bytes_to_int(meta_header[1:4])
# http://xiph.org/flac/format.html#metadata_block_streaminfo
if block_type == Flac.METADATA_STREAMINFO:
stream_info_header = fh.read(size)
if len(stream_info_header) < 34: # invalid streaminfo
break
header = struct.unpack('HH3s3s8B16s', stream_info_header)
# From the ciph documentation:
# py | <bits>
#----------------------------------------------
# H | <16> The minimum block size (in samples)
# H | <16> The maximum block size (in samples)
# 3s | <24> The minimum frame size (in bytes)
# 3s | <24> The maximum frame size (in bytes)
# 8B | <20> Sample rate in Hz.
# | <3> (number of channels)-1.
# | <5> (bits per sample)-1.
# | <36> Total samples in stream.
# 16s| <128> MD5 signature
#
min_blk, max_blk, min_frm, max_frm = header[0:4]
min_frm = self._bytes_to_int(struct.unpack('3B', min_frm))
max_frm = self._bytes_to_int(struct.unpack('3B', max_frm))
# channels-
# `. bits total samples
# |----- samplerate -----| |-||----| |---------~ ~----|
# 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000
# #---4---# #---5---# #---6---# #---7---# #--8-~ ~-12-#
self.samplerate = self._bytes_to_int(header[4:7]) >> 4
channels = ((header[6] >> 1) & 0x07) + 1
bit_depth = ((header[6] & 1) << 4) + ((header[7] & 0xF0) >> 4)
bit_depth = (bit_depth + 1)
total_sample_bytes = [(header[7] & 0x0F)] + list(header[8:12])
total_samples = self._bytes_to_int(total_sample_bytes)
md5 = header[12:]
self.duration = float(total_samples) / self.samplerate
if self.duration > 0:
self.bitrate = self.filesize / self.duration * 8 / 1024
#return
elif block_type == Flac.METADATA_VORBIS_COMMENT and not skip_tags:
oggtag = Ogg(fh, 0)
oggtag._parse_vorbis_comment(fh)
self.update(oggtag)
elif block_type >= 127:
return # invalid block type
else:
fh.seek(size, 1) # seek over this block
if is_last_block:
break
else:
header_data = fh.read(4)
class Wma(TinyTag):
ASF_CONTENT_DESCRIPTION_OBJECT = b'3&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel'
ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT = b'@\xa4\xd0\xd2\x07\xe3\xd2\x11\x97\xf0\x00\xa0\xc9^\xa8P'
STREAM_BITRATE_PROPERTIES_OBJECT = b'\xceu\xf8{\x8dF\xd1\x11\x8d\x82\x00`\x97\xc9\xa2\xb2'
ASF_FILE_PROPERTY_OBJECT = b'\xa1\xdc\xab\x8cG\xa9\xcf\x11\x8e\xe4\x00\xc0\x0c Se'
ASF_STREAM_PROPERTIES_OBJECT = b'\x91\x07\xdc\xb7\xb7\xa9\xcf\x11\x8e\xe6\x00\xc0\x0c Se'
STREAM_TYPE_ASF_AUDIO_MEDIA = b'@\x9ei\xf8M[\xcf\x11\xa8\xfd\x00\x80_\\D+'
# see:
# http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx
# and (japanese, but none the less helpful)
# http://uguisu.skr.jp/Windows/format_asf.html
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
self.__tag_parsed = False
def _determine_duration(self, fh):
if not self.__tag_parsed:
self._parse_tag(fh)
def read_blocks(self, fh, blocks):
# blocks are a list(tuple('fieldname', byte_count, cast_int), ...)
decoded = {}
for block in blocks:
val = fh.read(block[1])
if block[2]:
val = self._bytes_to_int_le(val)
decoded[block[0]] = val
return decoded
def __bytes_to_guid(self, obj_id_bytes):
return '-'.join([
hex(self._bytes_to_int_le(obj_id_bytes[:-12]))[2:].zfill(6),
hex(self._bytes_to_int_le(obj_id_bytes[-12:-10]))[2:].zfill(4),
hex(self._bytes_to_int_le(obj_id_bytes[-10:-8]))[2:].zfill(4),
hex(self._bytes_to_int(obj_id_bytes[-8:-6]))[2:].zfill(4),
hex(self._bytes_to_int(obj_id_bytes[-6:]))[2:].zfill(12),
])
def __decode_string(self, bytestring):
return self._unpad(codecs.decode(bytestring, 'utf-16'))
def __decode_ext_desc(self, value_type, value):
''' decode ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT values'''
if value_type == 0: # Unicode string
return self.__decode_string(value)
elif value_type == 1: # BYTE array
return value
elif 1 < value_type < 6: # DWORD / QWORD / WORD
return self._bytes_to_int_le(value)
def _parse_tag(self, fh):
self.__tag_parsed = True
guid = fh.read(16) # 128 bit GUID
if guid != b'0&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel':
return # not a valid ASF container! see: http://www.garykessler.net/library/file_sigs.html
size = struct.unpack('Q', fh.read(8))[0]
obj_count = struct.unpack('I', fh.read(4))[0]
if fh.read(2) != b'\x01\x02':
# http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx#_Toc521913958
return # not a valid asf header!
while True:
object_id = fh.read(16)
object_size = self._bytes_to_int_le(fh.read(8))
if object_size == 0 or object_size > self.filesize:
break # invalid object, stop parsing.
if object_id == Wma.ASF_CONTENT_DESCRIPTION_OBJECT:
len_blocks = self.read_blocks(fh, [
('title_length', 2, True),
('author_length', 2, True),
('copyright_length', 2, True),
('description_length', 2, True),
('rating_length', 2, True),
])
data_blocks = self.read_blocks(fh, [
('title', len_blocks['title_length'], False),
('artist', len_blocks['author_length'], False),
('', len_blocks['copyright_length'], True),
('', len_blocks['description_length'], True),
('', len_blocks['rating_length'], True),
])
for field_name, bytestring in data_blocks.items():
if field_name:
self._set_field(field_name, bytestring, self.__decode_string)
elif object_id == Wma.ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT:
mapping = {
'WM/TrackNumber': 'track',
'WM/PartOfSet': 'disc',
'WM/Year': 'year',
'WM/AlbumArtist': 'albumartist',
'WM/Genre': 'genre',
'WM/AlbumTitle': 'album',
}
# see: http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx#_Toc509555195
descriptor_count = self._bytes_to_int_le(fh.read(2))
for _ in range(descriptor_count):
name_len = self._bytes_to_int_le(fh.read(2))
name = self.__decode_string(fh.read(name_len))
value_type = self._bytes_to_int_le(fh.read(2))
value_len = self._bytes_to_int_le(fh.read(2))
value = fh.read(value_len)
field_name = mapping.get(name)
if field_name:
field_value = self.__decode_ext_desc(value_type, value)
self._set_field(field_name, field_value)
elif object_id == Wma.ASF_FILE_PROPERTY_OBJECT:
blocks = self.read_blocks(fh, [
('file_id', 16, False),
('file_size', 8, False),
('creation_date', 8, True),
('data_packets_count', 8, True),
('play_duration', 8, True),
('send_duration', 8, True),
('preroll', 8, True),
('flags', 4, False),
('minimum_data_packet_size', 4, True),
('maximum_data_packet_size', 4, True),
('maximum_bitrate', 4, False),
])
self.duration = blocks.get('play_duration') / float(10000000)
elif object_id == Wma.ASF_STREAM_PROPERTIES_OBJECT:
blocks = self.read_blocks(fh, [
('stream_type', 16, False),
('error_correction_type', 16, False),
('time_offset', 8, True),
('type_specific_data_length', 4, True),
('error_correction_data_length', 4, True),
('flags', 2, True),
('reserved', 4, False)
])
already_read = 0
if blocks['stream_type'] == Wma.STREAM_TYPE_ASF_AUDIO_MEDIA:
stream_info = self.read_blocks(fh, [
('codec_id_format_tag', 2, True),
('number_of_channels', 2, True),
('samples_per_second', 4, True),
('avg_bytes_per_second', 4, True),
('block_alignment', 2, True),
('bits_per_sample', 2, True),
])
self.samplerate = stream_info['samples_per_second']
self.bitrate = stream_info['avg_bytes_per_second'] * 8 / float(1000)
already_read = 16
fh.seek(blocks['type_specific_data_length'] - already_read, os.SEEK_CUR)
fh.seek(blocks['error_correction_data_length'], os.SEEK_CUR)
else:
fh.seek(object_size - 24, os.SEEK_CUR) # read over onknown object ids
| 48,475 | Python | .py | 938 | 39.430704 | 142 | 0.552044 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,336 | __init__.py | devsnd_cherrymusic/tinytag/__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from .tinytag import TinyTag, TinyTagException, ID3, Ogg, Wave, Flac
import sys
__version__ = '0.15.0'
if __name__ == '__main__':
print(TinyTag.get(sys.argv[1])) | 210 | Python | .py | 7 | 28.142857 | 68 | 0.641791 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,337 | __init__.py | devsnd_cherrymusic/backport/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3,0):
from . import urllib
input = raw_input
else:
input = input
if sys.version_info < (3,2):
import optparse as argparse
argparse.ArgumentParser = argparse.OptionParser
argparse.ArgumentParser.add_argument = argparse.ArgumentParser.add_option
argparse.ArgumentParser.__parse_args__ = argparse.ArgumentParser.parse_args
def parseargs(self):
return self.__parse_args__()[0]
argparse.ArgumentParser.parse_args = parseargs
else:
import argparse
if (3,) <= sys.version_info < (3, 2):
import collections
def callable(x):
""" isinstance(x, collections.Callable)"""
return isinstance(x, collections.Callable)
else:
callable = callable
if sys.version_info < (3,0):
unichr = unichr
else:
unichr = chr
| 858 | Python | .py | 29 | 25.517241 | 79 | 0.696602 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,338 | __init__.py | devsnd_cherrymusic/backport/logging/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from logging import *
import sys
if sys.version_info < (2,7):
StreamHandler.__realinit__ = StreamHandler.__init__
def initStreamHandler(self,stream):
StreamHandler.__realinit__(self)
StreamHandler.__init__ = initStreamHandler
| 295 | Python | .py | 9 | 28.888889 | 55 | 0.692857 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,339 | __init__.py | devsnd_cherrymusic/backport/urllib/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from . import parse
from . import request
| 85 | Python | .py | 4 | 20.25 | 23 | 0.679012 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,340 | __init__.py | devsnd_cherrymusic/backport/urllib/request/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3,):
from urllib2 import *
| 109 | Python | .py | 5 | 19.8 | 27 | 0.640777 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,341 | __init__.py | devsnd_cherrymusic/backport/urllib/parse/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3,):
from urllib2 import *
from urllib import unquote
from urlparse import urlparse
| 174 | Python | .py | 7 | 22 | 33 | 0.692771 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,342 | _backported.py | devsnd_cherrymusic/backport/collections/_backported.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
# http://code.activestate.com/recipes/576611-counter-class/
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
| 10,402 | Python | .py | 261 | 31.141762 | 85 | 0.565252 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,343 | __init__.py | devsnd_cherrymusic/backport/collections/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from collections import *
if 'OrderedDict' not in dir():
from _backported import Counter
from _backported import OrderedDict
| 178 | Python | .py | 6 | 27 | 39 | 0.723529 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,344 | __init__.py | devsnd_cherrymusic/backport/configparser/__init__.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3,0):
from ConfigParser import ConfigParser
else:
from configparser import ConfigParser
| 174 | Python | .py | 7 | 22.571429 | 41 | 0.73494 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,345 | conf.py | devsnd_cherrymusic/doc/sphinx/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 1 23:32:37 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
import cherrymusicserver as cherry
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CherryMusic'
copyright = '2012 - 2014, Tom Wallroth, with Tilman Boerner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = os.path.splitext(cherry.VERSION)[0]
# The full version, including alpha/beta/rc tags.
release = cherry.VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
# 'textcolor': '#333333',
'headingcolor': '#892601',
'linkcolor': '#2c5792',
'visitedlinkcolor': '#0c3762',
# 'hoverlinkcolor': '#0c3762',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
html_title = 'CherryMusic %s documentation' % (cherry.VERSION,)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cherrymusicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CherryMusic.tex', 'CherryMusic Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'CherryMusic', 'CherryMusic Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CherryMusic', 'CherryMusic Documentation',
'Author', 'CherryMusic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'CherryMusic'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2012 - 2014, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 9,483 | Python | .py | 219 | 41.744292 | 80 | 0.723335 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,346 | release.py | devsnd_cherrymusic/devscripts/release.py | #!/usr/bin/python3
import subprocess
import os
import sys
import codecs
import time
usage = """
%s --major
prepare a major release, e.g. 1.3.5 --> 2.3.5
%s --minor
prepare a minor release, e.g. 1.3.5 --> 1.4.5
%s --path
prepare a patch release, e.g. 1.3.5 --> 1.3.6
""" % (__file__,__file__,__file__,)
if (2 > len(sys.argv) == 1) or not sys.argv[1] in ['--major','--minor','--patch']:
print(usage)
sys.exit(1)
else:
release_type = sys.argv[1][2:] # = 'major' 'minor' or 'patch'
CM_MAIN_FOLDER = os.path.join(os.path.dirname(__file__), '..')
os.chdir(CM_MAIN_FOLDER)
output = subprocess.check_output(['python', '-c', 'import cherrymusicserver; print(cherrymusicserver.__version__)'])
rawcmversion = codecs.decode(output, 'UTF-8')
major, minor, patch = [int(v) for v in rawcmversion.split('.')]
version_now = (major, minor, patch)
if release_type == 'major':
version_next = (major+1, 0, 0)
elif release_type == 'minor':
version_next = (major, minor+1, 0)
elif release_type == 'patch':
version_next = (major, minor, patch+1)
######## CHANGE INIT SCRIPT VERSION NUMBER #####
initscript = None
with open('cherrymusicserver/__init__.py', 'r', encoding='UTF-8') as fh:
initscript = fh.read()
version_line_tpl = '''VERSION = "%d.%d.%d"'''
version_now_line = version_line_tpl % version_now
version_next_line = version_line_tpl % version_next
if initscript.find(version_now_line) == -1:
print('''Cannot find version string in startup script! Looking for:
%s
''' % version_now_line)
sys.exit(1)
print('Changing version number in startup script. %s --> %s' %
(version_now_line, version_next_line))
initscript = initscript.replace(version_now_line, version_next_line)
with open('cherrymusicserver/__init__.py', 'w', encoding='UTF-8') as fh:
fh.write(initscript)
######## UPDATE CHANGELOG #####
changelog_lines = None
t = time.gmtime()
with open('CHANGES', 'r', encoding='UTF-8') as fh:
changelog_lines = fh.readlines()
with open('CHANGES', 'w', encoding='UTF-8') as fh:
fh.write('Changelog\n---------\n\n')
fh.write('%d.%d.%d ' % version_next)
fh.write('(%d-%02d-%02d)\n' % (t.tm_year, t.tm_mon, t.tm_mday))
fh.write(' - FEATURE: ... new feature here!\n')
fh.write(' - FIXED:\n')
fh.write(' - IMPROVEMENT:\n\n')
fh.write(''.join(changelog_lines[3:])) # leave out header
subprocess.call(['nano', 'CHANGES'])
####### PREPARE COMMIT AND REVIEW DIFF
subprocess.call(['git', 'add', 'CHANGES'])
subprocess.call(['git', 'add', 'cherrymusicserver/__init__.py'])
subprocess.call(['git', 'diff', '--staged'])
if input('Are you happy now? (y/n)') != 'y':
print('''user unhappy. revert changes with
git checkout CHANGES
git checkout cherrymusicserver/__init__.py
''')
sys.exit(1)
print('creating tagged commit...')
version_name = 'release %d.%d.%d' % version_next
tag_name = '%d.%d.%d' % version_next
subprocess.call(['git', 'commit', '-m', '"%s"' % version_name])
subprocess.call(['git', 'tag', '-a', '-m', '"%s"' % version_name, tag_name])
print('''all done, you can push the changes now! e.g.:
git push --tags
''')
| 3,142 | Python | .py | 80 | 36.4375 | 116 | 0.638898 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,347 | deploy.py | devsnd_cherrymusic/devscripts/deploy.py | #!/usr/bin/python3
import subprocess as sp
import re
import os
import hashlib
MAIN_CM_FOLDER = os.path.dirname(os.path.dirname(__file__))
DEVEL_INPUT_HTML = os.path.join(MAIN_CM_FOLDER, 'res/devel.html')
MAIN_OUTPUT_HTML = os.path.join(MAIN_CM_FOLDER, 'res/dist/main.html')
LESSC = 'lessc'
JSMIN = 'jsmin'
def prog_exists(exe):
try:
with open(os.devnull,'w') as devnull:
prog = sp.Popen([exe], stdin=devnull, stdout=devnull)
stout, sterr = prog.communicate('')
except (IOError, OSError):
print('Warning: "%s" was not found.' % exe)
return False
return True
if not (prog_exists(LESSC) and prog_exists(JSMIN)):
print('''=== WARNING: CANNOT DEPLOY ===
For automatic deployment, please install jsmin and the less-css compiler
and make sure they are in your $PATH.''')
exit(0)
def compile_less(in_less_file, out_css_file):
LESSC_OPTS = ['--include-path='+os.path.dirname(in_less_file),'-'] #['--yui-compress', '-']
print(" compiling %s to %s"%(in_less_file, out_css_file))
with open(in_less_file, 'rb') as fr:
with open(out_css_file, 'wb') as fw:
less_file_dir = os.path.dirname(in_less_file)
compiler = sp.Popen([LESSC]+LESSC_OPTS,
stdin=sp.PIPE,
stdout=sp.PIPE)
stout, sterr = compiler.communicate(fr.read())
fw.write(stout)
print(" Wrote %s bytes."% fw.tell())
def parse_args(argsstr):
argpairs = [x.split('=') for x in argsstr.strip().split(' ')]
return dict(argpairs)
def match_less_compile(match):
args = parse_args(match.group(1))
lessfile = re.findall('href="([^"]+)', match.group(2))[0]
outfile = args['out']
compile_less(lessfile, outfile)
return '<link href="%s" media="all" rel="stylesheet" type="text/css" />' % outfile
def compile_jsmin(instr, outfile):
with open(outfile, 'wb') as fw:
compiler = sp.Popen([JSMIN], stdin=sp.PIPE, stdout=sp.PIPE)
stout, sterr = compiler.communicate(instr)
fw.write(stout)
print("compressed to %s bytes."% fw.tell())
print("that's %d%% less" % (100 - fw.tell()/len(instr)*100) )
def match_js_concat_min(match):
args = parse_args(match.group(1))
jsstr = b''
for scriptpath in re.findall('<script.*src="([^"]+)"', match.group(2)):
with open(scriptpath, 'rb') as script:
jsstr += script.read()
jsstr += b';\n'
jshash = hashlib.md5(jsstr).hexdigest()
print('calculated hash %s' % jshash)
print('js scripts uncompressed %d bytes' % len(jsstr))
outfilename = args['out']
#dotpos = outfilename.rindex('.')
#outfilename = outfilename[:dotpos]+jshash+outfilename[dotpos:]
compile_jsmin(jsstr, outfilename)
return '<script type="text/javascript" src="%s"></script>' % outfilename
def remove_whitespace(html):
no_white = re.sub('\s+', ' ', html, flags=re.MULTILINE)
print('removed whitespace. before %d bytes, after %d bytes.' % (len(html), len(no_white)))
print("that's %d%% less" % (100 - len(no_white)/len(html)*100) )
return no_white
html = None
with open(DEVEL_INPUT_HTML, 'r') as develhtml:
html = develhtml.read()
html = re.sub('<!--LESS-TO-CSS-BEGIN([^>]*)-->(.*)<!--LESS-TO-CSS-END-->',
match_less_compile,
html,
flags=re.MULTILINE | re.DOTALL)
html = re.sub('<!--REMOVE-BEGIN-->(.*)<!--REMOVE-END-->',
'',
html,
flags=re.MULTILINE | re.DOTALL)
html = re.sub('<!--COMPRESS-JS-BEGIN([^>]*)-->(.*)<!--COMPRESS-JS-END-->',
match_js_concat_min,
html,
flags=re.MULTILINE | re.DOTALL)
html = remove_whitespace(html)
with open(MAIN_OUTPUT_HTML, 'w') as mainhtml:
mainhtml.write(html)
| 3,846 | Python | .py | 90 | 35.844444 | 95 | 0.607439 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,348 | scrollspy.js | devsnd_cherrymusic/res/bootstrap3/js/scrollspy.js | /* ========================================================================
* Bootstrap: scrollspy.js v3.0.0
* http://twbs.github.com/bootstrap/javascript.html#scrollspy
* ========================================================================
* Copyright 2012 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ======================================================================== */
+function ($) { "use strict";
// SCROLLSPY CLASS DEFINITION
// ==========================
function ScrollSpy(element, options) {
var href
var process = $.proxy(this.process, this)
this.$element = $(element).is('body') ? $(window) : $(element)
this.$body = $('body')
this.$scrollElement = this.$element.on('scroll.bs.scroll-spy.data-api', process)
this.options = $.extend({}, ScrollSpy.DEFAULTS, options)
this.selector = (this.options.target
|| ((href = $(element).attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
|| '') + ' .nav li > a'
this.offsets = $([])
this.targets = $([])
this.activeTarget = null
this.refresh()
this.process()
}
ScrollSpy.DEFAULTS = {
offset: 10
}
ScrollSpy.prototype.refresh = function () {
var offsetMethod = this.$element[0] == window ? 'offset' : 'position'
this.offsets = $([])
this.targets = $([])
var self = this
var $targets = this.$body
.find(this.selector)
.map(function () {
var $el = $(this)
var href = $el.data('target') || $el.attr('href')
var $href = /^#\w/.test(href) && $(href)
return ($href
&& $href.length
&& [[ $href[offsetMethod]().top + (!$.isWindow(self.$scrollElement.get(0)) && self.$scrollElement.scrollTop()), href ]]) || null
})
.sort(function (a, b) { return a[0] - b[0] })
.each(function () {
self.offsets.push(this[0])
self.targets.push(this[1])
})
}
ScrollSpy.prototype.process = function () {
var scrollTop = this.$scrollElement.scrollTop() + this.options.offset
var scrollHeight = this.$scrollElement[0].scrollHeight || this.$body[0].scrollHeight
var maxScroll = scrollHeight - this.$scrollElement.height()
var offsets = this.offsets
var targets = this.targets
var activeTarget = this.activeTarget
var i
if (scrollTop >= maxScroll) {
return activeTarget != (i = targets.last()[0]) && this.activate(i)
}
for (i = offsets.length; i--;) {
activeTarget != targets[i]
&& scrollTop >= offsets[i]
&& (!offsets[i + 1] || scrollTop <= offsets[i + 1])
&& this.activate( targets[i] )
}
}
ScrollSpy.prototype.activate = function (target) {
this.activeTarget = target
$(this.selector)
.parents('.active')
.removeClass('active')
var selector = this.selector
+ '[data-target="' + target + '"],'
+ this.selector + '[href="' + target + '"]'
var active = $(selector)
.parents('li')
.addClass('active')
if (active.parent('.dropdown-menu').length) {
active = active
.closest('li.dropdown')
.addClass('active')
}
active.trigger('activate')
}
// SCROLLSPY PLUGIN DEFINITION
// ===========================
var old = $.fn.scrollspy
$.fn.scrollspy = function (option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.scrollspy')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))
if (typeof option == 'string') data[option]()
})
}
$.fn.scrollspy.Constructor = ScrollSpy
// SCROLLSPY NO CONFLICT
// =====================
$.fn.scrollspy.noConflict = function () {
$.fn.scrollspy = old
return this
}
// SCROLLSPY DATA-API
// ==================
$(window).on('load', function () {
$('[data-spy="scroll"]').each(function () {
var $spy = $(this)
$spy.scrollspy($spy.data())
})
})
}(window.jQuery);
| 4,642 | Python | .py | 125 | 31.96 | 138 | 0.56289 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,349 | scrollspy.js | devsnd_cherrymusic/res/bootstrap3/js/tests/unit/scrollspy.js | $(function () {
module("scrollspy")
test("should provide no conflict", function () {
var scrollspy = $.fn.scrollspy.noConflict()
ok(!$.fn.scrollspy, 'scrollspy was set back to undefined (org value)')
$.fn.scrollspy = scrollspy
})
test("should be defined on jquery object", function () {
ok($(document.body).scrollspy, 'scrollspy method is defined')
})
test("should return element", function () {
ok($(document.body).scrollspy()[0] == document.body, 'document.body returned')
})
test("should switch active class on scroll", function () {
var sectionHTML = '<div id="masthead"></div>'
, $section = $(sectionHTML).append('#qunit-fixture')
, topbarHTML ='<div class="topbar">'
+ '<div class="topbar-inner">'
+ '<div class="container">'
+ '<h3><a href="#">Bootstrap</a></h3>'
+ '<ul class="nav">'
+ '<li><a href="#masthead">Overview</a></li>'
+ '</ul>'
+ '</div>'
+ '</div>'
+ '</div>'
, $topbar = $(topbarHTML).scrollspy()
ok($topbar.find('.active', true))
})
})
| 1,194 | Python | .py | 30 | 30.966667 | 86 | 0.535004 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,350 | update_translations.py | devsnd_cherrymusic/res/i18n/update_translations.py | #!/usr/bin/python3
import subprocess
import os
currdir = os.path.relpath(os.path.dirname(__file__), start=os.getcwd())
sourcedir = os.path.normpath(os.path.join(currdir, '..', '..', 'cherrymusicserver'))
print('updating pot file')
subprocess.call('xgettext --language=Python --keyword=_ --add-comments=i18n --output='+currdir+'/cherrymusic.pot --from-code=UTF-8 `find '+sourcedir+' -name "*.py"`', shell=True)
print('updating all translations')
for translation in os.listdir(currdir):
transfile = os.path.join(currdir, translation)
if os.path.isdir(transfile):
print(' merging %s' % transfile)
subprocess.call('msgmerge --update '+transfile+'/LC_MESSAGES/default.po '+currdir+'/cherrymusic.pot', shell=True)
print(' compiling %s' % transfile)
subprocess.call('msgfmt -o '+transfile+'/LC_MESSAGES/default.mo -v '+transfile+'/LC_MESSAGES/default.po', shell=True)
| 910 | Python | .py | 15 | 56.8 | 178 | 0.702915 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,351 | __init__.py | devsnd_cherrymusic/audiotranscode/__init__.py | #!/usr/bin/python3
"""
audiotranscode
Copyright (c) 2013 Tom Wallroth
Sources on github:
http://github.com/devsnd/audiotranscode/
licensed under GNU GPL version 3 (or later)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__version__ = '0.2'
import subprocess
import os
import time
from distutils.spawn import find_executable
MIMETYPES = {
'mp3': 'audio/mpeg',
'ogg': 'audio/ogg',
'oga': 'audio/ogg',
'flac': 'audio/flac',
'aac': 'audio/aac',
'm4a': 'audio/m4a',
'wav': 'audio/wav',
'wma' : 'audio/x-ms-wma',
'opus': 'audio/ogg; codecs=opus',
}
class Transcoder(object):
"""super class for encoders and decoders"""
devnull = open(os.devnull, 'w')
def __init__(self):
self.command = ['']
def available(self):
"""checks if the command defined in the encoder or decoder is
available by calling it once"""
return bool(find_executable(self.command[0]))
class Encoder(Transcoder):
"""encoder"""
def __init__(self, filetype, command):
Transcoder.__init__(self)
self.filetype = filetype
self.mimetype = MIMETYPES[filetype]
self.command = command
def encode(self, decoder_process, bitrate):
"""encodes the raw audio stream coming from the decoder_process
using the spedcified command"""
# get the absolute path under which the executable is found
cmd = [find_executable(self.command[0])] + self.command[1:]
if 'BITRATE' in cmd:
cmd[cmd.index('BITRATE')] = str(bitrate)
if 'KBITRATE' in cmd:
cmd[cmd.index('KBITRATE')] = str(bitrate) + 'k'
return subprocess.Popen(cmd,
stdin=decoder_process.stdout,
stdout=subprocess.PIPE,
stderr=Transcoder.devnull
)
def __str__(self):
return "<Encoder type='%s' cmd='%s'>" % (self.filetype,
str(' '.join(self.command)))
def __repr__(self):
return self.__str__()
class Decoder(Transcoder):
"""decoder"""
def __init__(self, filetype, command):
Transcoder.__init__(self)
self.filetype = filetype
self.mimetype = MIMETYPES[filetype]
self.command = command
def decode(self, filepath, starttime=0):
"""returns the process the decodes the file to a raw audio stream"""
# get the absolute path under which the executable is found
cmd = [find_executable(self.command[0])] + self.command[1:]
if 'INPUT' in cmd:
cmd[cmd.index('INPUT')] = filepath
if 'STARTTIME' in cmd:
hours, minutes, seconds = starttime//3600, starttime//60%60, starttime%60
cmd[cmd.index('STARTTIME')] = '%d:%d:%d' % (hours, minutes, seconds)
return subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=Transcoder.devnull
)
def __str__(self):
return "<Decoder type='%s' cmd='%s'>" % (self.filetype,
str(' '.join(self.command)))
def __repr__(self):
return self.__str__()
class TranscodeError(Exception):
"""exception for if either a decoder or encoder error has occurred"""
def __init__(self, value):
Exception.__init__(self, value)
self.value = value
def __str__(self):
return repr(self.value)
class EncodeError(TranscodeError):
"""exception if an encoder is missing"""
def __init__(self, value):
TranscodeError.__init__(self, value)
self.value = value
def __str__(self):
return repr(self.value)
class DecodeError(TranscodeError):
"""exception if a decoder is missing"""
def __init__(self, value):
TranscodeError.__init__(self, value)
self.value = value
def __str__(self):
return repr(self.value)
def _filetype(filepath):
"""returns the file extension of a file"""
if '.' in filepath:
return filepath.lower()[filepath.rindex('.')+1:]
def mime_type(file_extension):
"""return a mimetype based on the file extension"""
return MIMETYPES.get(file_extension)
class AudioTranscode:
"""main class that manages encoders and decoders
call transcode(infile, outfile) for file transformations
or transcode_stream to get a generator of the encoded stream"""
READ_BUFFER = 1024
Encoders = [
# encoders take input from stdin and write output to stout
# Encoder('ogg', ['ffmpeg', '-i', '-', '-f', 'ogg', '-c:a', 'libvorbis', '-b', 'KBITRATE', '-']),
Encoder('ogg', ['oggenc', '--resample', '44100', '-b', 'BITRATE', '-']),
Encoder('mp3', ['lame', '-b', 'BITRATE', '-', '-']),
Encoder('aac', ['faac', '-b', 'BITRATE', '-P', '-X', '-o', '-', '-']),
Encoder('m4a', ['faac', '-b', 'BITRATE', '-P', '-X', '-o', '-', '-']),
Encoder('flac', ['flac', '--force-raw-format', '--endian=little',
'--channels=2', '--bps=16', '--sample-rate=44100',
'--sign=signed', '-o', '-', '-']),
Encoder('wav', ['cat']),
Encoder('opus', ['opusenc', '--bitrate', 'BITRATE', '--quiet',
'-', '-']),
]
Decoders = [
#INPUT is replaced with filepath
Decoder('mp3', ['mpg123', '-w', '-', 'INPUT']),
Decoder('mp3', ['ffmpeg', '-ss', 'STARTTIME',
'-i', 'INPUT', '-f', 'wav',
'-acodec', 'pcm_s16le', '-']),
Decoder('wma', ['ffmpeg', '-ss', 'STARTTIME',
'-i', 'INPUT', '-f', 'wav',
'-acodec', 'pcm_s16le', '-']),
Decoder('ogg', ['oggdec', '-Q', '-b', '16', '-o', '-', 'INPUT']),
Decoder('ogg', ['ffmpeg',
'-ss', 'STARTTIME',
'-i', 'INPUT',
'-f', 'wav',
'-acodec', 'pcm_s16le',
'-']),
# duplicate ogg decoders for oga files
Decoder('oga', ['oggdec', '-Q', '-b', '16', '-o', '-', 'INPUT']),
Decoder('oga', ['ffmpeg',
'-ss', 'STARTTIME',
'-i', 'INPUT',
'-f', 'wav',
'-acodec', 'pcm_s16le',
'-']),
Decoder('flac', ['flac', '-F', '-d', '-c', 'INPUT']),
Decoder('aac', ['faad', '-w', 'INPUT']),
# prefer ffmpeg over faad for decoding to handle ALAC streams #584
Decoder('m4a', ['ffmpeg', '-ss', 'STARTTIME',
'-i', 'INPUT', '-f', 'wav',
'-acodec', 'pcm_s16le', '-']),
Decoder('m4a', ['faad', '-w', 'INPUT']),
Decoder('wav', ['cat', 'INPUT']),
Decoder('opus', ['opusdec', 'INPUT', '--force-wav', '--quiet', '-']),
]
def __init__(self, debug=False):
self.debug = debug
self.available_encoders = [enc for enc in AudioTranscode.Encoders
if enc.available()]
self.available_decoders = [dec for dec in AudioTranscode.Decoders
if dec.available()]
self.bitrate = {'mp3': 160, 'ogg': 128, 'aac': 128, 'opus': '64'}
def available_encoder_formats(self):
"""returns the names of all available encoder formats"""
return set(enc.filetype for enc in self.available_encoders)
def available_decoder_formats(self):
"""returns the names of all available decoder formats"""
return set(dec.filetype for dec in self.available_decoders)
def _decode(self, filepath, decoder=None, starttime=0):
"""find the correct decoder and return a decoder process"""
if not os.path.exists(filepath):
filepath = os.path.abspath(filepath)
errmsg = 'File not Found! Cannot decode "file" %s'
raise IOError(errmsg % filepath)
filetype = _filetype(filepath)
if not filetype in self.available_decoder_formats():
errmsg = 'No decoder available to handle filetype %s'
raise DecodeError(errmsg % filetype)
elif not decoder:
for dec in self.available_decoders:
if dec.filetype == filetype:
decoder = dec
break
if self.debug:
print(decoder)
return decoder.decode(filepath, starttime=starttime)
def _encode(self, audio_format, decoder_process,
bitrate=None, encoder=None):
"""find the correct encoder and pass in the decoder process,
returns the encoder process"""
if not bitrate:
bitrate = self.bitrate.get(audio_format)
if not bitrate:
bitrate = 128
if not encoder:
for enc in self.available_encoders:
if enc.filetype == audio_format:
encoder = enc
break
if self.debug:
print(encoder)
return encoder.encode(decoder_process, bitrate)
def check_encoder_available(self, audio_format):
"""checks if an encoder for this audio format is available"""
if not audio_format in self.available_encoder_formats():
errmsg = 'No encoder available to handle audio format %s'
raise EncodeError(errmsg % audio_format)
def transcode(self, in_file, out_file, bitrate=None):
"""transcodes one file into another format. the filetype is
determined using the file extension of those files"""
audioformat = _filetype(out_file)
self.check_encoder_available(audioformat)
with open(out_file, 'wb') as fhandler:
for data in self.transcode_stream(in_file, audioformat, bitrate):
fhandler.write(data)
fhandler.close()
def transcode_stream(self, filepath, newformat, bitrate=None,
encoder=None, decoder=None, starttime=0):
"""returns a generator wih the bytestream of the encoded audio
stream"""
if not encoder:
self.check_encoder_available(newformat)
decoder_process = None
encoder_process = None
try:
decoder_process = self._decode(filepath, decoder,
starttime=starttime)
encoder_process = self._encode(newformat, decoder_process,
bitrate=bitrate, encoder=encoder)
while encoder_process.poll() is None:
data = encoder_process.stdout.read(AudioTranscode.READ_BUFFER)
if data is None:
time.sleep(0.1) # wait for new data...
break
yield data
yield encoder_process.stdout.read()
finally:
if decoder_process and decoder_process.poll() is None:
if decoder_process.stderr:
decoder_process.stderr.close()
if decoder_process.stdout:
decoder_process.stdout.close()
if decoder_process.stdin:
decoder_process.stdin.close()
decoder_process.terminate()
if encoder_process:
encoder_process.stdout.close()
if encoder_process.stdin:
encoder_process.stdin.close()
if encoder_process.stderr:
encoder_process.stderr.close()
encoder_process.wait()
| 12,242 | Python | .py | 272 | 33.702206 | 105 | 0.556562 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,352 | test_transcode.py | devsnd_cherrymusic/audiotranscode/test/test_transcode.py | #!/usr/bin/python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import os
import tempfile
from nose.tools import *
import audiotranscode as transcode
transcoder = transcode.AudioTranscode(debug=True)
testdir = os.path.dirname(__file__)
testfiles = {
'mp3' : os.path.join(testdir,'test.mp3'),
'ogg' : os.path.join(testdir,'test.ogg'),
'flac': os.path.join(testdir,'test.flac'),
'wav': os.path.join(testdir,'test.wav'),
'm4a': os.path.join(testdir,'test.m4a'),
'wma': os.path.join(testdir,'test.wma'),
}
outputpath = tempfile.mkdtemp(prefix='test.audiotranscode.output.')
def generictestfunc(filepath, newformat, encoder, decoder):
ident = "%s_%s_to_%s_%s" % (
decoder.command[0],
os.path.basename(filepath),
encoder.command[0],
newformat
)
outdata = b''
for data in transcoder.transcode_stream(filepath, newformat, encoder=encoder, decoder=decoder):
outdata += data
ok_(len(outdata)>0, 'No data received: '+ident)
with open(os.path.join(outputpath,ident+'.'+newformat),'wb') as outfile:
outfile.write(outdata)
def test_generator():
for enc in transcoder.Encoders:
if not enc.available():
print('Encoder %s not installed!'%enc.command[0])
continue
for dec in transcoder.Decoders:
if not dec.available():
print('Encoder %s not installed!'%dec.command[0])
continue
if dec.filetype in testfiles:
yield generictestfunc, testfiles[dec.filetype], enc.filetype, enc, dec
| 2,593 | Python | .py | 68 | 33.617647 | 99 | 0.691422 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,353 | configuration.py | devsnd_cherrymusic/cherrymusicserver/configuration.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
from io import open
import itertools
import os
import re
import weakref
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from collections import namedtuple
from backport.collections import OrderedDict
from backport import callable
from cherrymusicserver import util
from cherrymusicserver import log as logging
def _validate_basedir(basedir):
if not basedir:
raise ValueError('basedir must be set')
if not os.path.isabs(basedir):
raise ValueError('basedir must be absolute path: {basedir}'.format(basedir=basedir))
if not os.path.exists(basedir):
raise ValueError("basedir must exist: {basedir}".format(basedir=basedir))
if not os.path.isdir(basedir):
raise ValueError("basedir must be a directory: {basedir}".format(basedir=basedir))
return True
def from_defaults():
'''load default configuration. must work if path to standard config file is unknown.'''
c = ConfigBuilder()
with c['media.basedir'] as basedir:
basedir.value = None
basedir.valid = _validate_basedir
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
basedir.doc = _("""
BASEDIR specifies where the media that should be
served is located. It must be an absolute path, e.g.
BASEDIR=/absolute/path/to/media.
Links: If your operating system supports them,
you can use symlinks directly in BASEDIR. Links to
directories which contain BASEDIR will be ignored,
just like all links not directly in, but in sublevels
of BASEDIR. This is to guard against the adverse
effects of link cycles.
""")
with c['media.transcode'] as transcode:
transcode.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
transcode.doc = _("""
TRANSCODE (experimental!) enables automatic live transcoding
of the media to be able to listen to every format on every device.
This requires you to have the appropriate codecs installed.
Please note that transcoding will significantly increase the stress on the CPU!
""")
with c['media.fetch_album_art'] as fetch:
fetch.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
fetch.doc = _("""
Tries to fetch the album cover from various locations in the web,
if no image is found locally. By default it will be fetched from iTunes.
They will be shown next to folders that qualify as a possible
album.
""")
with c['media.show_subfolder_count'] as subfoldercount:
subfoldercount.value = True
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
subfoldercount.doc = _("""
Show the number of sub-folders and tracks contained
in any folder. This will increase the stress for the
server, so if you're running CherryMusic on a 386DX II
or similar, it is recommended to deactivate this feature.
""")
with c['media.maximum_download_size'] as maxdl:
maxdl.value = 1024*1024*250
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
maxdl.doc = _("""
Maximum size in bytes of all files to be downloaded in one zipfile.
Defaults to {default_value} {default_unit}.
""".format(default_value='250', default_unit=_('megabytes')))
with c['search.maxresults'] as maxresults:
maxresults.value = 20
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
maxresults.doc = _("""
MAXRESULTS sets the maximum amount of search results
to be displayed. If MAXRESULTS is set to a higher value,
the search will take longer, but will also be more accurate.
""")
with c['search.load_file_db_into_memory'] as memory:
memory.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
memory.doc = _("""
This will load parts of the database into memory for improved
performance. This option should only be used on systems with
sufficient memory, because it will hurt the performance otherwise.
""")
with c['browser.maxshowfiles'] as maxshowfiles:
maxshowfiles.value = 100
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
maxshowfiles.doc = _('''
MAXSHOWFILES specifies how many files and folders should
be shown at the same time. E.g. if you open a folder
with more than MAXSHOWFILES, the files will be grouped
according to the first letter in their name.
100 is a good value, as a CD can have up to 99 tracks.
''')
with c['browser.pure_database_lookup'] as pure_database_lookup:
pure_database_lookup.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
pure_database_lookup.doc = _("""
Only use the media database, never the filesystem, for content
lookups in browser and search. Useful if the media files reside
on an external hard drive or behind a slow network connection.
""")
with c['server.port'] as port:
port.value = 8080
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
port.doc = _('The port the server will listen to.')
with c['server.ipv6_enabled'] as ipv6:
ipv6.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
ipv6.doc = _("""When set to true, the server will listen on a IPv6
socket instead of IPv4""")
with c['server.localhost_only'] as localhost_only:
localhost_only.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
localhost_only.doc = _('''
When localhost_only is set to true, the server will not
be visible in the network and only play music on the
same computer it is running on.
Activating this option binds the server to IP 127.0.0.1 or
[::1], depending on whether server.ipv6_enabled is true.
The server should also be reachable as "localhost" in any
case.
''')
with c['server.rootpath'] as rootpath:
rootpath.value = '/'
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
rootpath.doc = _('''
The path cherrymusic will be available on. Normally
you'll want to leave it as '/', so that CherryMusic is
available under e.g. localhost:8080. You might want to
change the path if CherryMusic runs behind a reverse
proxy. Changing it to '/cherrymusic' will make it available
under e.g. localhost:8080/cherrymusic
''')
with c['server.localhost_auto_login'] as localhost_auto_login:
localhost_auto_login.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
localhost_auto_login.doc = _('''
When localhost_auto_login is set to "True", the server will
not ask for credentials when using it locally. The user will
be automatically logged in as admin.
''')
with c['server.permit_remote_admin_login'] as permit_remote_admin_login:
permit_remote_admin_login.value = True
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
permit_remote_admin_login.doc = _('''
When permit_remote_admin_login is set to "False", admin users
may only log in from the computer cherrymusic is currently
running on. This can improve security.
''')
with c['server.keep_session_in_ram'] as keep_session_in_ram:
keep_session_in_ram.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
keep_session_in_ram.doc = _('''
Will keep the user sessions in RAM instead of a file in the
configuration directory. This means, that any unsaved
playlists will be lost when the server is restarted.
''')
with c['server.session_duration'] as session_duration:
session_duration.value = 60 * 24
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
session_duration.doc = _('''
Duration in minutes of the user sessions. Note that this
will not affect auto logged-in users.
''')
with c['server.ssl_enabled'] as ssl_enabled:
ssl_enabled.value = False
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
ssl_enabled.doc = _('''
The following options allow you to use cherrymusic with
https encryption. If ssl_enabled is set to "False", all other
ssl options will be ommited.
''')
with c['server.ssl_port'] as ssl_port:
ssl_port.value = 8443
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
ssl_port.doc = _('''
The port that will listen to SSL encrypted requests. If
ssl_enabled is set to "True", all unencrypted HTTP requests
will be redirected to this port.
''')
with c['server.ssl_certificate'] as ssl_certificate:
ssl_certificate.value = 'certs/server.crt'
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
ssl_certificate.doc = _('''
The SSL certiticate sent to the client to verify the
server's authenticity. A relative path is relative to the
location of the CherryMusic configuration file.
''')
with c['server.ssl_private_key'] as ssl_private_key:
ssl_private_key.value = 'certs/server.key'
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
ssl_private_key.doc = _('''
SSL private key file used by the server to decrypt and sign
secure communications. Keep this one secret! A relative
path is relative to the location of the CherryMusic
configuration file.
''')
with c['general.update_notification'] as update_notification:
update_notification.value = True
# i18n: Don't mind whitespace - string will be re-wrapped automatically. Use blank lines to separate paragraphs.
update_notification.doc = _('''
Notify admins about available security and feature updates.
''')
return c.to_configuration()
def from_configparser(filepath):
"""Have an ini file that the python configparser can understand? Pass the filepath
to this function, and a matching Configuration will magically be returned."""
if not os.path.exists(filepath):
logging.error(_('configuration file not found: %(filepath)s'), {'filepath':filepath})
return None
if not os.path.isfile(filepath):
logging.error(_('configuration path is not a file: %(filepath)s'), {'filepath':filepath})
return None
try:
from configparser import ConfigParser
except ImportError:
from backport.configparser import ConfigParser
cfgp = ConfigParser()
read_file = cfgp.read_file if hasattr(cfgp, "read_file") else cfgp.readfp
with open(filepath, encoding='utf-8') as fp:
read_file(fp)
dic = OrderedDict()
for section_name in cfgp.sections():
if 'DEFAULT' == section_name:
section_name = ''
for name, value in cfgp.items(section_name):
value += '' # inner workaround for python 2.6+
# transforms ascii str to unicode because
# of unicode_literals import
dic[Key(section_name) + name] = value
return Configuration.from_mapping(dic)
def write_to_file(cfg, filepath):
""" Write a configuration to the given file so that it's readable by
configparser.
"""
with open(filepath, mode='w', encoding='utf-8') as f:
def printf(s):
f.write(s + os.linesep)
lastsection = None
for prop in cfg.to_properties():
if prop.hidden:
continue
key, value, doc = (Key(prop.key), prop.value, prop.doc)
section, subkey = str(key.head), str(key.tail)
if section != lastsection:
lastsection = section
printf('%s[%s]' % (os.linesep, section,))
if doc:
printf('')
lines = util.phrase_to_lines(doc)
for line in lines:
printf('; %s' % (line,))
printf('%s = %s' % (subkey, value))
def from_dict(mapping):
'''Alias for :meth:`Configuration.from_mapping`.'''
return Configuration.from_mapping(mapping)
def from_list(properties):
'''Alias for :meth:`Configuration.from_properties`.'''
return Configuration.from_properties(properties)
def to_list(cfg):
'''Alias for :meth:`Configuration.to_properties`.'''
return cfg.to_properties()
class ConfigError(Exception):
"""Base class for configuration errors."""
def __init__(self, key, value=None, msg='', detail=''):
self.key = key
self.value = value
self.msg = msg % {'key': key, 'value': value}
self.detail = detail % {'key': key, 'value': value}
Exception.__init__(self, self.key, self.value, self.msg, self.detail)
def __repr__(self):
return "{cls}: {msg}, key:{key} value:{val}, {detail}".format(
cls=self.__class__.__name__,
key=repr(self.key),
val=repr(self.value),
msg=self.msg,
detail=self.detail,
)
def __str__(self):
detail = self.detail.strip() if hasattr(self, 'detail') else ''
if detail:
detail = ' ({0})'.format(detail)
return '{0}: {1}{2}'.format(self.__class__.__name__, self.msg, detail)
class ConfigNamingError(ConfigError):
"""Something is wrong with the name ('Key') of a config Property."""
def __init__(self, key, detail=''):
ConfigError.__init__(self, key, None,
'invalid key name: %(key)r', detail)
class ConfigKeyError(ConfigError, KeyError):
""" A config key does not exist. """
def __init__(self, key, detail=''):
ConfigError.__init__(self, key, None,
'key does not exist: %(key)r', detail)
class ConfigValueError(ConfigError, ValueError):
"""A configuration property does not accept a value."""
def __init__(self, key, value, detail=''):
ConfigError.__init__(self, key, value,
'invalid value: %(value)r', detail)
class ConfigWriteError(ConfigError):
"""Error while trying to change an existing configuration property."""
def __init__(self, key, value, detail=''):
ConfigError.__init__(self, key, value,
"can't write to %(key)s", detail)
def raising_error_handler(e):
"Simply raise the active exception."
raise
class error_collector(object):
""" Callable that can be used to collect errors of Configuration operations
instead of raising them.
"""
def __init__(self):
self.errors = []
def __call__(self, error):
self.errors.append(error)
def __len__(self):
return len(self.errors)
def __iter__(self):
return iter(self.errors)
class Key(object):
""" A hierarchical property name; alphanumerical and caseless.
Keys parts can contain ASCII letters, digits and `_`; they must start
with a letter and be separated by a `.`.
"""
_sep = '.'
_re = re.compile(r'^({name}({sep}{name})*)?$'.format(
name=r'[A-Za-z][A-Za-z0-9_]*',
sep=_sep,
))
def __init__(self, name=None):
""" name : Key or str
`None` means ''
"""
if None is name:
name = ''
elif isinstance(name, Key):
name = name._str
elif not isinstance(name, (str, type(''))):
raise ConfigNamingError(name, 'name must be a Key, str or unicode (is {type!r})'.format(type=type(name)))
elif not self._re.match(name):
raise ConfigNamingError(
name, 'Key parts must only contain the characters [A-Za-z0-9_],'
' start with a letter and be separated by a {seperator}'.format(seperator=self._sep))
name += '' # inner workaround for python 2.6+
# transforms ascii str to unicode because
# of unicode_literals import
self._str = name.lower()
def __repr__(self):
return '{0}({1!r})'.format(self.__class__.__name__, self._str)
def __str__(self):
return self._str
def __iter__(self):
"""Iterate over hierarchical key parts,"""
return iter(map(Key, self._str.split(self._sep)))
def __len__(self):
"""The number of non-empty hierarchical parts in this Key."""
return self._str.count(self._sep) + 1 if self._str else 0
def __add__(self, other):
"""Append something that can become a Key to a copy of this Key."""
other = Key(other)
if self and other:
return self._sep.join((self._str, other._str))
return Key(self or other)
def __radd__(self, other):
"""Make a Key of the left operand and add a copy of this key to it."""
return Key(other) + self
def __hash__(self):
return hash(self.normal)
def __eq__(self, other):
return self.normal == Key(other).normal
def __ne__(self, other):
return not (self == other)
@property
def parent(self):
""" This Key without its last hierarchical part; evaluates to `False`
if there are less than two parts in this Key.
"""
lastsep = self._str.rfind(self._sep)
if lastsep >= 0:
return Key(self._str[:lastsep])
return Key()
@property
def head(self):
""" The first hierarchical part of this Key."""
firstsep = self._str.find(self._sep)
if firstsep >= 0:
return Key(self._str[:firstsep])
return self
@property
def tail(self):
""" This key without its last hierarchical part; evaluates to `False`
if there are less than two parts in this Key.
"""
firstsep = self._str.find(self._sep)
if firstsep >= 0:
return Key(self._str[firstsep + 1:])
return Key()
@property
def normal(self):
"""The normal, hashable form of this Key to compare against."""
return self._str
class _PropertyMap(Mapping):
""" A map of keys to corresponding Properties; immutable, but can generate
updated copies of itself. Certain unset property attributes are
inherited from the property with the closest parent key. These
inherited attributes are: ``valid``, ``readonly`` and ``hidden``.
Uses the Property.replace mechanic to update existing properties.
"""
def __init__(self, properties=()):
dic = OrderedDict((p.key, p) for p in properties)
sortedkeys = sorted(dic, key=lambda k: Key(k).normal)
inherit = _InheritanceViewer(dic)
for key in sortedkeys:
dic[key] = inherit.property_with_inherited_attributes(key)
self._dic = dic
def __repr__(self):
return '{%s}' % (', '.join(
'%r: %r' % (k, v) for k, v in self._dic.items()))
def __len__(self):
return len(self._dic)
def __contains__(self, key):
return key in self._dic
def __iter__(self):
return iter(self._dic)
def __getitem__(self, key):
try:
return self._dic[key]
except KeyError:
raise ConfigKeyError(key)
def replace(self, properties, on_error):
def getnew(prop):
return self[prop.key].replace(**prop.to_dict())
return self._copy_with_new_properties(getnew, properties, on_error)
def update(self, properties, on_error):
def getnew(prop):
try:
return self[prop.key].replace(**prop.to_dict())
except KeyError:
return prop
return self._copy_with_new_properties(getnew, properties, on_error)
def _copy_with_new_properties(self, getnew, properties, on_error):
newdic = OrderedDict(self._dic)
for prop in properties:
try:
newprop = getnew(prop)
except ConfigError as error:
on_error(error)
continue
newdic[newprop.key] = newprop
return self.__class__(newdic.values())
class Property(namedtuple('PropertyTuple', 'key value type valid readonly hidden doc')):
""" A configuration Property with attributes for key (name), value, type,
validation and doc(umentation); immutable.
Use :meth:`replace` to return a new Property with changed attributes.
Attribute values of `None` are considered *not set*, and are the
default. They also have a special meaning to :meth:`replace`.
key : str
A string that acts as this Property's identifier (name).
value :
Anything goes that fits possible type or validity constraints,
except for `dict`s (and mappings in general); use hierarchical
keys to express those.
type :
The desired value type to auto-cast to; factually a constraint to
possible values. If `None` or an empty string, the property value
will remain unchanged.
valid : str or callable
A validity constraint on the value, applied after `type`. A
*callable* value will be called and the result evaluated in
boolean context, to decide if a value is valid. A *str* value will
be interpreted as a regular expression which the whole
``str()`` form of a value will be matched against.
readonly : bool
A readonly property will refuse any :meth"`replace` calls with a
:class:`ConfigWriteError`.
hidden : bool
Just a flag; interpretation is up to the user.
doc : str
A documentation string.
"""
def __new__(cls, key=None, value=None, type=None, valid=None, readonly=None,
hidden=None, doc=None):
try:
key = Key(key).normal
type = cls._get_valid_type(value, type)
valid = valid
value = cls._validate(valid, cls._to_type(type, value), type)
readonly = readonly
hidden = hidden
doc = doc
except ValueError as e:
raise ConfigValueError(key, value, detail=str(e))
return super(cls, cls).__new__(
cls, key, value, type, valid, readonly, hidden, doc)
@property
def _args(self):
"""The arguments needed to create this Property: ``(name, value)*``."""
for name in ('key', 'value', 'type', 'valid', 'readonly', 'hidden', 'doc'):
attr = getattr(self, name)
if attr is not None:
yield name, attr
def to_dict(self):
return dict(self._args)
def replace(self, **kwargs):
""" Return a new property as a copy of this property, with attributes
changed according to `kwargs`.
Generally, all attributes can be overridden if they are currently
unset (`None`). An exception is `value`, which will be overridden
by anything but `None`. Restrictions set by `type` and `valid`
apply.
"""
dic = self.to_dict()
dic.update(kwargs)
other = Property(**dic)
if self.key and other.key and self.key != other.key:
raise ConfigWriteError(self.key, other.key,
'new key must match old ({newkey!r} != {oldkey!r})'.format(
newkey=other.key, oldkey=self.key))
if self.readonly:
raise ConfigWriteError(self.key, other.value,
'is readonly ({value!r})'.format(value=self.value))
return Property(
key=self.key or other.key,
value=self._override_self('value', other),
type=self._override_other('type', other),
valid=self._override_other('valid', other),
readonly=self._override_other('readonly', other),
hidden=self._override_other('hidden', other),
doc=self._override_other('doc', other),
)
def _override_self(self, attrname, other):
""" Select the value of an attribute from self or another instance,
with preference to other."""
return self.__select_with_preference(other, self, attrname)
def _override_other(self, attrname, other):
""" Select the value of an attribute from self or another instance,
with preference to self."""
return self.__select_with_preference(self, other, attrname)
@staticmethod
def __select_with_preference(preferred, alt, attrname):
""" Select one of the values of an attribute to two objects, preferring
the first unless it holds `None`.
"""
preference = getattr(preferred, attrname, None)
alternative = getattr(alt, attrname, None)
return alternative if preference is None else preference
@staticmethod
def _get_valid_type(value, type_):
""" Turn the type argument into something useful. """
if type_ in (None, ''):
if type(value) in (bool, int, float, str, type('')):
type_ = type(value)
else:
return None
typestr = type_.__name__ if isinstance(type_, type) else str(type_)
typestr += '' # inner workaround for python 2.6+
# transforms ascii str to unicode because
# of unicode_literals import
if not typestr in Transformers:
return None
return typestr
@staticmethod
def _to_type(type_, value):
if value is None:
return value
try:
return Transformers[type_](value)
except TransformError:
raise ValueError('cannot transform value to type %s' % (type_,))
@classmethod
def _validate(cls, valid, value, type_):
if value is None:
return value
validator = cls._validator(valid)
return cls._validate_single_value(validator, value)
@classmethod
def _validate_single_value(cls, validator, value):
if not validator(value):
raise ValueError(validator.__name__)
return value
@classmethod
def _validator(cls, valid):
if callable(valid):
return valid
if not valid:
return lambda _: True
return cls._regexvalidator(valid)
@staticmethod
def _regexvalidator(valid):
def regex_validator(value):
testvalue = '' if value is None else str(value)
testvalue += '' # python2.6 compatibility
exp = valid.strip().lstrip('^').rstrip('$').strip()
exp = '^' + exp + '$'
if not re.match(exp, testvalue):
raise ValueError('value string must match {0!r}, is {1!r}'.format(exp, testvalue))
return True
return regex_validator
class _PropertyModel(object):
""" Objects whose __dict__ can be used to create a Property from;
calling it with a ``key`` argument will yield a nested model.
"""
# as class member to keep children out of instance __dict__s
_children = weakref.WeakKeyDictionary()
@staticmethod
def to_property(model):
return Property(**model.__dict__)
@classmethod
def model_family_to_properties(cls, parent_model):
return (Property(**m.__dict__) for m in cls._family(parent_model))
@classmethod
def _makechild(cls, parent, key):
child = cls(Key(parent.key) + key)
cls._children[parent].append(child)
return child
@classmethod
def _family(cls, root):
yield root
for child in itertools.chain(*[cls._family(c) for c in cls._children[root]]):
yield child
def __init__(self, key=None):
self._children[self] = []
self.key = Key(key).normal
def __getitem__(self, key):
return self._makechild(self, key)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
class ConfigBuilder(object):
def __init__(self):
self.models = OrderedDict()
def __getitem__(self, key):
return self.models.setdefault(key, _PropertyModel(key))
def properties(self):
return itertools.chain(
*(_PropertyModel.model_family_to_properties(m) for m in self.models.values()))
def to_configuration(self):
return Configuration.from_properties(self.properties())
class Configuration(Mapping):
""" A mapping of keys to corresponding values, backed by a collection of
:class:`Property` objects.
Immutable; call :meth:`update` or :meth:`replace` with a mapping
argument to modify a copy of a configuration.
Unset Property attributes of ``valid``, ``readonly`` and ``hidden``
are overridden by those of a property with a "parent" key.
"""
@classmethod
def from_properties(cls, properties):
cfg = cls()
cfg.__propertymap = _PropertyMap(properties)
return cfg
def to_properties(self):
return self.__propertymap.values()
@classmethod
def from_mapping(cls, mapping):
properties = (Property(key, value) for key, value in mapping.items())
return cls.from_properties(properties)
def to_nested_dict(self):
d = {}
for key, value in self.items():
target = d
for part in Key(key):
target = target.setdefault(str(part), {})
if value is not None:
target[''] = self[key]
for key in self:
parent = None
target = d
for part in Key(key):
parent = target
target = target[str(part)]
if [''] == list(target):
parent[str(part)] = target.pop('')
return d
def __init__(self):
self.__propertymap = _PropertyMap()
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__,
tuple(self.__propertymap.values()))
def __contains__(self, key):
return key in self.__propertymap
def __len__(self):
return len(self.__propertymap)
def __iter__(self):
return iter(self.__propertymap)
def __getitem__(self, key):
return self.property(key).value
def property(self, key):
""" Return the property corresponding to the key argument or raise a
ConfigKeyError.
"""
return self.__propertymap[key]
def replace(self, mapping, on_error=raising_error_handler):
""" Return a copy of this configuration with some values replaced by
the corresponding values in the mapping argument; adding new keys
is not allowed.
Resulting ConfigErrors will be raised or passed to a callable
error handler, if given.
"""
return self._mutated_by(mapping, self.__propertymap.replace, on_error)
def update(self, mapping, on_error=raising_error_handler):
""" Return a copy of this configuration with some values replaced or
added corresponding to the values in the mapping argument.
Resulting ConfigErrors will be raised or passed to a callable
error handler, if given.
"""
return self._mutated_by(mapping, self.__propertymap.update, on_error)
def _mutated_by(self, mapping, mutator, on_error):
mutated = self.__class__()
properties = []
for key, value in mapping.items():
try:
properties.append(Property(key, value))
except ConfigError as e:
on_error(e)
mutated.__propertymap = mutator(properties, on_error)
return mutated
class _InheritanceViewer(object):
def __init__(self, propertymap):
self.propertymap = propertymap
def property_with_inherited_attributes(self, key):
property = self.propertymap[key]
model = _PropertyModel()
model.__dict__.update(property.to_dict())
self._inherit_attribute_if_not_set('valid', model)
self._inherit_attribute_if_not_set('readonly', model)
self._inherit_attribute_if_not_set('hidden', model)
return _PropertyModel.to_property(model)
def _inherit_attribute_if_not_set(self, attrname, model):
if getattr(model, attrname, None) is None:
key = Key(model.key).parent
value = None
while value is None and key:
try:
value = getattr(self.propertymap[key.normal], attrname, None)
except KeyError:
pass
key = key.parent
setattr(model, attrname, value)
Transformers = {}
def transformer(name, *more):
global Transformers # hell yeah!
def transformer_decorator(func):
Transformers[name] = func
for additional in more:
Transformers[additional] = func
return func
return transformer_decorator
class TransformError(Exception):
def __init__(self, transformername, val):
msg = ("Error while trying to parse value with transformer "
"'%s': %s" % (transformername, val))
super(self.__class__, self).__init__(msg)
@transformer(None)
def _identity(val=None):
return val
@transformer(name='bool')
def _to_bool_transformer(val=None):
if isinstance(val, (bool, int, float, complex, list, set, dict, tuple)):
return bool(val)
if isinstance(val, (type(''), str)):
if val.strip().lower() in ('yes', 'true', 'y', '1'):
return True
if val.strip().lower() in ('false', 'no', '', 'n', '0'):
return False
raise TransformError('bool', val)
@transformer('int')
def _to_int_transformer(val=None):
try:
return int(val)
except (TypeError, ValueError):
raise TransformError('int', val)
@transformer('float')
def _to_float_transformer(val=None):
try:
return float(val)
except (TypeError, ValueError):
raise TransformError('float', val)
@transformer('str', 'unicode')
def _to_str_transformer(val=None):
if val is None:
return ''
if isinstance(val, (str, type(''))):
return val.strip() + '' # inner workaround for python 2.6+
return str(val) + '' # transforms ascii str to unicode because
# of unicode_literals import
| 38,539 | Python | .py | 827 | 36.195889 | 120 | 0.60519 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,354 | util.py | devsnd_cherrymusic/cherrymusicserver/util.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
import os
import sys
import base64
import codecs
from cherrymusicserver import log
from time import time
PERFORMANCE_TEST = True
def timed(func):
"""decorator to time function execution and log result on DEBUG"""
def wrapper(*args, **kwargs):
starttime = time()
result = func(*args, **kwargs)
duration = time() - starttime
log.d('%s.%s: %.4f s', func.__module__, func.__name__, duration)
return result
return wrapper
def trim_to_maxlen(maxlen, s, insert=' ... '):
'''no sanity check for maxlen and len(insert)'''
if len(s) > maxlen:
keep = maxlen - len(insert)
left = keep // 2
right = keep - left
s = s[:left] + insert + s[-right:]
return s
def phrase_to_lines(phrase, length=80):
"""splits a string along whitespace and distributes the parts into
lines of the given length.
each paragraph is followed by a blank line, replacing all blank
lines separating the paragraphs in the phrase; if paragraphs get
squashed in your multiline strings, try inserting explicit newlines.
"""
import re
parag_ptn = r'''(?x) # verbose mode
(?: # non-capturing group:
[ \t\v\f\r]* # any non-breaking space
\n # linebreak
){2,} # at least two of these
'''
paragraphs = re.split(parag_ptn, phrase)
lines = []
for paragraph in paragraphs:
if not paragraph:
continue
words = paragraph.split()
line = ''
for word in words:
if len(line) + len(word) > length:
lines.append(line.rstrip())
line = ''
line += word + ' '
lines += [line.rstrip(), '']
return lines
def splittime(seconds):
'''converts time given in seconds into a tuple: (hours, mins, secs)'''
tmp = seconds
hh = tmp / 3600
tmp %= 3600
mm = tmp / 60
tmp %= 60
ss = tmp
return (hh, mm, ss)
def Property(func):
"""
decorator that allows defining acessors in place as local functions.
func must define fget, and may define fset, fdel and doc; `return locals()`
at the end.
Seen at http://adam.gomaa.us/blog/2008/aug/11/the-python-property-builtin/
"""
return property(**func())
from collections import deque
import math
class MovingAverage(object):
def __init__(self, size=15, fill=0):
assert size > 0
self._values = deque((fill for i in range(size)))
self._avg = fill
self._size = size
@property
def avg(self):
return self._avg
@property
def min(self):
return min(self._values)
@property
def max(self):
return max(self._values)
@property
def median(self):
sort = sorted(self._values)
mid = self._size // 2
median = sort[mid]
if self._size % 2:
return median
return (median + sort[mid - 1]) / 2
@property
def variance(self):
diff = []
mean = self.avg
[diff.append((x - mean) * (x - mean)) for x in self._values]
return sum(diff) / len(diff)
@property
def stddev(self):
return math.sqrt(self.variance)
def feed(self, val):
'''insert a new value and get back the new average'''
old = self._values.popleft()
try:
self._avg += (val - old) / self._size
except TypeError as tpe:
self._values.appendleft(old)
raise tpe
self._values.append(val)
return self._avg
class Performance:
indentation = 0
def __init__(self, text):
self.text = text
def __enter__(self):
global PERFORMANCE_TEST
if PERFORMANCE_TEST:
self.time = time()
Performance.indentation += 1
log.w('│ ' * (Performance.indentation - 1)
+ '╭──' + self.text)
return self
def __exit__(self, type, value, traceback):
global PERFORMANCE_TEST
if PERFORMANCE_TEST:
duration = (time() - self.time) * 1000
log.w('│ ' * (Performance.indentation-1)
+ '╰──%g ms' % (duration,))
Performance.indentation -= 1
def log(self, text):
global PERFORMANCE_TEST
if PERFORMANCE_TEST:
for line in text.split('\n'):
log.w('| ' * (Performance.indentation) + line)
def time2text(sec):
abssec = abs(sec)
minutes = abssec/60
hours = minutes/60
days = hours/24
weeks = days/7
months = days/30
years = months/12
if abssec > 30:
if sec > 0:
if int(years) != 0:
if years > 1:
return _('%d years ago') % years
else:
return _('a year ago')
elif int(months) != 0:
if months > 1:
return _('%d months ago') % months
else:
return _('a month ago')
elif int(weeks) != 0:
if weeks > 1:
return _('%d weeks ago') % weeks
else:
return _('a week ago')
elif int(days) != 0:
if days > 1:
return _('%d days ago') % days
else:
return _('a day ago')
elif int(hours) != 0:
if hours > 1:
return _('%d hours ago') % hours
else:
return _('an hour ago')
elif hours > 0.45:
return _('half an hour ago')
elif int(minutes) != 0:
if minutes > 1:
return _('%d minutes ago') % hours
else:
return _('a minute ago')
else:
return _('a few seconds ago')
else:
if int(years) != 0:
if years > 1:
return _('in %d years') % years
else:
return _('in a year')
elif int(months) != 0:
if months > 1:
return _('in %d months') % months
else:
return _('in a month')
elif int(weeks) != 0:
if weeks > 1:
return _('in %d weeks') % weeks
else:
return _('in a week')
elif int(days) != 0:
if days > 1:
return _('in %d days') % days
else:
return _('in a day')
elif int(hours) != 0:
if hours > 1:
return _('in %d hours') % hours
else:
return _('in an hour')
elif hours > 0.45:
return _('in half an hour')
elif int(minutes) != 0:
if minutes > 1:
return _('in %d minutes') % hours
else:
return _('in a minute')
else:
return _('in a few seconds')
else:
return _('just now')
class MemoryZipFile(object):
def __init__(self):
from io import BytesIO
from zipfile import ZipFile
self.buffer = BytesIO()
self.zip = ZipFile(self.buffer, 'w')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def writestr(self, name, bytes):
try:
self.zip.writestr(name, bytes)
except:
log.x(_('Error writing file %(name)r to memory zip'), {'name': name})
raise
def getbytes(self):
return self.buffer.getvalue()
def close(self):
self.zip.close()
| 9,043 | Python | .py | 270 | 24.225926 | 81 | 0.531849 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,355 | tweak.py | devsnd_cherrymusic/cherrymusicserver/tweak.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""This file contains all static values that can be used to tweak the
program execution. All classes are static and only contain simple values.
To use this class, please import it using the fully classified module name, e.g
import cherrymusicserver.tweak
To account for changes while the server is running, reload the module
before using it:
reload(cherrymusicserver.tweak)
make sure to have reload imported as well:
from imp import reload
"""
class ResultOrderTweaks:
perfect_match_bonus = 100
partial_perfect_match_bonus = 30
starts_with_bonus = 10
folder_bonus = 5
word_in_file_name_bonus = 20
word_not_in_file_name_penalty = -30
word_in_file_path_bonus = 3
word_not_in_file_path_penalty = -10
class CherryModelTweaks:
result_order_debug = False
result_order_debug_files = 10
class SearchTweaks:
normal_file_search_limit = 400
| 1,980 | Python | .py | 54 | 34.444444 | 79 | 0.755741 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,356 | useroptiondb.py | devsnd_cherrymusic/cherrymusicserver/useroptiondb.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import json
from cherrymusicserver import log
from cherrymusicserver import configuration as cfg
from cherrymusicserver import database as db
from cherrymusicserver.database.connect import BoundConnector
DBNAME = 'useroptions'
class UserOptionDB:
def __init__(self, connector=None):
"""user configuration:
hidden values can not be set by the user in the options,
but might be subject of being set automatically, e.g. the
heartbeat.
"""
db.require(DBNAME, '0')
c = cfg.ConfigBuilder()
with c['keyboard_shortcuts'] as kbs:
kbs.valid = '\d\d?\d?'
kbs['prev'].value = 89
kbs['play'].value = 88
kbs['pause'].value = 67
kbs['stop'].value = 86
kbs['next'].value = 66
kbs['search'].value = 83
with c['misc.show_playlist_download_buttons'] as pl_download_buttons:
pl_download_buttons.value = False
with c['misc.autoplay_on_add'] as autoplay_on_add:
autoplay_on_add.value = False
with c['custom_theme.primary_color'] as primary_color:
primary_color.value = '#F02E75'
primary_color.valid = '#[0-9a-fA-F]{6}'
with c['custom_theme.white_on_black'] as white_on_black:
white_on_black.value = False
with c['media.may_download'] as may_download:
may_download.value = False
with c['media.force_transcode_to_bitrate'] as force_transcode:
force_transcode.value = 0
force_transcode.valid = '0|48|64|96|128|320'
with c['ui.confirm_quit_dialog'] as confirm_quit_dialog:
confirm_quit_dialog.value = True
with c['ui.display_album_art'] as display_album_art:
display_album_art.value = True
with c['last_time_online'] as last_time_online:
last_time_online.value = 0
last_time_online.valid = '\\d+'
last_time_online.hidden = True
last_time_online.doc = "UNIX TIME (1.1.1970 = never)"
self.DEFAULTS = c.to_configuration()
self.conn = BoundConnector(DBNAME, connector).connection()
def getOptionFromMany(self, key, userids):
result = {}
for userid in userids:
val = self.useroptiondb.conn.execute(
'''SELECT value FROM option WHERE userid = ? AND name = ?''',
(userid, key,)).fetchone()
if val:
result[userid] = val
else:
result[userid] = self.DEFAULTS[key]
return result
def forUser(self, userid):
return UserOptionDB.UserOptionProxy(self, userid)
class UserOptionProxy:
def __init__(self, useroptiondb, userid):
self.useroptiondb = useroptiondb
self.userid = userid
def getChangableOptions(self):
opts = self.getOptions()
visible_props = (p for p in opts.to_properties() if not p.hidden)
return cfg.from_list(visible_props).to_nested_dict()
def getOptions(self):
results = self.useroptiondb.conn.execute(
'''SELECT name, value FROM option WHERE userid = ?''',
(self.userid,)).fetchall()
useropts = dict((r[0], json.loads(r[1])) for r in results)
return self.useroptiondb.DEFAULTS.replace(
useropts,
on_error=self.delete_bad_option)
def getOptionValue(self, key):
return self.getOptions()[key]
def setOption(self, key, value):
opts = self.getOptions().replace({key: value})
self.setOptions(opts)
def setOptions(self, c):
for k in cfg.to_list(c):
value = json.dumps(k.value)
key = k.key
sel = self.useroptiondb.conn.execute(
'''SELECT name, value FROM option
WHERE userid = ? AND name = ?''',
(self.userid, key)).fetchone()
if sel:
self.useroptiondb.conn.execute(
'''UPDATE option SET value = ?
WHERE userid = ? AND name = ?''',
(value, self.userid, key))
else:
self.useroptiondb.conn.execute(
'''INSERT INTO option (userid, name, value) VALUES
(?,?,?)''', (self.userid, key, value))
self.useroptiondb.conn.commit()
def deleteOptionIfExists(self, key):
stmt = """DELETE FROM option WHERE userid = ? AND name = ?;"""
with self.useroptiondb.conn as conn:
conn.execute(stmt, (self.userid, key))
def delete_bad_option(self, error):
self.deleteOptionIfExists(error.key)
log.warning('deleted bad option %r for userid %r (%s)',
error.key, self.userid, error.msg)
| 6,077 | Python | .py | 138 | 33.862319 | 78 | 0.59669 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,357 | albumartfetcher.py | devsnd_cherrymusic/cherrymusicserver/albumartfetcher.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
try:
import urllib.request
import urllib.parse
except ImportError:
import backport.urllib as urllib
import os.path
import codecs
import re
import subprocess
from tinytag import TinyTag
from cherrymusicserver import log
#unidecode is opt-dependency
try:
from unidecode import unidecode
except ImportError:
unidecode = lambda x: x
pillowAvailable = False
try:
from PIL import Image
from io import BytesIO
pillowAvailable = True
except ImportError:
pass
def programAvailable(name):
"""
check if a program is available in the system PATH
"""
try:
with open(os.devnull, 'w') as devnull:
process = subprocess.Popen([name], stdout=subprocess.PIPE,
stderr=devnull)
out, err = process.communicate()
return 'ImageMagick' in codecs.decode(out, 'UTF-8')
except OSError:
return False
class AlbumArtFetcher:
"""
provide the means to fetch images from different web services by
searching for certain keywords
"""
imageMagickAvailable = programAvailable('convert')
methods = {
'itunes': {
'url': "http://ax.itunes.apple.com/WebObjects/MZStoreServices.woa/wa/wsSearch?entity=album&term=",
'regexes': [
'artworkUrl60":"([^"]+)"',
],
},
'amazon': {
'url': "http://www.amazon.com/s/?field-keywords=",
'regexes': [
'<img[^>]+?alt="Product Details"[^>]+?src="([^"]+)"',
'<img[^>]+?src="([^"]+)"[^>]+?alt="Product Details"',
'<img[^>]+?src="([^"]+)"[^>]+?class="s-access-image',
'<img[^>]+?src="([^"]+)"[^>]+?data-search-image',
],
},
'bestbuy.com': {
'url': 'http://www.bestbuy.com/site/searchpage.jsp?_dyncharset=UTF-8&id=pcat17071&st=',
'regexes': ['<div class="thumb".+?<img.+?src="([^"]+)"'],
'user_agent': 'curl/7.52.1',
},
'bandcamp': {
'url': "https://bandcamp.com/search?q=",
'regexes': ['<div class="art".+?<img src="([^"]+)"']
}
# buy.com is now rakuten.com
# with a new search API that nobody bothered to figure out yet
# 'buy.com': {
# 'url': "http://www.buy.com/sr/srajax.aspx?from=2&qu=",
# 'regexes': [' class="productImageLink"><img src="([^"]*)"']
# },
}
def __init__(self, method='itunes', timeout=10):
"""define the urls of the services and a regex to fetch images
"""
self.MAX_IMAGE_SIZE_BYTES = 100*1024
self.IMAGE_SIZE = 80
# the GET parameter value of the searchterm must be appendable
# to the urls defined in "methods".
if not method in self.methods:
log.e(_(('''unknown album art fetch method: '%(method)s', '''
'''using default.''')),
{'method': method})
method = 'itunes'
self.method = method
self.timeout = timeout
def resize(self, imagepath, size):
"""
resize an image using image magick or pillow
Returns:
the binary data of the image and a matching http header
"""
with open(imagepath, 'rb') as fh:
return self.resize_image_data(fh.read(), size)
def resize_image_data(self, image_data, size):
"""
resize an image as BytesIO using pillow or image magick
Returns:
the binary data of the image and a matching http header
"""
if pillowAvailable:
input_image = BytesIO()
input_image.write(image_data)
input_image.seek(0)
image = Image.open(input_image)
image.thumbnail(size, Image.ANTIALIAS)
image_data = BytesIO()
image.save(image_data, "JPEG")
image_byte_count = image_data.tell()
image_data.seek(0)
return (
{
'Content-Type': "image/jpeg",
'Content-Length': image_byte_count
},
image_data.read()
)
if AlbumArtFetcher.imageMagickAvailable:
cmd = ['convert', '-',
'-resize', str(size[0])+'x'+str(size[1]),
'jpeg:-']
im = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data, err = im.communicate(image_data)
header = {'Content-Type': "image/jpeg",
'Content-Length': len(data)}
return header, data
return None, ''
def fetchurls(self, searchterm):
"""fetch image urls based on the provided searchterms
Returns:
list of urls
"""
# choose the webservice to retrieve the images from
method = self.methods[self.method]
user_agent = method.get('user_agent')
# use unidecode if it's available
searchterm = unidecode(searchterm).lower()
# the keywords must always be appenable to the method-url
url = method['url']+urllib.parse.quote(searchterm)
#download the webpage and decode the data to utf-8
html = codecs.decode(self.retrieveData(url, user_agent)[0], 'UTF-8')
# fetch all urls in the page
matches = []
for regex in method['regexes']:
matches += re.findall(regex, html, re.DOTALL)
return matches
def fetch(self, searchterm):
"""
fetch an image using the provided search term
encode the searchterms and retrieve an image from one of the
image providers
Returns:
an http header and binary data
"""
matches = self.fetchurls(searchterm)
if matches:
imgurl = matches[0]
if 'urltransformer' in self.method:
imgurl = method['urltransformer'](imgurl)
if imgurl.startswith('//'):
imgurl = 'http:'+imgurl
raw_data, header = self.retrieveData(imgurl)
return header, raw_data
else:
return None, ''
def retrieveData(self, url, user_agent=None):
"""
use a fake user agent to retrieve data from a webaddress
Returns:
the binary data and the http header of the request
"""
if not user_agent:
user_agent = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 '
'(KHTML, like Gecko) Ubuntu/12.04 '
'Chromium/18.0.1025.168 Chrome/18.0.1025.168 '
'Safari/535.19')
req = urllib.request.Request(url, headers={'User-Agent': user_agent})
urlhandler = urllib.request.urlopen(req, timeout=self.timeout)
return urlhandler.read(), urlhandler.info()
def fetchLocal(self, path):
""" search a local path for image files.
@param path: directory path
@type path: string
@return header, imagedata, is_resized
@rtype dict, bytestring"""
fetchers = (self._fetch_folder_image, self._fetch_embedded_image)
for fetcher in fetchers:
header, data, resized = fetcher(path)
if data:
break
return header, data, resized
def _fetch_folder_image(self, path):
filetypes = (".jpg", ".jpeg", ".png")
try:
for file_in_dir in sorted(os.listdir(path)):
if not file_in_dir.lower().endswith(filetypes):
continue
try:
imgpath = os.path.join(path, file_in_dir)
if os.path.getsize(imgpath) > self.MAX_IMAGE_SIZE_BYTES:
header, data = self.resize(imgpath,
(self.IMAGE_SIZE,
self.IMAGE_SIZE))
return header, data, True
else:
with open(imgpath, "rb") as f:
data = f.read()
if(imgpath.lower().endswith(".png")):
mimetype = "image/png"
else:
mimetype = "image/jpeg"
header = {'Content-Type': mimetype,
'Content-Length': len(data)}
return header, data, False
except IOError:
return None, '', False
except OSError:
return None, '', False
return None, '', False
def _fetch_embedded_image(self, path):
filetypes = ('.mp3',)
max_tries = 3
header, data, resized = None, '', False
try:
files = os.listdir(path)
files = (f for f in files if f.lower().endswith(filetypes))
for count, file_in_dir in enumerate(files, start=1):
if count > max_tries:
break
filepath = os.path.join(path, file_in_dir)
try:
tag = TinyTag.get(filepath, image=True)
image_data = tag.get_image()
except IOError:
continue
if not image_data:
continue
_header, _data = self.resize_image_data(
image_data, (self.IMAGE_SIZE, self.IMAGE_SIZE))
if _data:
header, data, resized = _header, _data, True
break
except OSError:
pass
return header, data, resized
| 11,003 | Python | .py | 279 | 28 | 110 | 0.542387 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,358 | metainfo.py | devsnd_cherrymusic/cherrymusicserver/metainfo.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from cherrymusicserver import log
import sys
from tinytag import TinyTag
class Metainfo():
def __init__(self, artist='', album='', title='', track='', length=0):
self.artist = artist
self.album = album
self.title = title
self.track = track
self.length = length
def dict(self):
return {
'artist': self.artist,
'album': self.album,
'title': self.title,
'track': self.track,
'length': self.length
}
def getSongInfo(filepath):
try:
tag = TinyTag.get(filepath)
except LookupError:
return Metainfo()
# make sure everthing returned (except length) is a string
for attribute in ['artist','album','title','track']:
if getattr(tag, attribute) is None:
setattr(tag, attribute, '')
return Metainfo(tag.artist, tag.album, tag.title, str(tag.track), tag.duration)
| 2,022 | Python | .py | 58 | 30.62069 | 83 | 0.68456 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,359 | cherrymodel.py | devsnd_cherrymusic/cherrymusicserver/cherrymodel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""This class is the heart-piece of the program and
will delegate different calls between other classes.
"""
from __future__ import unicode_literals
import os
from random import choice
import codecs
import json
import cherrypy
import audiotranscode
try:
from imp import reload
except ModuleNotFoundError:
from importlib import reload
try:
from urllib.parse import quote
except ImportError:
from backport.urllib.parse import quote
try:
import urllib.request
except ImportError:
import backport.urllib as urllib
import cherrymusicserver as cherry
from cherrymusicserver import service
from cherrymusicserver import pathprovider
from cherrymusicserver.util import Performance
from cherrymusicserver import resultorder
from cherrymusicserver import log
# used for sorting
NUMBERS = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
@service.user(cache='filecache')
class CherryModel:
def __init__(self):
CherryModel.NATIVE_BROWSER_FORMATS = ['opus', 'ogg', 'mp3']
CherryModel.supportedFormats = CherryModel.NATIVE_BROWSER_FORMATS[:]
if cherry.config['media.transcode']:
self.transcoder = audiotranscode.AudioTranscode()
formats = CherryModel.supportedFormats
formats += self.transcoder.available_decoder_formats()
CherryModel.supportedFormats = list(set(formats))
@classmethod
def abspath(cls, path):
return os.path.join(cherry.config['media.basedir'], path)
@classmethod
def fileSortFunc(cls, filepath):
upper = pathprovider.filename(filepath).upper().strip()
return upper
@classmethod
def fileSortFuncNum(cls, filepath):
upper = CherryModel.fileSortFunc(filepath)
# check if the filename starts with a number
if upper.startswith(NUMBERS):
# find index of the first non numerical character:
non_number_index = 0
for idx, char in enumerate(upper):
if not char in NUMBERS:
break
else:
non_number_index += 1
# make sure that numbers are sorted correctly by evening out
# the number in the filename 0-padding up to 5 digits.
return '0'*(5 - non_number_index) + upper
return upper
def sortFiles(self, files, fullpath='', number_ordering=False):
# sort alphabetically (case insensitive)
if number_ordering:
# make sure numbers are sorted correctly
sortedfiles = sorted(files, key=CherryModel.fileSortFuncNum)
else:
sortedfiles = sorted(files, key=CherryModel.fileSortFunc)
if fullpath:
#sort directories up
isfile = lambda x: os.path.isfile(os.path.join(fullpath, x))
sortedfiles = sorted(sortedfiles, key=isfile)
return sortedfiles
def listdir(self, dirpath, filterstr=''):
if dirpath is None:
absdirpath = cherry.config['media.basedir']
else:
absdirpath = CherryModel.abspath(dirpath)
if cherry.config['browser.pure_database_lookup']:
allfilesindir = self.cache.listdir(dirpath) # NOT absdirpath!
else:
in_basedir = (os.path.normpath(absdirpath)+'/').startswith(
cherry.config['media.basedir'])
if not in_basedir:
raise ValueError('dirpath not in basedir: %r' % dirpath)
try:
allfilesindir = os.listdir(absdirpath)
except OSError as e:
log.e(_('Error listing directory %s: %s') % (absdirpath, str(e)))
allfilesindir = []
#remove all files not inside the filter
if filterstr:
filterstr = filterstr.lower()
allfilesindir = [f for f in allfilesindir
if f.lower().startswith(filterstr)]
else:
allfilesindir = [f for f in allfilesindir if not f.startswith('.')]
musicentries = []
maximum_shown_files = cherry.config['browser.maxshowfiles']
compactlisting = len(allfilesindir) > maximum_shown_files
if compactlisting:
upper_case_files = [x.upper() for x in allfilesindir]
filterstr = os.path.commonprefix(upper_case_files)
filterlength = len(filterstr)+1
currentletter = '/' # impossible first character
# don't care about natural number order in compact listing
sortedfiles = self.sortFiles(allfilesindir, number_ordering=False)
for dir in sortedfiles:
filter_match = dir.upper().startswith(currentletter.upper())
if filter_match and not len(currentletter) < filterlength:
continue
else:
currentletter = dir[:filterlength]
#if the filter equals the foldername
if len(currentletter) == len(filterstr):
subpath = os.path.join(absdirpath, dir)
CherryModel.addMusicEntry(subpath, musicentries)
else:
musicentries.append(
MusicEntry(strippath(absdirpath),
repr=currentletter,
compact=True))
else:
# enable natural number ordering for real directories and files
sortedfiles = self.sortFiles(allfilesindir, absdirpath,
number_ordering=True)
for dir in sortedfiles:
subpath = os.path.join(absdirpath, dir)
CherryModel.addMusicEntry(subpath, musicentries)
if cherry.config['media.show_subfolder_count']:
for musicentry in musicentries:
musicentry.count_subfolders_and_files()
return musicentries
@classmethod
def addMusicEntry(cls, fullpath, list):
relpath = strippath(fullpath)
if os.path.isdir(fullpath):
list.append(MusicEntry(relpath, dir=True))
else:
if CherryModel.isplayable(fullpath):
list.append(MusicEntry(relpath))
def updateLibrary(self):
self.cache.full_update()
return True
def file_size_within_limit(self, filelist, maximum_download_size):
acc_size = 0
for f in filelist:
acc_size += os.path.getsize(CherryModel.abspath(f))
if acc_size > maximum_download_size:
return False
return True
def search(self, term):
reload(cherry.tweak)
tweaks = cherry.tweak.CherryModelTweaks
user = cherrypy.session.get('username', None)
if user:
log.d(_("%(user)s searched for '%(term)s'"), {'user': user, 'term': term})
max_search_results = cherry.config['search.maxresults']
results = self.cache.searchfor(term, maxresults=max_search_results)
with Performance(_('sorting DB results using ResultOrder')) as perf:
debug = tweaks.result_order_debug
order_function = resultorder.ResultOrder(term, debug=debug)
results = sorted(results, key=order_function, reverse=True)
results = results[:min(len(results), max_search_results)]
if debug:
n = tweaks.result_order_debug_files
for sortedResults in results[:n]:
perf.log(sortedResults.debugOutputSort)
for sortedResults in results:
sortedResults.debugOutputSort = None # free ram
with Performance(_('checking and classifying results:')):
results = list(filter(CherryModel.isValidMediaEntry, results))
if cherry.config['media.show_subfolder_count']:
for result in results:
result.count_subfolders_and_files()
return results
def check_for_updates(self):
try:
url = 'http://fomori.org/cherrymusic/update_check.php?version='
url += cherry.__version__
urlhandler = urllib.request.urlopen(url, timeout=5)
jsondata = codecs.decode(urlhandler.read(), 'UTF-8')
versioninfo = json.loads(jsondata)
return versioninfo
except Exception as e:
log.e(_('Error fetching version info: %s') % str(e))
return []
def motd(self):
artist = ['Hendrix',
'Miles Davis',
'James Brown',
'Nina Simone',
'Mozart',
'Bach',
'John Coltraine',
'Jim Morrison',
'Frank Sinatra',
'Django Reinhardt',
'Kurt Cobain',
'Thom Yorke',
'Vivaldi',
'Bob Dylan',
'Johnny Cash',
'James Brown',
'Bob Marley',
'Björk']
liquid = ['2 liters of olive oil',
'a glass of crocodile tears',
'a bowl of liquid cheese',
'some battery acid',
'cup of grog',
]
search = ['{artist} can turn diamonds into jelly-beans.',
'The french have some really stinky cheese. It\'s true.',
'{artist} used to eat squids for breakfast.',
'The GEMA wont let me hear {artist}.',
'If {artist} had played with {artist}, they would have made bazillions!',
'{artist} actually stole everything from {artist}.',
'{artist} really liked to listen to {artist}.',
'{artist}\'s music played backwards is actually the same as {artist}. This is how they increased their profit margin!',
'{artist} always turned the volume up to 11.',
'If {artist} made Reggae it sounded like {artist}.',
'{artist} backwards is "{revartist}".',
'2 songs of {artist} are only composed of haikus.',
'{artist} drank {liquid} each morning, sometimes even twice a day.',
'Instead of soap, {artist} used {liquid} to shower.',
'{artist} had a dog the size of {artist}.',
'{artist} was once sued by {artist} for eating all the cake.',
'{artist} named his cat after {artist}. It died two years later by drowning in {liquid}.',
'{artist} once founded a gang, but then had to quit becaus of the pirates. All former gang members became squirrels.',
'{artist}, a.k.a. "Quadnostril" actually had 2 noses. This meant that it was quite hard to be taken seriously.',
'Never put {liquid} and {artist} in the same room. Never ever!',
'{artist} lived twice, once as a human, once as a duck.',
'Nobody ever thought {artist} would still be famous after the great goat-cheese-fiasco.',
'For a long time, nobody knew that {artist} secretly loved wall sockets.',
'In the beginning {artist} was very poor and had to auction off a pinky toe. It is still exhibited in the "museum of disgusting stuff" in paris.',
'{artist} did never mind if somebody made weird noises. Occasionally this was the inspiration for a new song.',
'While creating a huge camp fire {artist} lost all hair. It took years for it to regrow.',
'A rooster isn\'t necessarily better than a balloon. However, {artist} found out that balloons are less heavy.',
'Instead of cars, snow mobiles are often used to move around in the alps. This information has no relevance whatsoever.',
'Creating new life-forms always was a hobby of {artist}. The greatest success was the creation of {artist}.',
]
oneliner = choice(search)
while '{artist}' in oneliner:
a = choice(artist)
oneliner = oneliner.replace('{artist}', a, 1)
if '{revartist}' in oneliner:
oneliner = oneliner.replace('{revartist}', a.lower()[::-1])
if '{liquid}' in oneliner:
oneliner = oneliner.replace('{liquid}', choice(liquid))
return oneliner
def randomMusicEntries(self, count):
loadCount = int(count * 1.5) + 1 # expect 70% valid entries
entries = self.cache.randomFileEntries(loadCount)
filteredEntries = list(filter(CherryModel.isValidMediaEntry, entries))
return filteredEntries[:count]
@classmethod
def isValidMediaEntry(cls, file):
" only existing directories and playable files are valid"
file.path = strippath(file.path)
if file.path.startswith('.'):
return False
abspath = CherryModel.abspath(file.path)
if file.dir:
return os.path.isdir(abspath)
else:
return CherryModel.isplayable(abspath)
@classmethod
def isplayable(cls, fullpath):
'''Checks if the file extension is in the configured 'playable' list and
if the file exists, is indeed a file, and has content.
'''
path = fullpath
ext = os.path.splitext(path)[1][1:]
is_supported_ext = ext and ext.lower() in CherryModel.supportedFormats
is_nonempty_file = os.path.isfile(path) and bool(os.path.getsize(path))
return is_supported_ext and is_nonempty_file
def strippath(path):
if path.startswith(cherry.config['media.basedir']):
return os.path.relpath(path, cherry.config['media.basedir'])
return path
class MusicEntry:
# maximum number of files to be iterated inside of a folder to
# check if there are playable meadia files or other folders inside
MAX_SUB_FILES_ITER_COUNT = 100
def __init__(self, path, compact=False, dir=False, repr=None, subdircount=0, subfilescount=0):
self.path = path
self.compact = compact
self.dir = dir
self.repr = repr
# number of directories contained inside
self.subdircount = subdircount
# number of files contained inside
self.subfilescount = subfilescount
# True when the exact amount of files is too big and is estimated
self.subfilesestimate = False
def count_subfolders_and_files(self):
if self.dir:
self.subdircount = 0
self.subfilescount = 0
fullpath = CherryModel.abspath(self.path)
if not os.path.isdir(fullpath):
# not a dir, or not even there: fail gracefully.
# There are 0 subfolders and 0 files by default.
log.error(
"MusicEntry does not exist: %r", self.path)
return
try:
directory_listing = os.listdir(fullpath)
except OSError as e:
log.e(_('Error listing directory %s: %s') % (fullpath, str(e)))
directory_listing = []
for idx, filename in enumerate(directory_listing):
if idx > MusicEntry.MAX_SUB_FILES_ITER_COUNT:
# estimate remaining file count
self.subfilescount *= len(directory_listing)/float(idx+1)
self.subfilescount = int(self.subfilescount)
self.subdircount *= len(directory_listing)/float(idx+1)
self.subdircount = int(self.subdircount)
self.subfilesestimate = True
return
subfilefullpath = os.path.join(fullpath, filename)
if os.path.isfile(subfilefullpath):
if CherryModel.isplayable(subfilefullpath):
self.subfilescount += 1
else:
self.subdircount += 1
def to_dict(self):
if self.compact:
#compact
return {'type': 'compact',
'urlpath': self.path,
'label': self.repr}
elif self.dir:
#dir
simplename = pathprovider.filename(self.path)
return {'type': 'dir',
'path': self.path,
'label': simplename,
'foldercount': self.subdircount,
'filescount': self.subfilescount,
'filescountestimate': self.subfilesestimate }
else:
#file
simplename = pathprovider.filename(self.path)
urlpath = quote(self.path.encode('utf8'))
return {'type': 'file',
'urlpath': urlpath,
'path': self.path,
'label': simplename}
def __repr__(self):
return "<MusicEntry path:%s, dir:%s>" % (self.path, self.dir)
| 18,039 | Python | .py | 384 | 35.231771 | 164 | 0.598593 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,360 | sqlitecache.py | devsnd_cherrymusic/cherrymusicserver/sqlitecache.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
import os
import re
import sqlite3
import sys
import traceback
from backport.collections import deque, Counter
from contextlib import closing
from operator import itemgetter
try:
from imp import reload
except ModuleNotFoundError:
from importlib import reload
import cherrymusicserver as cherry
from cherrymusicserver import database
from cherrymusicserver import log
from cherrymusicserver import service
from cherrymusicserver import util
from cherrymusicserver.cherrymodel import MusicEntry
from cherrymusicserver.database.connect import BoundConnector
from cherrymusicserver.util import Performance
from cherrymusicserver.progress import ProgressTree, ProgressReporter
import cherrymusicserver.tweak
import random
from backport import unichr
UNIDECODE_AVAILABLE = True
try:
import unidecode
except ImportError:
UNIDECODE_AVAILABLE = False
scanreportinterval = 1
AUTOSAVEINTERVAL = 100
debug = True
keepInRam = False
#if debug:
# log.level(log.DEBUG)
DBNAME = 'cherry.cache'
# unidecode will transform umlauts etc to their ASCII equivalent by
# stripping the accents. This is a simple table for other common
# transformations not performed by unidecode
SPECIAL_LETTER_TRANSFORMS = {
'ä': 'ae',
'ö': 'oe',
'ü': 'ue',
}
class SQLiteCache(object):
def __init__(self, connector=None):
database.require(DBNAME, version='1')
self.normalize_basedir()
connector = BoundConnector(DBNAME, connector)
self.DBFILENAME = connector.dblocation
self.conn = connector.connection()
self.db = self.conn.cursor()
#I don't care about journaling!
with closing(self.conn.execute('PRAGMA synchronous = OFF')):
pass
with closing(self.conn.execute('PRAGMA journal_mode = MEMORY')):
pass
self.load_db_to_memory()
def file_db_in_memory(self):
return not self.DBFILENAME == ':memory:' and cherry.config['search.load_file_db_into_memory']
def load_db_to_memory(self):
if self.file_db_in_memory():
self.file_db_mem = MemoryDB(self.DBFILENAME, 'files')
self.file_db_mem.db.execute('CREATE INDEX IF NOT EXISTS idx_files_parent'
' ON files(parent)')
@classmethod
def searchterms(cls, searchterm):
searchterm = searchterm.replace('_', ' ').replace('%',' ')
words = [
word.lower() for word in
re.findall('(\w+|[^\s\w]+)', searchterm, re.UNICODE)
]
if UNIDECODE_AVAILABLE:
unidecoded = [unidecode.unidecode(word) for word in words]
words += unidecoded
special_transforms = []
for word in words:
if any(char in word for char in SPECIAL_LETTER_TRANSFORMS.keys()):
for char, substitute in SPECIAL_LETTER_TRANSFORMS.items():
word = word.replace(char, substitute)
special_transforms.append(word)
words += special_transforms
return set(words)
def fetchFileIds(self, terms, maxFileIdsPerTerm, mode):
"""returns list of ids each packed in a tuple containing the id"""
assert '' not in terms, _("terms must not contain ''")
resultlist = []
for term in terms:
tprefix, tlast = term[:-1], term[-1]
query = '''SELECT search.frowid FROM dictionary JOIN search ON search.drowid = dictionary.rowid WHERE '''
if sys.maxunicode <= ord(tlast):
where = ''' dictionary.word LIKE ? '''
params = (term + '%',)
else:
where = ''' (dictionary.word >= ? AND dictionary.word < ?) '''
nextchr = unichr(1 + ord(tlast))
params = (term, tprefix + nextchr)
order = ' ORDER BY dictionary.occurrences DESC '
limit = ' LIMIT 0, ' + str(maxFileIdsPerTerm) #TODO add maximum db results as configuration parameter
sql = query + where + order +limit
if debug:
log.d('Search term: %r', term)
log.d('Query used: %r, %r', sql, params)
#print(self.conn.execute('EXPLAIN QUERY PLAN ' + sql, params).fetchall())
self.db.execute(sql, params)
resultlist += [t[0] for t in self.db.fetchall()]
return resultlist
def searchfor(self, value, maxresults=10):
mode = 'normal'
if value.startswith('!f '):
mode = 'fileonly'
value = value[3:]
elif value.endswith(' !f'):
mode = 'fileonly'
value = value[:-3]
elif value.startswith('!d '):
mode = 'dironly'
value = value[3:]
elif value.endswith(' !d'):
mode = 'dironly'
value = value[:-3]
reload(cherrymusicserver.tweak)
file_search_limit = cherrymusicserver.tweak.SearchTweaks.normal_file_search_limit
terms = SQLiteCache.searchterms(value)
with Performance(_('searching for a maximum of %s files') % str(file_search_limit * len(terms))):
if debug:
log.d('searchterms')
log.d(terms)
results = []
maxFileIdsPerTerm = file_search_limit
with Performance(_('file id fetching')):
fileids = self.fetchFileIds(terms, maxFileIdsPerTerm, mode)
if len(fileids) > file_search_limit:
with Performance(_('sorting results by fileid occurrences')):
# sort items by occurrences and only return maxresults
fileids = [
fid[0] for fid in
Counter(fileids).most_common(file_search_limit)
]
if mode == 'normal':
with Performance(_('querying fullpaths for %s fileIds') % len(fileids)):
results += self.musicEntryFromFileIds(fileids)
else:
with Performance(_('querying fullpaths for %s fileIds, files only') % len(fileids)):
results += self.musicEntryFromFileIds(fileids,mode=mode)
if debug:
log.d('resulting paths')
log.d(results)
return results
def listdir(self, path):
basedir = cherry.config['media.basedir']
targetpath = os.path.join(basedir, path)
targetdir = self.db_find_file_by_path(targetpath)
if targetdir is None:
log.e(_('media cache cannot listdir %r: path not in database'), path)
return []
return [f.basename for f in self.fetch_child_files(targetdir)]
def randomFileEntries(self, count):
''' Return a number of random entries from the file cache.
The actual number returned may be less than ``count`` if the
database does not contain enough entries or if randomization hits
directory entries or entries that have been deleted.
'''
assert count >= 0
cursor = self.conn.cursor()
minId = cursor.execute('''SELECT _id FROM files ORDER BY _id ASC LIMIT 1;''').fetchone()
if minId is None:
return () # database is empty
minId = minId[0]
maxId = cursor.execute('''SELECT _id FROM files ORDER BY _id DESC LIMIT 1;''').fetchone()[0]
if sys.version_info < (3,):
genrange = xrange # use generator, not a large list
else:
genrange = range
if maxId - minId < count:
file_ids = genrange(minId, maxId + 1)
else:
# range generator pays off:
file_ids = random.sample(genrange(minId, maxId + 1), count)
entries = self.musicEntryFromFileIds(file_ids, mode='fileonly')
random.shuffle(entries)
return entries
def musicEntryFromFileIds(self, filerowids, incompleteMusicEntries=None, mode='normal'):
reload(cherrymusicserver.tweak)
file_search_limit = cherrymusicserver.tweak.SearchTweaks.normal_file_search_limit
#incompleteMusicEntries maps db parentid to incomplete musicEntry
assert mode in ('normal', 'dironly', 'fileonly'), mode
if incompleteMusicEntries is None:
incompleteMusicEntries = {}
musicEntries = [] #result list
if self.file_db_in_memory():
db = self.file_db_mem.db
else:
db = self.conn
cursor = db.cursor()
sqlquery = ''' SELECT rowid, parent, filename, filetype, isdir
FROM files WHERE rowid IN ({ids})'''.format(
ids=', '.join('?' * len(filerowids)))
sqlparams = tuple(filerowids)
if not incompleteMusicEntries:
#only filter 1st recursion level
if mode != 'normal':
sqlquery += ' AND isdir = ?'
sqlparams += ('dironly' == mode,)
sqlquery += ' LIMIT 0, ?'
sqlparams += (file_search_limit,)
cursor.execute(sqlquery, sqlparams)
for id, parent_id, filename, fileext, isdir in cursor.fetchall():
path = filename + fileext
#check if fetched row is parent of existing entry
if id in incompleteMusicEntries:
#remove item and map to new parent id
entries = incompleteMusicEntries.pop(id)
for entry in entries:
entry.path = os.path.join(path, entry.path)
else:
#id is not parent of any entry, so make a new one
entries = [MusicEntry(path, dir=bool(isdir))]
if parent_id == -1:
#put entries in result list if they've reached top level
musicEntries += entries
else:
#otherwise map parent id to dict
incompleteMusicEntries[parent_id] = incompleteMusicEntries.get(parent_id,[]) + entries
if incompleteMusicEntries:
#recurse for all incomplete entries
musicEntries += self.musicEntryFromFileIds(
incompleteMusicEntries.keys(),
incompleteMusicEntries = incompleteMusicEntries,
mode = mode
)
return musicEntries
def register_file_with_db(self, fileobj):
"""add data in File object to relevant tables in media database"""
try:
self.add_to_file_table(fileobj)
word_ids = self.add_to_dictionary_table(fileobj.name)
self.add_to_search_table(fileobj.uid, word_ids)
return fileobj
except UnicodeEncodeError as e:
log.e(_("wrong encoding for filename '%s' (%s)"), fileobj.relpath, e.__class__.__name__)
def add_to_file_table(self, fileobj):
with closing(self.conn.execute('INSERT INTO files (parent, filename, filetype, isdir) VALUES (?,?,?,?)', (fileobj.parent.uid if fileobj.parent else -1, fileobj.name, fileobj.ext, 1 if fileobj.isdir else 0))) as cursor:
rowid = cursor.lastrowid
fileobj.uid = rowid
return fileobj
def add_to_dictionary_table(self, filename):
word_ids = []
for word in set(SQLiteCache.searchterms(filename)):
with closing(self.conn.execute('''SELECT rowid FROM dictionary WHERE word = ? LIMIT 0,1''', (word,))) as cursor:
wordrowid = cursor.fetchone()
if wordrowid is None:
with closing(self.conn.execute('''INSERT INTO dictionary (word) VALUES (?)''', (word,))) as cursor:
wordrowid = cursor.lastrowid
else:
wordrowid = wordrowid[0]
word_ids.append(wordrowid)
return word_ids
def add_to_search_table(self, file_id, word_id_seq):
with closing(
self.conn.executemany('INSERT INTO search (drowid, frowid) VALUES (?,?)',
((wid, file_id) for wid in word_id_seq))):
pass
def remove_recursive(self, fileobj, progress=None):
'''recursively remove fileobj and all its children from the media db.'''
if progress is None:
log.i(
_('removing dead reference(s): %s "%s"'),
'directory' if fileobj.isdir else 'file',
fileobj.relpath,
)
factory = None
remove = lambda item: self.remove_file(item)
else:
def factory(new, pnt):
if pnt is None:
return (new, None, progress)
return (new, pnt, pnt[2].spawnchild('[-] ' + new.relpath))
remove = lambda item: (self.remove_file(item[0]), item[2].tick())
deld = 0
try:
with self.conn:
for item in self.db_recursive_filelister(fileobj, factory):
remove(item)
deld += 1
except Exception as e:
log.e(_('error while removing dead reference(s): %s'), e)
log.e(_('rolled back to safe state.'))
return 0
else:
return deld
def remove_file(self, fileobj):
'''removes a file entry from the db, which means removing:
- all search references,
- all dictionary words which were orphaned by this,
- the reference in the files table.'''
try:
dead_wordids = self.remove_from_search(fileobj.uid)
self.remove_all_from_dictionary(dead_wordids)
self.remove_from_files(fileobj.uid)
except Exception as exception:
log.ex(exception)
log.e(_('error removing entry for %s'), fileobj.relpath)
raise exception
def remove_from_search(self, fileid):
'''remove all references to the given fileid from the search table.
returns a list of all wordids which had their last search references
deleted during this operation.'''
with closing(self.conn.execute(
'SELECT drowid FROM search' \
' WHERE frowid=?', (fileid,))) as cursor:
foundlist = cursor.fetchall()
wordset = set([t[0] for t in foundlist])
with closing(self.conn.execute('DELETE FROM search WHERE frowid=?', (fileid,))):
pass
for wid in set(wordset):
with closing(self.conn.execute('SELECT count(*) FROM search'
' WHERE drowid=?', (wid,))) as cursor:
count = cursor.fetchone()[0]
if count:
wordset.remove(wid)
return wordset
def remove_all_from_dictionary(self, wordids):
'''deletes all words with the given ids from the dictionary table'''
if not wordids:
return
args = list(zip(wordids))
with closing(self.conn.executemany('DELETE FROM dictionary WHERE rowid=(?)', args)):
pass
def remove_from_files(self, fileid):
'''deletes the given file id from the files table'''
with closing(self.conn.execute('DELETE FROM files WHERE rowid=?', (fileid,))):
pass
def db_recursive_filelister(self, fileobj, factory=None):
"""generator: enumerates fileobj and children listed in the db as File
objects. each item is returned before children are fetched from db.
this means that fileobj gets bounced back as the first return value."""
if factory is None:
queue = deque((fileobj,))
while queue:
item = queue.popleft()
yield item
queue.extend(self.fetch_child_files(item))
else:
queue = deque((factory(fileobj, None),))
child = lambda parent: lambda item: factory(item, parent)
while queue:
item = queue.popleft()
yield item
queue.extend(map(child(item), self.fetch_child_files(item[0])))
def fetch_child_files(self, fileobj, sort=True, reverse=False):
'''fetches from files table a list of all File objects that have the
argument fileobj as their parent.'''
with closing(self.conn.execute(
'SELECT rowid, filename, filetype, isdir' \
' FROM files where parent=?', (fileobj.uid,))) as cursor:
id_tuples = cursor.fetchall()
if sort:
id_tuples = sorted(id_tuples, key=lambda t: t[1], reverse=reverse)
return (File(name + ext,
parent=fileobj,
isdir=False if isdir == 0 else True,
uid=uid) for uid, name, ext, isdir in id_tuples)
def normalize_basedir(self):
basedir = cherry.config['media.basedir']
basedir = os.path.normcase(basedir)
if len(basedir) > 1:
basedir = basedir.rstrip(os.path.sep)
cherry.config = cherry.config.replace({'media.basedir': basedir})
log.d(_('media base directory: %r') % basedir)
@util.timed
def full_update(self):
'''verify complete media database against the filesystem and make
necesary changes.'''
log.i(_('running full update...'))
try:
self.update_db_recursive(cherry.config['media.basedir'], skipfirst=True)
except:
log.e(_('error during media update. database update incomplete.'))
finally:
self.update_word_occurrences()
log.i(_('media database update complete.'))
def partial_update(self, path, *paths):
basedir = cherry.config['media.basedir']
paths = (path,) + paths
log.i(_('updating paths: %s') % (paths,))
for path in paths:
path = os.path.normcase(path)
abspath = path if os.path.isabs(path) else os.path.join(basedir, path)
normpath = os.path.normpath(abspath)
if not normpath.startswith(basedir):
log.e(_('path is not in basedir. skipping %r') % abspath)
continue
log.i(_('updating %r...') % path)
try:
self.update_db_recursive(normpath, skipfirst=False)
except Exception as exception:
log.e(_('update incomplete: %r'), exception)
self.update_word_occurrences()
log.i(_('done updating paths.'))
def update_db_recursive(self, fullpath, skipfirst=False):
'''recursively update the media database for a path in basedir'''
from collections import namedtuple
Item = namedtuple('Item', 'infs indb parent progress')
def factory(fs, db, parent):
fileobj = fs if fs is not None else db
name = fileobj.relpath or fileobj.fullpath if fileobj else '<path not found in filesystem or database>'
if parent is None:
progress = ProgressTree(name=name)
maxlen = lambda s: util.trim_to_maxlen(50, s)
progress.reporter = ProgressReporter(lvl=1, namefmt=maxlen)
else:
progress = parent.progress.spawnchild(name)
return Item(fs, db, parent, progress)
log.d(_('recursive update for %s'), fullpath)
generator = self.enumerate_fs_with_db(fullpath, itemfactory=factory)
skipfirst and generator.send(None)
adds_without_commit = 0
add = 0
deld = 0
try:
with self.conn:
for item in generator:
infs, indb, progress = (item.infs, item.indb, item.progress)
if infs and indb:
if infs.isdir != indb.isdir:
progress.name = '[±] ' + progress.name
deld += self.remove_recursive(indb, progress)
self.register_file_with_db(infs)
adds_without_commit = 1
else:
infs.uid = indb.uid
progress.name = '[=] ' + progress.name
elif indb:
progress.name = '[-] ' + progress.name
deld += self.remove_recursive(indb, progress)
adds_without_commit = 0
continue # progress ticked by remove; don't tick again
elif infs:
self.register_file_with_db(item.infs)
adds_without_commit += 1
progress.name = '[+] ' + progress.name
else:
progress.name = '[?] ' + progress.name
if adds_without_commit == AUTOSAVEINTERVAL:
self.conn.commit()
add += adds_without_commit
adds_without_commit = 0
progress.tick()
except Exception as exc:
log.e(_("error while updating media: %s %s"), exc.__class__.__name__, exc)
log.e(_("rollback to previous commit."))
traceback.print_exc()
raise exc
finally:
add += adds_without_commit
log.i(_('items added %d, removed %d'), add, deld)
self.load_db_to_memory()
def update_word_occurrences(self):
log.i(_('updating word occurrences...'))
with closing(self.conn.execute('''UPDATE dictionary SET occurrences = (
select count(*) from search WHERE search.drowid = dictionary.rowid
)''')):
pass
def enumerate_fs_with_db(self, startpath, itemfactory=None):
'''
Starting at `startpath`, enumerates path items containing representations
for each path as it exists in the filesystem and the database,
respectively.
`startpath` and `basedir` need to be absolute paths, with `startpath`
being a subtree of `basedir`. However, no checks are being promised to
enforce the latter requirement.
Iteration is depth-first, but each path is returned before its children
are determined, to enable recursive corrective action like deleting a
whole directory from the database at once. Accordingly, the first item
to be returned will represent `startpath`. This item is guaranteed to be
returned, even if `startpath` does not exist in filesystem and database;
all other items will have at least one existing representation.
`basedir`, should it happen to equal `startpath`, will be returned as an
item. It is up to the caller to properly deal with it.
Each item has the following attributes: `infs`, a File object
representing the path in the filesystem; `indb`, a File object
representing the path in the database; and `parent`, the parent item.
All three can be None, signifying non-existence.
It is possible to customize item creation by providing an `itemfactory`.
The argument must be a callable with the following parameter signature::
itemfactory(infs, indb, parent [, optional arguments])
and must return an object satisfying the above requirements for an item.
'''
from backport.collections import OrderedDict
basedir = cherry.config['media.basedir']
startpath = os.path.normcase(startpath).rstrip(os.path.sep)
Item = itemfactory
if Item is None:
from collections import namedtuple
Item = namedtuple('Item', 'infs indb parent')
assert os.path.isabs(startpath), _('argument must be an abolute path: "%s"') % startpath
assert startpath.startswith(basedir), _('argument must be a path in basedir (%s): "%s"') % (basedir, startpath)
if not os.path.exists(startpath):
fsobj = None
elif startpath == basedir:
fsobj = File(basedir)
elif startpath > basedir:
pathparent, pathbase = os.path.split(startpath)
fsparent = self.db_find_file_by_path(pathparent, create=True)
assert fsparent is not None, _('parent path not in database: %r') % pathparent
fsobj = File(pathbase, fsparent)
del pathparent, pathbase, fsparent
else:
assert False, _("shouldn't get here! (argument path not in basedir)")
dbobj = self.db_find_file_by_path(startpath)
stack = deque()
stack.append(Item(fsobj, dbobj, None))
while stack:
item = stack.pop()
yield item
dbchildren = {}
if item.indb:
dbchildren = OrderedDict((
(f.basename, f)
for f in self.fetch_child_files(item.indb)
))
if item.infs and item.infs.isdir:
for fs_child in File.inputfilter(item.infs.children()):
db_child = dbchildren.pop(fs_child.basename, None)
stack.append(Item(fs_child, db_child, item))
for db_child in dbchildren.values():
stack.append(Item(None, db_child, item))
del dbchildren
def db_find_file_by_path(self, fullpath, create=False):
'''Finds an absolute path in the file database. If found, returns
a File object matching the database record; otherwise, returns None.
Paths matching a media basedir are a special case: these will yield a
File object with an invalid record id matching the one listed by its
children.
'''
basedir = cherry.config['media.basedir']
fullpath = os.path.normpath(fullpath)
if os.path.isabs(fullpath):
if not fullpath.startswith(basedir):
return None
else:
fullpath = os.path.join(basedir, fullpath)
relpath = fullpath[len(basedir):].strip(os.path.sep)
root = File(basedir, isdir=True, uid= -1)
if not relpath:
return root
file = root
for part in relpath.split(os.path.sep):
found = False
for child in self.fetch_child_files(file): # gotta be ugly: don't know if name/ext split in db
if part == child.basename:
found = True
file = child
break
if not found:
if create:
file = File(part, parent=file)
log.i(_('creating database entry for %r'), file.relpath)
self.register_file_with_db(file)
else:
return None
return file
if sys.version_info < (3,):
from codecs import decode
encoding = sys.getfilesystemencoding()
is_unicode = lambda s: isinstance(s, type('')) # from unicode_literals import
def _unicode_listdir(dirname):
for name in os.listdir(dirname):
try:
yield (name if is_unicode(name) else decode(name, encoding))
except UnicodeError:
log.e(_('unable to decode filename %r in %r; skipping.'),
name, dirname)
else:
_unicode_listdir = os.listdir
class File():
def __init__(self, path, parent=None, isdir=None, uid= -1):
assert isinstance(path, type('')), _('expecting unicode path, got %s') % type(path)
if len(path) > 1:
path = path.rstrip(os.path.sep)
if parent is None:
self.root = self
self.basepath = os.path.dirname(path)
self.basename = os.path.basename(path)
else:
if os.path.sep in path:
raise ValueError(_('non-root filepaths must be direct relative to parent: path: %s, parent: %s') % (path, parent))
self.root = parent.root
self.basename = path
self.uid = uid
self.parent = parent
if isdir is None:
self.isdir = os.path.isdir(os.path.abspath(self.fullpath))
else:
self.isdir = isdir
def __str__(self):
return self.fullpath
def __repr__(self):
return ('%(fp)s%(isdir)s [%(n)s%(x)s] (%(id)s)%(pid)s' %
{'fp': self.fullpath,
'isdir': '/' if self.isdir else '',
'n': self.name,
'x': self.ext,
'id': self.uid,
'pid': ' -> ' + str(self.parent.uid) if self.parent and self.parent.uid > -1 else ''
})
@property
def relpath(self):
'''this File's path relative to its root'''
up = self
components = deque()
while up != self.root:
components.appendleft(up.basename)
up = up.parent
return os.path.sep.join(components)
@property
def fullpath(self):
'''this file's relpath with leading root path'''
fp = os.path.join(self.root.basepath, self.root.basename, self.relpath)
if len(fp) > 1:
fp = fp.rstrip(os.path.sep)
return fp
@property
def name(self):
'''if this file.isdir, its complete basename; otherwise its basename
without extension suffix'''
if self.isdir:
name = self.basename
else:
name = os.path.splitext(self.basename)[0]
return name
@property
def ext(self):
'''if this file.isdir, the empty string; otherwise the extension suffix
of its basename'''
if self.isdir:
ext = ''
else:
ext = os.path.splitext(self.basename)[1]
return ext
@property
def exists(self):
'''True if this file's fullpath exists in the filesystem'''
return os.path.exists(self.fullpath)
@property
def islink(self):
'''True if this file is a symbolic link'''
return os.path.islink(self.fullpath)
def children(self, sort=True, reverse=True):
'''If self.isdir and self.exists, return an iterable of fileobjects
corresponding to its direct content (non-recursive).
Otherwise, log an error and return ().
'''
try:
content = _unicode_listdir(self.fullpath)
if sort:
content = sorted(content, reverse=reverse)
return (File(name, parent=self) for name in content)
except OSError as error:
log.e(_('cannot list directory: %s'), error)
return ()
@classmethod
def inputfilter(cls, files_iter):
basedir = cherry.config['media.basedir']
for f in files_iter:
if not f.exists:
log.e(_('file not found: %s. skipping.' % f.fullpath))
continue
if not f.fullpath.startswith(basedir):
log.e(_('file not in basedir: %s. skipping.') % f.fullpath)
continue
if f.islink and not os.path.isfile(f.fullpath):
rp = os.path.realpath(f.fullpath)
if os.path.abspath(basedir).startswith(rp) \
or (os.path.islink(basedir)
and
os.path.realpath(basedir).startswith(rp)):
log.e(_(("Cyclic symlink found: %s creates a circle "
"if followed. Skipping.")) % f.relpath)
continue
if not (f.parent is None or f.parent.parent is None):
log.e(_(("Deeply nested directory symlink found: %s . "
"All symlinks to directories "
"must be directly in your basedir (%s). The "
"program cannot safely handle them otherwise."
" Skipping.")) % (f.relpath, os.path.abspath(basedir)))
continue
yield f
class MemoryDB:
def __init__(self, db_file, table_to_dump):
log.i(_("Loading files database into memory..."))
self.db = sqlite3.connect(':memory:', check_same_thread=False)
cu = self.db.cursor()
cu.execute('attach database "%s" as attached_db' % db_file)
cu.execute("select sql from attached_db.sqlite_master "
"where type='table' and name='" + table_to_dump + "'")
sql_create_table = cu.fetchone()[0]
cu.execute(sql_create_table);
cu.execute("insert into " + table_to_dump +
" select * from attached_db." + table_to_dump)
self.db.commit()
cu.execute("detach database attached_db")
| 33,822 | Python | .py | 735 | 34.306122 | 226 | 0.583245 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,361 | pathprovider.py | devsnd_cherrymusic/cherrymusicserver/pathprovider.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import codecs
import hashlib
import os
import sys
userDataFolderName = 'cherrymusic' # $XDG_DATA_HOME/userDataFolderName
pidFileName = 'cherrymusic.pid' # $XDG_DATA_HOME/userDataFolderName/cherrymusic.pid
configFolderName = 'cherrymusic' # $XDG_CONFIG_HOME/configFolderName
configFileName = 'cherrymusic.conf' # $XDG_CONFIG_HOME/configFolderName/cherrymusic.conf
sharedFolderName = 'cherrymusic' # /usr/share/sharedFolderName
def isWindows():
return sys.platform.startswith('win')
def isLinux():
return sys.platform.startswith('linux')
def isOSX():
return sys.platform.startswith('darwin')
def getUserDataPath():
userdata = ''
if isLinux():
if 'XDG_DATA_HOME' in os.environ:
userdata = os.path.join(os.environ['XDG_DATA_HOME'],userDataFolderName)
else:
userdata = os.path.join(os.path.expanduser('~'), '.local', 'share', userDataFolderName)
elif isWindows():
userdata = os.path.join(os.environ['APPDATA'],'cherrymusic')
elif isOSX():
userdata = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support',userDataFolderName)
if not userdata:
userdata = fallbackPath()
assureFolderExists(userdata,['db','albumart','sessions'])
return userdata
def getConfigPath():
if len(sys.argv) > 2 and (sys.argv[1] == '-c' or sys.argv[1] == '--config-path') and os.path.exists(sys.argv[2]):
return sys.argv[2]
else:
configpath = ''
if isLinux():
if 'XDG_CONFIG_HOME' in os.environ:
configpath = os.path.join(os.environ['XDG_CONFIG_HOME'], configFolderName)
else:
configpath = os.path.join(os.path.expanduser('~'), '.config', configFolderName)
elif isWindows():
configpath = os.path.join(os.environ['APPDATA'],configFolderName)
elif isOSX():
configpath = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support', configFolderName)
if not configpath:
configpath = fallbackPath()
assureFolderExists(configpath)
return configpath
def fallbackPath():
return os.path.join(os.path.expanduser('~'), '.cherrymusic')
def fallbackPathInUse():
for _, _, files in os.walk(fallbackPath()):
if files:
return True
return False
def pidFile():
return os.path.join(getUserDataPath(), pidFileName)
def pidFileExists():
return os.path.exists(pidFile())
def licenseFile():
owndir = os.path.dirname(__file__)
basedir = os.path.split(owndir)[0] or '.'
basedir = os.path.abspath(basedir)
return os.path.join(basedir, 'COPYING')
def configurationFile():
return os.path.join(getConfigPath(), configFileName)
def configurationFileExists():
return os.path.exists(configurationFile())
def absOrConfigPath(filepath):
if os.path.isabs(filepath):
path = filepath
else:
path = os.path.join(getConfigPath(), filepath)
return os.path.normpath(path)
def databaseFilePath(filename):
configdir = os.path.join(getUserDataPath(), 'db')
if not os.path.exists(configdir):
os.makedirs(configdir)
configpath = os.path.join(configdir, filename)
return configpath
def albumArtFilePath(directorypath):
albumartcachepath = os.path.join(getUserDataPath(), 'albumart')
if not os.path.exists(albumartcachepath):
os.makedirs(albumartcachepath)
if directorypath:
filename = _md5_hash(directorypath) + '.thumb'
albumartcachepath = os.path.join(albumartcachepath, filename)
return albumartcachepath
def assureFolderExists(folder,subfolders=['']):
for subfolder in subfolders:
dirpath = os.path.join(folder, subfolder)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def readRes(path):
with codecs.open(getResourcePath(path), encoding="utf-8") as f:
return f.read()
def getResourcePath(path):
RESOURCE_PATHS = []
if isLinux():
# check share first
RESOURCE_PATHS.append(os.path.join(sys.prefix, 'share', sharedFolderName))
# otherwise check local/share
RESOURCE_PATHS.append(os.path.join(sys.prefix, 'local', 'share', sharedFolderName))
# otherwise check local install
RESOURCE_PATHS.append(os.path.dirname(os.path.dirname(__file__)))
# lastly check homedir
RESOURCE_PATHS.append(getUserDataPath())
for prefixpath in RESOURCE_PATHS:
respath = os.path.join(prefixpath, path)
if os.path.exists(respath):
return respath
raise ResourceNotFound(
"Couldn't locate {path!r} in any {res!r}!".format(path=path, res=RESOURCE_PATHS)
)
class ResourceNotFound(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
def filename(path, pathtofile=False):
if pathtofile:
return os.path.split(path)[0]
else:
return os.path.split(path)[1]
def stripext(filename):
if '.' in filename:
return filename[:filename.rindex('.')]
return filename
def _md5_hash(s):
utf8_bytestr = codecs.encode(s, 'UTF-8')
return hashlib.md5(utf8_bytestr).hexdigest()
| 6,286 | Python | .py | 161 | 33.875776 | 117 | 0.694162 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,362 | __init__.py | devsnd_cherrymusic/cherrymusicserver/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
VERSION = "0.41.4"
__version__ = VERSION
DESCRIPTION = "an mp3 server for your browser"
LONG_DESCRIPTION = """CherryMusic is a music streaming
server written in python. It's based on cherrypy and jPlayer.
You can search your collection, create and share playlists with
other users. It's able to play music on almost all devices since
it happens in your browser and uses HTML5 for audio playback.
"""
from backport import input
import re
import os
import codecs
import sys
import threading
import signal
import logging
logger = logging.getLogger(__name__)
import gettext
from cherrymusicserver import pathprovider
if sys.version_info < (3,):
gettext.install('default', unicode=True, localedir=pathprovider.getResourcePath('res/i18n'))
else:
gettext.install('default', localedir=pathprovider.getResourcePath('res/i18n'))
# woraround for cherrypy 3.2.2:
# https://bitbucket.org/cherrypy/cherrypy/issue/1163/attributeerror-in-cherrypyprocessplugins
if sys.version_info >= (3, 3):
threading._Timer = threading.Timer
import cherrypy
def version():
return """CherryMusic Server {cm_version}
a standalone music server
Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner""".format(cm_version=VERSION)
def info():
import locale
import platform
from audiotranscode import AudioTranscode
audiotranscode = AudioTranscode()
encoders = ['%s (%s)' % (enc.filetype, enc.command[0])
for enc in audiotranscode.available_encoders]
decoders = ['%s (%s)' % (enc.filetype, enc.command[0])
for enc in audiotranscode.available_decoders]
return """CherryMusic Server {cm_version}
CherryPy: {cp_version}
Python: {py_version}
Platform: {platform}
configuration dir:
{confdir}
server data dir:
{datadir}
static resources dir:
{resourcedir}
server package dir:
{packdir}
process working dir:
{workdir}
locale: {locale}, default: {deflocale}
filesystem encoding: {fs_encoding}
Available Decoders:
{decoders}
Available Encoders:
{encoders}
(Do not parse this output.)""".format(
cm_version=REPO_VERSION or VERSION,
cp_version=cherrypy.__version__,
py_version=platform.python_implementation() + ' ' + platform.python_version(),
platform=platform.platform(),
workdir=os.path.abspath(os.curdir),
packdir=os.path.abspath(__path__[0]),
confdir=pathprovider.getConfigPath(),
datadir=pathprovider.getUserDataPath(),
resourcedir=pathprovider.getResourcePath(''),
locale=str(locale.getlocale()),
deflocale=str(locale.getdefaultlocale()),
fs_encoding=sys.getfilesystemencoding(),
encoders='\n '.join(encoders),
decoders='\n '.join(decoders),
)
# patch cherrypy crashing on startup because of double checking
# for loopback interface, see:
# https://bitbucket.org/cherrypy/cherrypy/issue/1100/cherrypy-322-gives-engine-error-when
def fake_wait_for_occupied_port(host, port):
return
cherrypy.process.servers.wait_for_occupied_port = fake_wait_for_occupied_port
# end of port patch
try:
cherrypy_version = tuple(int(v) for v in cherrypy.__version__.split('.'))
min_major_cherrypy_version = 3
if cherrypy_version[0] < min_major_cherrypy_version:
print(_(
'cherrypy version is too old!\n'
'Current version: %s\n'
'Required version: %s or higher\n'
) % (cherrypy.__version__, min_major_cherrypy_version))
sys.exit(1)
except Exception as exc:
logger.error(_(
'Could not determine cherrypy version. Please install cherrypy '
'using pip or your OS\'s package manager. Trying to detect version '
'automatically.'
))
cherrypy_version = 'unknown'
# trying to detect the version to determine if we need to monkeypatch cherrypy
if cherrypy_version == 'unknown':
# decorator `cherrypy._cptools.register` was added between 5.4 and 5.5
# https://github.com/cherrypy/cherrypy/pull/1428
# commit: dff09e92fb2e83fb4248826c9bc14cd3b6281706
if 'register' in dir(cherrypy._cptools.Toolbox):
needs_serve_file_utf8_fix = False
else:
needs_serve_file_utf8_fix = True
else:
needs_serve_file_utf8_fix = cherrypy_version < (5, 5)
if needs_serve_file_utf8_fix:
# workaround for cherrypy < 5.5.0 not using unicode strings for URI, see:
# https://bitbucket.org/cherrypy/cherrypy/issue/1148/wrong-encoding-for-urls-containing-utf-8
cherrypy.lib.static.__serve_file = cherrypy.lib.static.serve_file
def serve_file_utf8_fix(path, content_type=None, disposition=None,
name=None, debug=False):
if sys.version_info >= (3,):
#python3+
# see also below: mirrored mangling of basedir for '/serve' static dir
path = codecs.decode(codecs.encode(path, 'latin-1'), 'utf-8')
return cherrypy.lib.static.__serve_file(path, content_type,
disposition, name, debug)
cherrypy.lib.static.serve_file = serve_file_utf8_fix
# end of unicode workaround
from cherrymusicserver import configuration as cfg
config = None
from cherrymusicserver import cherrymodel
from cherrymusicserver import database
from cherrymusicserver import httphandler
from cherrymusicserver import log
from cherrymusicserver import migrations
from cherrymusicserver import playlistdb
from cherrymusicserver import service
from cherrymusicserver import sqlitecache
from cherrymusicserver import userdb
from cherrymusicserver import useroptiondb
from cherrymusicserver import api
import audiotranscode
MEDIA_MIMETYPES = audiotranscode.MIMETYPES.copy()
del audiotranscode
def setup_services():
""" services can be used by other parts of the program to easily access
different functions of cherrymusic by registering themselves as
service.user
See :mod:`~cherrymusicserver.services`.
"""
service.provide('filecache', sqlitecache.SQLiteCache)
service.provide('cherrymodel', cherrymodel.CherryModel)
service.provide('playlist', playlistdb.PlaylistDB)
service.provide('users', userdb.UserDB)
service.provide('useroptions', useroptiondb.UserOptionDB)
service.provide('dbconnector', database.sql.SQLiteConnector, kwargs={
'datadir': pathprovider.databaseFilePath(''),
'extension': 'db',
'connargs': {'check_same_thread': False},
})
def setup_config(override_dict=None):
""" Updates the internal configuration using the following hierarchy:
override_dict > file_config > default_config
Notifies the user if there are new or deprecated configuration keys.
See :mod:`~cherrymusicserver.configuration`.
"""
defaults = cfg.from_defaults()
filecfg = cfg.from_configparser(pathprovider.configurationFile())
custom = defaults.replace(filecfg, on_error=log.e)
if override_dict:
custom = custom.replace(override_dict, on_error=log.e)
global config
config = custom
_notify_about_config_updates(defaults, filecfg)
def run_general_migrations():
""" Runs necessary migrations for CherryMusic data that is NOT kept inside
of databases.
This might however include relocating the database files tmhemselves,
so general migrations should run before migrating the database content.
See :mod:`~cherrymusicserver.migrations`.
"""
migrations.check_and_migrate_all()
def migrate_databases():
""" Makes sure CherryMusic's databases are up to date, migrating them if
necessary.
This might prompt the user for consent if a migration requires it and
terminate the program if no consent is obtained.
See :mod:`~cherrymusicserver.databases`.
"""
db_is_ready = database.ensure_current_version(
consentcallback=_get_user_consent_for_db_schema_update)
if not db_is_ready:
log.i(_("database schema update aborted. quitting."))
sys.exit(1)
def start_server(cfg_override=None):
""" Initializes and starts the CherryMusic server
Args:
cfg_override: A mapping of config keys to values to override those
in the config file.
"""
CherryMusic(cfg_override)
def create_user(username, password):
""" Creates a non-admin user with given username and password """
non_alnum = re.compile('[^a-z0-9]', re.IGNORECASE)
if non_alnum.findall(username):
log.e(_('usernames may only contain letters and digits'))
return False
return service.get('users').addUser(username, password, admin=False)
def delete_user(username):
userservice = service.get('users')
userid = userservice.getIdByName(username)
if userid is None:
log.e(_('user with the name "%s" does not exist!'), username)
return False
return userservice.deleteUser(userid)
def change_password(username, password):
userservice = service.get('users')
result = userservice.changePassword(username, password)
return result == 'success'
def update_filedb(paths):
""" Updates the file database in a separate thread,
possibly limited to a sequence of paths inside media.basedir
See :cls:`~cherrymusicserver.sqlitecache.SQLiteCache` methods
:meth:`~cherrymusicserver.sqlitecache.SQLiteCache.full_update` and
:meth:`~cherrymusicserver.sqlitecache.SQLiteCache.parital_update`.
"""
cache = sqlitecache.SQLiteCache()
target = cache.partial_update if paths else cache.full_update
updater = threading.Thread(name='Updater', target=target, args=paths)
updater.start()
def create_default_config_file(path):
""" Creates or overwrites a default configuration file at `path` """
cfg.write_to_file(cfg.from_defaults(), path)
log.i(_('Default configuration file written to %(path)r'), {'path': path})
class CherryMusic:
"""Sets up services (configuration, database, etc) and starts the server"""
def __init__(self, cfg_override=None):
self.setup_config(cfg_override)
setup_services()
if config['media.basedir'] is None:
print(_("Invalid basedir. Please provide a valid basedir path."))
sys.exit(1)
else:
log.debug("Basedir is %r", config['media.basedir'])
signal.signal(signal.SIGTERM, CherryMusic.stopAndCleanUp)
signal.signal(signal.SIGINT, CherryMusic.stopAndCleanUp)
if os.name == 'posix':
signal.signal(signal.SIGHUP, CherryMusic.stopAndCleanUp)
CherryMusic.create_pid_file()
self.start_server(httphandler.HTTPHandler(config))
CherryMusic.delete_pid_file()
@classmethod
def createUser(cls, credentials):
""" .. deprecated:: > 0.34.1
Use :func:`~cherrymusicserver.create_user` instead.
"""
username, password = credentials
return create_user(username, password)
@classmethod
def stopAndCleanUp(cls, signal=None, stackframe=None):
"""Delete the process id file and exit"""
CherryMusic.delete_pid_file()
print('Exiting...')
sys.exit(0)
@classmethod
def create_pid_file(cls):
"""create a process id file, exit if it already exists"""
if pathprovider.pidFileExists():
with open(pathprovider.pidFile(), 'r') as pidfile:
try:
if not sys.platform.startswith('win'):
# this call is only available on unix systems and throws
# an OSError if the process does not exist.
os.getpgid(int(pidfile.read()))
sys.exit(_("""============================================
Process id file %s already exists.
If you are sure that cherrymusic is not running, you can delete this file and restart cherrymusic.
============================================""") % pathprovider.pidFile())
except OSError:
print('Stale process id file, removing.')
cls.delete_pid_file()
with open(pathprovider.pidFile(), 'w') as pidfile:
pidfile.write(str(os.getpid()))
@classmethod
def delete_pid_file(cls):
"""Delete the process id file, if it exists"""
if pathprovider.pidFileExists():
os.remove(pathprovider.pidFile())
else:
print(_("Error removing pid file, doesn't exist!"))
@classmethod
def setup_services(cls):
"""setup services: they can be used by other parts of the program
to easily access different functions of cherrymusic by registering
themselves as service.user
.. deprecated:: > 0.34.1
Use :func:`~cherrymusicserver.setup_services` instead.
"""
setup_services()
def setup_config(self, cfg_override):
""".. deprecated:: > 0.34.1
Use :func:`~cherrymusicserver.setup_config` instead.
"""
setup_config(cfg_override)
def setup_databases(self):
""" check if the db schema is up to date
.. deprecated:: > 0.34.1
Use :func:`~cherrymusicserver.migrate_databases` instead.
"""
migrate_databases()
def start_server(self, httphandler):
"""use the configuration to setup and start the cherrypy server
"""
cherrypy.config.update({'log.screen': True})
ipv6_enabled = config['server.ipv6_enabled']
if config['server.localhost_only']:
socket_host = "::1" if ipv6_enabled else "127.0.0.1"
else:
socket_host = "::" if ipv6_enabled else "0.0.0.0"
resourcedir = os.path.abspath(pathprovider.getResourcePath('res'))
if config['server.ssl_enabled']:
cert = pathprovider.absOrConfigPath(config['server.ssl_certificate'])
pkey = pathprovider.absOrConfigPath(config['server.ssl_private_key'])
cherrypy.config.update({
'server.ssl_certificate': cert,
'server.ssl_private_key': pkey,
'server.socket_port': config['server.ssl_port'],
})
# Create second server for redirecting http to https:
redirecter = cherrypy._cpserver.Server()
redirecter.socket_port = config['server.port']
redirecter._socket_host = socket_host
redirecter.thread_pool = 10
redirecter.subscribe()
else:
cherrypy.config.update({
'server.socket_port': config['server.port'],
})
cherrypy.config.update({
'log.error_file': os.path.join(
pathprovider.getUserDataPath(), 'server.log'),
'environment': 'production',
'server.socket_host': socket_host,
'server.thread_pool': 30,
'tools.sessions.on': True,
'tools.sessions.timeout': int(config.get('server.session_duration', 60 * 24)),
})
if not config['server.keep_session_in_ram']:
sessiondir = os.path.join(
pathprovider.getUserDataPath(), 'sessions')
if not os.path.exists(sessiondir):
os.mkdir(sessiondir)
cherrypy.config.update({
'tools.sessions.storage_type': "file",
'tools.sessions.storage_path': sessiondir,
})
basedirpath = config['media.basedir']
if sys.version_info < (3,0):
basedirpath = codecs.encode(basedirpath, 'utf-8')
scriptname = codecs.encode(config['server.rootpath'], 'utf-8')
else:
if needs_serve_file_utf8_fix:
# fix cherrypy unicode issue (only for Python3)
# see patch to cherrypy.lib.static.serve_file way above and
# https://bitbucket.org/cherrypy/cherrypy/issue/1148/wrong-encoding-for-urls-containing-utf-8
basedirpath = codecs.decode(codecs.encode(basedirpath, 'utf-8'), 'latin-1')
scriptname = config['server.rootpath']
cherrypy.tree.mount(
httphandler, scriptname,
config={
'/res': {
'tools.staticdir.on': True,
'tools.staticdir.dir': resourcedir,
'tools.staticdir.index': 'index.html',
'tools.caching.on': False,
'tools.gzip.mime_types': ['text/html', 'text/plain', 'text/javascript', 'text/css'],
'tools.gzip.on': True,
},
'/serve': {
'tools.staticdir.on': True,
'tools.staticdir.dir': basedirpath,
# 'tools.staticdir.index': 'index.html', if ever needed: in py2 MUST utf-8 encode
'tools.staticdir.content_types': MEDIA_MIMETYPES,
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
'tools.caching.on': False,
'tools.cm_auth.on': True,
'tools.cm_auth.httphandler': httphandler,
},
'/favicon.ico': {
'tools.staticfile.on': True,
'tools.staticfile.filename': resourcedir + '/img/favicon.ico',
}})
api.v1.mount('/api/v1')
log.i(_('Starting server on port %s ...') % config['server.port'])
cherrypy.lib.caching.expires(0) # disable expiry caching
cherrypy.engine.start()
cherrypy.engine.block()
def _cm_auth_tool(httphandler):
if not httphandler.isAuthorized():
raise cherrypy.HTTPError(403)
cherrypy.tools.cm_auth = cherrypy.Tool(
'before_handler', _cm_auth_tool, priority=70)
# priority=70 -->> make tool run after session is locked (at 50)
def _get_user_consent_for_db_schema_update(reasons):
"""Ask the user if the database schema update should happen now
"""
import textwrap
wrap = lambda r: os.linesep.join(
textwrap.wrap(r, initial_indent=' - ', subsequent_indent=" "))
msg = _("""
==========================================================================
A database schema update is needed and requires your consent.
{reasons}
To continue without changes, you need to downgrade to an earlier
version of CherryMusic.
To backup your database files first, abort for now and find them here:
{dblocation}
==========================================================================
Run schema update? [y/N]: """).format(
reasons=(2 * os.linesep).join(wrap(r) for r in reasons),
dblocation='\t' + pathprovider.databaseFilePath(''))
return input(msg).lower().strip() in ('y',)
def _notify_about_config_updates(default, known_config):
"""check if there are new or deprecated configuration keys in
the config file
"""
new = []
deprecated = []
transform = lambda s: '[{0}]: {2}'.format(*(s.partition('.')))
for property in cfg.to_list(default):
if property.key not in known_config and not property.hidden:
new.append(transform(property.key))
for property in cfg.to_list(known_config):
if property.key not in default:
deprecated.append(transform(property.key))
if new:
log.i(_('''New configuration options available:
%s
Using default values for now.'''),
'\n\t\t\t'.join(new))
if deprecated:
log.i(_('''The following configuration options are not used anymore:
%s'''),
'\n\t\t\t'.join(deprecated))
if new or deprecated:
log.i(_('Start with --newconfig to generate a new default config'
' file next to your current one.'))
def _get_version_from_git():
""" Returns more precise version string based on the current git HEAD,
or None if not possible.
"""
if not os.path.isdir('.git'):
return None
def fetch(cmdname):
import re
from subprocess import Popen, PIPE
cmd = {
'branch': ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
'version': ['git', 'describe', '--tags'],
'date': ['git', 'log', '-1', '--format=%cd'],
}
unwanted_characters = re.compile('[^\w.-]+')
try:
with open(os.devnull, 'w') as devnull:
p = Popen(cmd[cmdname], stdout=PIPE, stderr=devnull)
out, err = p.communicate() # blocks until process terminates
except:
return None
if out:
out = out.decode('ascii', 'ignore')
out = unwanted_characters.sub('', out).strip()
return out
branch = fetch('branch')
version = fetch('version')
if branch and version and '-' in version:
version, patchlevel = version.split('-', 1)
if version == VERSION: # sanity check: latest tag is for VERSION
return '{0}+{1}-{2}'.format(version, branch, patchlevel)
return None
REPO_VERSION = _get_version_from_git()
| 22,205 | Python | .py | 512 | 35.654297 | 109 | 0.647407 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,363 | log.py | devsnd_cherrymusic/cherrymusicserver/log.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# pylint: disable=W0611
from backport import logging
import logging.config
import inspect
import os
import sys
from cherrymusicserver import pathprovider
from logging import NOTSET, DEBUG, INFO, WARN, WARNING, ERROR, CRITICAL, FATAL
LOGLEVEL = INFO
class RelocateLoggingCall(logging.Filter):
'''using this module's logging methods puts some misleading values into
standard log record attributes, especially pertaining to the origin of
the logging call. this filter corrects them with the help of
extended attributes added by _get_logger()'''
def filter(self, record):
has_org = lambda seq: False if not seq else True if seq[0].startswith('org_') else has_org(seq[1:])
if has_org(dir(record)):
record.lineno = record.org_lineno
record.funcName = record.org_funcName
record.pathname = record.org_pathname
return 1
relocator = RelocateLoggingCall()
class LowPass(logging.Filter):
def __init__(self, cutoff):
self.cutoff = cutoff
def filter(self, record):
return 1 if record.levelno < self.cutoff else 0
formatter_briefest = logging.Formatter(fmt='[%(asctime)s] %(message)s', datefmt='%y%m%d-%H:%M')
formatter_brief = logging.Formatter(fmt='[%(asctime)s] %(levelname)-8s: %(message)s', datefmt='%y%m%d-%H:%M')
formatter_full = logging.Formatter(fmt=('-'*80)+ '\n%(levelname)-8s [%(asctime)s] : %(name)-20s : from line (%(lineno)d) at\n\t%(pathname)s\n\t--\n\t%(message)s\n')
handler_console = logging.StreamHandler(stream=sys.stdout)
handler_console.formatter = formatter_briefest
handler_console.level = DEBUG
handler_console.addFilter(LowPass(WARNING))
handler_console.addFilter(relocator)
handler_console_priority = logging.StreamHandler(stream=sys.stderr)
handler_console_priority.formatter = formatter_brief
handler_console_priority.level = WARNING
handler_console_priority.addFilter(relocator)
handler_file_error = logging.FileHandler(os.path.join(pathprovider.getUserDataPath(), 'error.log'), mode='a', delay=True)
handler_file_error.formatter = formatter_full
handler_file_error.level = ERROR
handler_file_error.addFilter(relocator)
logging.root.setLevel(LOGLEVEL)
logging.root.addHandler(handler_console)
logging.root.addHandler(handler_console_priority)
logging.root.addHandler(handler_file_error)
testlogger = logging.getLogger('test')
testlogger.setLevel(CRITICAL)
testlogger.addHandler(handler_console)
testlogger.addHandler(handler_console_priority)
testlogger.propagate = False
logging.getLogger('cherrypy.error').setLevel(WARNING)
def debug(msg, *args, **kwargs):
'''logs a message with severity DEBUG on the caller's module logger.
uses the root logger if caller has no module.'''
_get_logger().debug(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
'''logs a message with severity INFO on the caller's module logger.
uses the root logger if caller has no module.'''
_get_logger().info(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
'''logs a message with severity WARN on the caller's module logger.
uses the root logger if caller has no module.'''
_get_logger().warning(msg, *args, **kwargs)
def error(msg, *args, **kwargs):
'''logs a message with severity ERROR on the caller's module logger.
uses the root logger if caller has no module.'''
_get_logger().error(msg, *args, **kwargs)
def critical(msg, *args, **kwargs):
'''logs a message with severity CRITICAL on the caller's module logger.
uses the root logger if caller has no module.'''
_get_logger().critical(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
'''logs a message with severity ERROR on the caller's module logger,
including exception information. uses the root logger if caller
has no module.'''
_get_logger().exception(msg, *args, **kwargs)
def level(lvl):
'''sets the level for the caller's module logger, or, if there is no
module, the root logger. `lvl` is an int as defined in logging, or
a corresponding string respresentation.'''
_get_logger().setLevel(lvl)
__istest = False
def setTest(state=True):
global __istest
__istest = state
d = debug
i = info
w = warn
e = error
c = critical
ex = exception
x = exception
warning = warn
def _get_logger():
'''find out the caller's module name and get or create a corresponding
logger. if caller has no module, return root logger.'''
if __istest:
return testlogger
caller_frm = inspect.stack()[2]
caller_mod = inspect.getmodule(caller_frm[0])
name = None if caller_mod is None else caller_mod.__name__
orgpath = caller_frm[1]
orgfile = os.path.basename(orgpath)
caller_info = {
'org_filename': orgfile,
'org_lineno': caller_frm[2],
'org_funcName': caller_frm[3],
#'org_module': name if name else os.path.splitext(orgfile)[0],
'org_pathname': orgpath,
}
logger = logging.LoggerAdapter(logging.getLogger(name), caller_info)
return logger
| 6,190 | Python | .py | 144 | 39.090278 | 164 | 0.721019 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,364 | progress.py | devsnd_cherrymusic/cherrymusicserver/progress.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from time import time
from cherrymusicserver import log
from cherrymusicserver import util
class Progress(object):
"""Simple, timed progress tracking.
Based on the notion the time to complete a task can be broken up into
evenly spaced ticks, when a good estimate of total ticks
is known. Estimates time remaining from the time taken for past ticks.
The timer starts on the first tick."""
def __init__(self, ticks, name=''):
assert ticks > 0, "expected ticks must be > 0"
self._ticks = 0
self._expected_ticks = ticks
self._starttime = time()
self._finished = False
self._finishtime = 0
self.name = name
def _start(self):
self._starttime = time()
def tick(self):
"""Register a tick with this object. The first tick starts the timer."""
if self._ticks == 0:
self._start()
self._ticks += 1
def finish(self):
"""Mark this progress as finished. Setting this is final."""
self._finished = True
self._finishtime = time()
def formatstr(self, fstr, *args):
add = ''.join(list(args))
fstr = fstr % {'eta': self.etastr,
'percent': self.percentstr,
'ticks': self._ticks,
'total': self._expected_ticks}
return fstr + add
@property
def percent(self):
"""Number estimate of percent completed. Receiving more ticks than
initial estimate increases this number beyond 100."""
if (self._finished):
return 100
return self._ticks * 100 / self._expected_ticks
@property
def percentstr(self):
"""String version of `percent`. Invalid values outside of (0..100)
are rendered as unknown value."""
if (self._finished):
return '100%'
p = self.percent
return '%s%%' % (str(int(p)) if p <= 100 else '??')
@property
def starttime(self):
return self._starttime
@property
def runtime(self):
if (self._ticks == 0):
return 0
reftime = self._finishtime if self._finished else time()
return reftime - self.starttime
@property
def eta(self):
"""Estimate of time remaining, in seconds. Ticks beyond initial estimate
lead to a negative value."""
if self._finished:
return 0
if self._ticks == 0:
return 0
return ((self._expected_ticks - self._ticks) * self.runtime / self._ticks) + 1
@property
def etastr(self):
"""String version of remaining time estimate. A negative `eta` is marked
as positive overtime."""
overtime = ''
eta = self.eta
if eta < 0:
eta = -eta
overtime = '+'
hh, mm, ss = util.splittime(eta)
return '%(ot)s%(hh)02d:%(mm)02d:%(ss)02d' % {
'hh': hh,
'mm': mm,
'ss': ss,
'ot':overtime,
}
class ProgressTree(Progress):
'''
Extension of the Progress concept that allows spawning 'child progress'
objects that will contribute a tick to their parent on completion.
'''
def __init__(self, name=None, parent=None):
super(self.__class__, self).__init__(ticks=1, name=name)
self._parent = parent
self._active_children = set()
self.root = self
self.level = 0
self.reporter = None
def __repr__(self):
return '[%3d:%3d=%.2f] %d %.1f->[%s] %s' % (
self._ticks,
self._expected_ticks,
self.completeness,
len(self._active_children),
self.runtime,
self.etastr,
self.name,
)
def spawnchild(self, name=None):
'''Creates a child progress that will tick this progress on finish'''
if name is None:
name = self.name
child = ProgressTree(name, parent=self)
child.root = self.root
child.level = self.level + 1
self.extend()
return child
def extend(self, amount=1):
'''Raises the number of expected ticks by amount'''
assert amount > 0
if self._finished:
self.unfinish()
self._expected_ticks += amount
def unfinish(self):
'''If progress resumes after a finish has been declared, undo the
effects of finish().'''
self._finished = False
if not self._parent is None:
self._parent.untick()
self._parent._active_children.add(self)
def untick(self):
'''Take back a past tick'''
if self._ticks > 0:
if self._ticks == self._expected_ticks:
self.unfinish()
self._ticks -= 1
def _start(self):
super(self.__class__, self)._start()
if not self._parent is None:
self._parent._active_children.add(self)
def tick(self, report=True):
super(self.__class__, self).tick()
if report and self.root.reporter:
self.root.reporter.tick(self)
if self._ticks == self._expected_ticks:
self.finish()
def finish(self):
if self._finished:
return
super(self.__class__, self).finish()
if not self._parent is None:
self._parent.tick(report=False)
self._parent._active_children.remove(self)
@property
def completeness(self):
'''Ratio of registered ticks to total expected ticks. Can be > 1.'''
if self._finished:
return 1.0
c = self._ticks
for child in self._active_children:
c += child.completeness
c /= self._expected_ticks
return c
@property
def percent(self):
return self.completeness * 100
@property
def eta(self):
if self._finished:
return 0
c = self.completeness
if c == 0:
return 0
return (1 - c) * self.runtime / c
class ProgressReporter(object):
'''
Customizable progress reporter. Can report on every object with the
following attributes or properties:
name : str
a descriptive name of this progress
eta : float
estimated time to completion in seconds. negative values mean overtime
level : int >= 0
for nested progress: the nesting depth, with 0 being top
root : progress not None
for nested progress: the origin (super parent) progress; can be == this
'''
@classmethod
def timefmt(cls, eta):
'''the default time format: [+]hh:mm:ss'''
overtime = 'ETA '
if eta < 0:
eta = -eta
overtime = '+'
hh, mm, ss = util.splittime(eta)
return '%(ot)s%(hh)02d:%(mm)02d:%(ss)02d' % {
'hh': hh,
'mm': mm,
'ss': ss,
'ot':overtime,
}
@classmethod
def prettytime(cls, eta):
'''
time display with variable precision: only show the most interesting
time unit.
'''
def round_to(val, stepsize):
return (val + stepsize / 2) // stepsize * stepsize
prefix = 'ETA '
if eta < 0:
eta = -eta
prefix = '+'
hh, mm, ss = util.splittime(eta)
if hh > 3:
timestr = '%2d hrs' % hh
elif hh > 0.25:
hh = round_to(hh * 100, 25) / 100
timestr = '%.2f h' % hh
elif mm > 0.8:
timestr = '%2d min' % int(mm + 0.5)
elif ss > 20:
timestr = '%2d sec' % round_to(ss, 20)
elif ss > 5:
timestr = '%2d sec' % round_to(ss, 5)
else:
timestr = '%2d sec' % ss
return prefix + timestr
@classmethod
def prettyqty(cls, amount):
'''return a quantity as kilos (k) or megas (M) if justified)'''
if amount < 10000:
return '%d' % (amount,)
if amount < 10e6:
return '%dk' % (amount // 1000,)
if amount < 10e7:
return '%1.1fM' % (amount // 10e6,)
return '%dM' % (amount // 10e6,)
def __init__(self, lvl= -1, dly=1, timefmt=None, namefmt=None, repf=None):
'''
Creates a progress reporter with the following customization options:
lvl : int (default -1)
The maximum level in the progress hierarchy that will trigger a
report. When a report is triggered, it will contain all progress
events up to this level that have occurred since the last report. A
negative value will use the time trigger (see ``dly``) to report the
newest progress event with the upmost available level.
dly : float (default 1)
The target maximum delay between reports, in seconds. Triggers a
report conforming with ``lvl`` if ``dly`` seconds have passed
since the last report. Set to 0 to turn off timed reporting;
set to a value < 0 for a time trigger without delay.
timefmt : callable(float) -> str (default ProgressReport.timefmt)
A function that turns the number for the estimated completion time
into a string. That number is provided by ``progress.root.eta``.
Per default, it interpreted as seconds until completion, with
negative values meaning overtime since estimated completion.
namefmt : callable(str) -> str (default: no conversion)
A function that converts the name given by ``progress.name`` into
a more suitable format.
repf : callable(dict) (default: log '%(eta) %(nam) (%(tix))' as info)
Function callback to handle the actual reporting. The dict argument
contains the following items::
'eta': completion time as str,
'nam': progress name,
'tix': str giving total ticks registered with this reporter,
'progress': progress to report on, containing the raw data
'''
self._eta_adjuster = lambda e: e + 1
self._eta_formatter = self.prettytime if timefmt is None else timefmt
self._name_formatter = (lambda s: s) if namefmt is None else namefmt
self._reportfunc = (lambda d: log.i('%(eta)s %(nam)s (%(tix)s)', d)) if repf is None else repf
self._replevel = lvl
self._repintvl = dly
self._maxlevel = 0
self._levelcache = {}
self._ticks = 0
self._lastreport = 0
def tick(self, progress):
'''
Register a progress advance for progress, potentially triggering a
report. A total of ticks will be kept.
'''
self._ticks += 1
self._maxlevel = max(self._maxlevel, progress.level)
self._levelcache[progress.level] = progress
if progress.level <= self._replevel:
self.report(progress)
del self._levelcache[progress.level]
elif self._repintvl and time() - self._lastreport > self._repintvl:
self.reportlast()
def reportlast(self):
'''
Report progress events since the last report.
'''
lvl = 0
while lvl <= self._maxlevel:
if lvl in self._levelcache:
p = self._levelcache.pop(lvl)
self.report(p)
if lvl >= self._replevel:
break
lvl += 1
def report(self, progress):
'''Trigger a report for ``progress``'''
self._reportfunc({
'eta': self._eta_formatter(self._eta_adjuster(progress.root.eta)),
'nam': self._name_formatter(progress.name),
'tix': self.prettyqty(self._ticks),
'progress' : progress,
})
self._lastreport = time()
| 13,564 | Python | .py | 340 | 29.111765 | 102 | 0.556441 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,365 | browsersetup.py | devsnd_cherrymusic/cherrymusicserver/browsersetup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import os
import cherrypy
import json
import subprocess
import threading
from cherrymusicserver import pathprovider
from cherrymusicserver import configuration as cfg
class SetupHandler:
def index(self):
return pathprovider.readRes('res/setup.html')
index.exposed = True
def saveconfig(self, values):
collect_errors = cfg.error_collector()
baseconfig = cfg.from_defaults()
newconfig = json.loads(values, encoding='str')
customcfg = baseconfig.replace(newconfig, collect_errors)
if collect_errors:
badkeys = (e.key for e in collect_errors)
return json.dumps({"status": "error", 'fields': list(badkeys)})
cfg.write_to_file(customcfg, pathprovider.configurationFile())
# kill server in a second
threading.Timer(1, lambda: cherrypy.engine.exit()).start()
# so request should still reach client...
return json.dumps({"status": "success"})
saveconfig.exposed = True
def mockFeatureCheck(self):
import random
return bool(random.random()-0.5 > 0)
def checkFeature(self, featurelist, feature):
checkers = {
'ImageMagick': (Feature('convert'),
'has-imagemagick',
_('resizing of album covers'),
_('The executable "convert" was not found in you PATH')),
'Vorbis Tools': (Feature('oggenc'),
'has-has-vorbis-tools',
_('encoding and decoding of OGGs'),
_('The executables "oggenc" and "oggdec" were not found in you PATH')),
'Lame': (Feature('lame'),
'has-lame',
_('encoding and decoding of MP3s'),
_('The executable "lame" was not found in you PATH')),
'FLAC': (Feature('flac'),
'has-flac',
_('encoding and decoding of FLACs'),
_('The executable "flac" was not found in you PATH')),
'mplayer': (Feature('mplayer'),
'has-mplayer',
_('decoding OGG, MP3, FLAC, WMA and AAC'),
_('The executable "mplayer" was not found in you PATH')),
}
if feature in checkers:
installed = checkers[feature][0]()
idx = checkers[feature][1]
msg = checkers[feature][2]
explaination = checkers[feature][3]
if installed:
text = 'enables '+msg
else:
text = 'leads to missing feature: '+msg
featurelist.append([feature, installed, idx, text, explaination])
def getfeatures(self):
featurelist = []
self.checkFeature(featurelist, 'ImageMagick')
self.checkFeature(featurelist, 'Vorbis Tools')
self.checkFeature(featurelist, 'Lame')
self.checkFeature(featurelist, 'FLAC')
#self.checkFeature(featurelist, 'mplayer')
return json.dumps(featurelist)
getfeatures.exposed = True
def ping(self):
return "pong"
ping.exposed = True
class Feature:
def __init__(self, command):
self.command = command
def __call__(self):
try:
with open(os.devnull, 'w') as devnull:
subprocess.Popen([self.command],
stdout=devnull,
stderr=devnull)
return True
except OSError:
return False
def configureAndStartCherryPy(port):
if not port:
port = 8080
socket_host = "0.0.0.0"
resourcedir = os.path.abspath(pathprovider.getResourcePath('res'))
userdatapath = pathprovider.getUserDataPath()
cherrypy.config.update({
'server.socket_port': port,
'log.error_file': os.path.join(userdatapath, 'server.log'),
'environment': 'production',
'server.socket_host': socket_host,
'server.thread_pool': 30,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 24,
})
resource_config = {
'/res': {
'tools.staticdir.on': True,
'tools.staticdir.dir': resourcedir,
'tools.staticdir.index': 'index.html',
'tools.caching.on': False,
},
'/favicon.ico': {
'tools.staticfile.on': True,
'tools.staticfile.filename': resourcedir+'/favicon.ico',
}
}
cherrypy.tree.mount(SetupHandler(), '/', config=resource_config)
print(_('''
Starting setup server on port {port} ...
Open your browser and put the server IP:{port} in the address bar.
If you run the server locally, use: localhost:{port}.
'''.format(port=port)))
cherrypy.lib.caching.expires(0)
cherrypy.engine.start()
cherrypy.engine.block()
| 6,179 | Python | .py | 151 | 30.801325 | 100 | 0.585496 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,366 | userdb.py | devsnd_cherrymusic/cherrymusicserver/userdb.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import hashlib
import uuid
import sqlite3
from collections import namedtuple
from cherrymusicserver import database
from cherrymusicserver import log
from cherrymusicserver import service
from cherrymusicserver.database.connect import BoundConnector
DBNAME = 'user'
class UserDB:
def __init__(self, connector=None):
database.require(DBNAME, version='1')
self.conn = BoundConnector(DBNAME, connector).connection()
def addUser(self, username, password, admin):
if not (username.strip() or password.strip()):
log.d(_('empty username or password!'))
return False
user = User.create(username, password, admin)
try:
exists = self.conn.execute('SELECT username'
' FROM users WHERE lower(username) = lower(?)',
(username,)).fetchone()
if (not exists):
self.conn.execute('''
INSERT INTO users
(username, admin, password, salt)
VALUES (?,?,?,?)''',
(user.name, 1 if user.isadmin else 0, user.password, user.salt))
else:
raise sqlite3.IntegrityError;
except sqlite3.IntegrityError:
log.e('cannot create user "%s", already exists!' % user.name)
return False
self.conn.commit()
log.i('added user: ' + user.name)
return True
def isDeletable(self, userid):
#cant delete 1st admin
if not userid == 1:
return True
return False
def changePassword(self, username, newpassword):
if not newpassword.strip():
return _("not a valid password")
if self.getIdByName(username) is None:
msg = 'cannot change password: "%s" does not exist!' % username
log.e(msg)
return msg
newuser = User.create(username, newpassword, False) #dummy user for salt
self.conn.execute('''
UPDATE users SET password = ?, salt = ? WHERE lower(username) = lower(?)
''', (newuser.password, newuser.salt, newuser.name) )
self.conn.commit()
return "success"
def deleteUser(self, userid):
if self.isDeletable(userid):
self.conn.execute('''DELETE FROM users WHERE rowid = ?''', (userid,))
self.conn.commit()
return True
return False
def auth(self, username, password):
'''try to authenticate the given username and password. on success,
a valid user tuple will be returned; failure will return User.nobody().
will fail if username or password are empty.'''
if not (username.strip() and password.strip()):
return User.nobody()
rows = self.conn.execute('SELECT rowid, username, admin, password, salt'
' FROM users WHERE lower(username) = lower(?)', (username,))\
.fetchall()
assert len(rows) <= 1
if rows:
user = User(*rows[0])
if Crypto.scramble(password, user.salt) == user.password:
return user
return User.nobody()
def getUserList(self):
cur = self.conn.cursor()
cur.execute('''SELECT rowid, username, admin FROM users''')
ret = []
for uid, user, admin in cur.fetchall():
ret.append({'id':uid, 'username':user, 'admin':admin,'deletable':self.isDeletable(uid)})
return ret
def getUserCount(self):
cur = self.conn.cursor()
cur.execute('''SELECT COUNT(*) FROM users''')
return cur.fetchall()[0][0]
def getNameById(self, userid):
res = self.conn.execute('''SELECT username FROM users WHERE rowid = ?''',(userid,))
username = res.fetchone()
return username[0] if username else 'nobody'
def getIdByName(self, username):
res = self.conn.execute('''SELECT rowid FROM users WHERE lower(username) = lower(?)''',(username,))
userid = res.fetchone()
if userid:
return userid[0]
class Crypto(object):
@classmethod
def generate_salt(cls):
'''returns a random hex string'''
return uuid.uuid4().hex
@classmethod
def salted(cls, plain, salt):
'''interweaves plain and salt'''
return (plain[1::2] + salt + plain[::2])[::-1]
@classmethod
def scramble(cls, plain, salt):
'''returns a sha512-hash of plain and salt as a hex string'''
saltedpassword_bytes = cls.salted(plain, salt).encode('UTF-8')
return hashlib.sha512(saltedpassword_bytes).hexdigest()
class User(namedtuple('User_', 'uid name isadmin password salt')):
__NOBODY = None
@classmethod
def create(cls, name, password, isadmin=False):
'''create a new user with given name and password.
will add a random salt.'''
if not name.strip():
raise ValueError(_('name must not be empty'))
if not password.strip():
raise ValueError(_('password must not be empty'))
salt = Crypto.generate_salt()
password = Crypto.scramble(password, salt)
return User(-1, name, isadmin, password, salt)
@classmethod
def nobody(cls):
'''return a user object representing an unknown user'''
if User.__NOBODY is None:
User.__NOBODY = User(-1, None, None, None, None)
return User.__NOBODY
| 6,549 | Python | .py | 158 | 33.348101 | 107 | 0.626317 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,367 | service.py | devsnd_cherrymusic/cherrymusicserver/service.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Dependency injection and other facilities to match providers of services
with their users.
Nature and interface of a service are left for the concerned parties to
agree on; all this module knows about the service is its name, or "handle".
Basic usage::
>>> pizza = object()
>>> service.provide('pizzaservice', pizza)
>>> pizza is service.get('pizzaservice')
True
Types as providers and users::
>>> class PizzaService(object):
... pass
...
>>> @service.user(mypizza='pizzaservice') # become a user
... class PizzaUser(object):
... pass
...
>>> user = PizzaUser()
>>> service.provide('pizzaservice', PizzaService)
>>> isinstance(user.mypizza, PizzaService) # provider as attribute
True
"""
import threading
from cherrymusicserver import log
class MutualDependencyBreak(Exception):
"""Raised when mutually dependent providers are trying to instantiate
each other in their constructors.
This happens while creating a provider that is part of a dependency
cycle; when it is allowed, in its constructor, to access a dependency
that's also part of the cycle, a singularity is spawned which implodes the
universe. This exception is raised to prevent that.
In general, don't create cyclic dependencies. It's bad for your brain and
also a sure sign of a problematic program architecture. When confronted
with a mutual dependency, extract a third class from one of the offenders
for both to depend on.
"""
pass
__provider_factories = {}
__providercache = {}
def provide(handle, provider, args=(), kwargs={}):
""" Activate a provider for the service identified by ``handle``,
replacing a previous provider for the same service.
If the provider is a ``type``, an instance will be created as the
actual provider. Instantiation is lazy, meaning it will be deferred
until the provider is requested (:func:`get`) by some user.
To use a type as a provider, you need to wrap it into something that is
not a type.
handle : str
The name of the serivce.
provider :
An object that provides the service, or a type that instantiates
such objects. Instantiation will happen on the first get call.
args, kwargs :
Pass on arguments to a type.
"""
assert isinstance(provider, type) or not (args or kwargs)
__provider_factories[handle] = _ProviderFactory.get(provider, args, kwargs)
__providercache.pop(handle, None)
log.d('service %r: now provided by %r', handle, provider)
def get(handle):
"""Request the provider for the service identified by ``handle``.
If a type was registered for the handle, the actual provider will be the
result of instantiating the type when it is first requested.
Although the goal is to create only one instance, it is possible that
different threads see different instances.
"""
try:
return __providercache[handle]
except KeyError:
return _createprovider(handle)
class require(object):
"""Descriptor to make a service provider available as a class attribute.
>>> import cherrymusicserver.service as service
>>> class ServiceUser(object):
... mypizzas = service.require('pizzaservice')
"""
def __init__(self, handle):
self.handle = handle
def __repr__(self):
return '{0}({1!r})'.format(self.__class__.__name__, self.handle)
def __get__(self, instance, owner):
return get(self.handle)
def user(**requirements):
""" Class deocrator to inject service providers as attributes into the
decorated class.
requirements : name=handle
Create :class:`require` descriptor attributes in the class:
``name = require(handle)``.
Returns: Class Decorator
A function that takes the user class as its sole argument.
"""
def clsdecorator(cls):
for attribute, handle in requirements.items():
setattr(cls, attribute, require(handle))
return cls
return clsdecorator
def _createprovider(handle):
try:
factory = __provider_factories[handle]
except KeyError:
raise LookupError('Service not available: {0!r}'.format(handle))
return __providercache.setdefault(handle, factory.make())
class _ProviderFactory(object):
""" High security facility to contain cyclic dependency and multithreading
issues.
Factory instances guard against dependency cycles by raising a
:class:`MutualDependencyBreak` when mutually dependent providers
try to instantiate each other.
"""
_master_lock = threading.Lock()
__factories = {}
@classmethod
def get(cls, provider, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
with cls._master_lock:
try:
factory = cls.__factories[id(provider)]
factory.args = args
factory.kwargs = kwargs
except KeyError:
factory = cls(provider, args, kwargs)
cls.__factories[id(provider)] = factory
return factory
def __init__(self, provider, args=(), kwargs={}):
assert self._master_lock.locked(), 'use .get(...) to obtain instances'
self.provider = provider
self.args = args
self.kwargs = kwargs
self.__threadlocal = threading.local()
@property
def lock(self):
"""Thread-local: dependendy issues will happen inside the same thread,
so don't compete with other threads."""
local = self.__threadlocal
try:
lock = local.lock
except AttributeError:
with self._master_lock:
lock = local.__dict__.setdefault('lock', threading.Lock())
return lock
def make(self):
""" Return a provider instance.
Raises : :cls:`MutualDependencyBreak`
If called recursively within the same thread, which happens
when mutually dependent providers try to instantiate each other.
"""
if self.lock.locked():
raise MutualDependencyBreak(self.provider)
with self.lock:
if isinstance(self.provider, (type, type(Python2OldStyleClass))):
return self.provider(*self.args, **self.kwargs)
return self.provider
class Python2OldStyleClass:
"""In Python2, I am a ``classobj`` which is not the same as a ``type``."""
pass
| 7,773 | Python | .py | 186 | 34.854839 | 80 | 0.665253 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,368 | httphandler.py | devsnd_cherrymusic/cherrymusicserver/httphandler.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""This class provides the api to talk to the client.
It will then call the cherrymodel, to get the
requested information"""
import os # shouldn't have to list any folder in the future!
import json
import cherrypy
import codecs
import re
import sys
try:
from urllib.parse import unquote
except ImportError:
from backport.urllib.parse import unquote
try:
from urllib import parse
except ImportError:
from backport.urllib import parse
import audiotranscode
from tinytag import TinyTag
from cherrymusicserver import userdb
from cherrymusicserver import log
from cherrymusicserver import albumartfetcher
from cherrymusicserver import service
from cherrymusicserver.pathprovider import readRes
from cherrymusicserver.pathprovider import albumArtFilePath
import cherrymusicserver as cherry
import cherrymusicserver.metainfo as metainfo
from cherrymusicserver.util import Performance, MemoryZipFile
from cherrymusicserver.ext import zipstream
import time
debug = True
@service.user(model='cherrymodel', playlistdb='playlist',
useroptions='useroptions', userdb='users')
class HTTPHandler(object):
def __init__(self, config):
self.config = config
template_main = 'res/dist/main.html'
template_login = 'res/login.html'
template_firstrun = 'res/firstrun.html'
self.mainpage = readRes(template_main)
self.loginpage = readRes(template_login)
self.firstrunpage = readRes(template_firstrun)
self.handlers = {
'search': self.api_search,
'rememberplaylist': self.api_rememberplaylist,
'saveplaylist': self.api_saveplaylist,
'loadplaylist': self.api_loadplaylist,
'generaterandomplaylist': self.api_generaterandomplaylist,
'deleteplaylist': self.api_deleteplaylist,
'getmotd': self.api_getmotd,
'restoreplaylist': self.api_restoreplaylist,
'getplayables': self.api_getplayables,
'getuserlist': self.api_getuserlist,
'adduser': self.api_adduser,
'userdelete': self.api_userdelete,
'userchangepassword': self.api_userchangepassword,
'showplaylists': self.api_showplaylists,
'logout': self.api_logout,
'downloadpls': self.api_downloadpls,
'downloadm3u': self.api_downloadm3u,
'getsonginfo': self.api_getsonginfo,
'getencoders': self.api_getencoders,
'getdecoders': self.api_getdecoders,
'transcodingenabled': self.api_transcodingenabled,
'updatedb': self.api_updatedb,
'getconfiguration': self.api_getconfiguration,
'compactlistdir': self.api_compactlistdir,
'listdir': self.api_listdir,
'fetchalbumart': self.api_fetchalbumart,
'fetchalbumarturls': self.api_fetchalbumarturls,
'albumart_set': self.api_albumart_set,
'heartbeat': self.api_heartbeat,
'getuseroptions': self.api_getuseroptions,
'setuseroption': self.api_setuseroption,
'changeplaylist': self.api_changeplaylist,
'downloadcheck': self.api_downloadcheck,
'setuseroptionfor': self.api_setuseroptionfor,
}
def issecure(self, url):
return parse.urlparse(url).scheme == 'https'
def getBaseUrl(self, redirect_unencrypted=False):
ipAndPort = parse.urlparse(cherrypy.url()).netloc
is_secure_connection = self.issecure(cherrypy.url())
ssl_enabled = cherry.config['server.ssl_enabled']
if ssl_enabled and not is_secure_connection:
log.d(_('Not secure, redirecting...'))
try:
ip = ipAndPort[:ipAndPort.rindex(':')]
except ValueError:
ip = ipAndPort # when using port 80: port is not in ipAndPort
url = 'https://' + ip + ':' + str(cherry.config['server.ssl_port'])
if redirect_unencrypted:
raise cherrypy.HTTPRedirect(url, 302)
else:
url = 'http://' + ipAndPort
return url
def index(self, *args, **kwargs):
self.getBaseUrl(redirect_unencrypted=True)
firstrun = 0 == self.userdb.getUserCount()
show_page = self.mainpage #generated main.html from devel.html
if 'devel' in kwargs:
#reload pages everytime in devel mode
show_page = readRes('res/devel.html')
self.loginpage = readRes('res/login.html')
self.firstrunpage = readRes('res/firstrun.html')
if 'login' in kwargs:
username = kwargs.get('username', '')
password = kwargs.get('password', '')
login_action = kwargs.get('login', '')
if login_action == 'login':
self.session_auth(username, password)
if cherrypy.session['username']:
username = cherrypy.session['username']
log.i(_('user {name} just logged in.').format(name=username))
elif login_action == 'create admin user':
if firstrun:
if username.strip() and password.strip():
self.userdb.addUser(username, password, True)
self.session_auth(username, password)
return show_page
else:
return "No, you can't."
if firstrun:
return self.firstrunpage
else:
if self.isAuthorized():
return show_page
else:
return self.loginpage
index.exposed = True
def isAuthorized(self):
try:
sessionUsername = cherrypy.session.get('username', None)
sessionUserId = cherrypy.session.get('userid', -1)
nameById = self.userdb.getNameById(sessionUserId)
except (UnicodeDecodeError, ValueError) as e:
# workaround for python2/python3 jump, filed bug in cherrypy
# https://bitbucket.org/cherrypy/cherrypy/issue/1216/sessions-python2-3-compability-unsupported
log.w(_('''
Dropping all sessions! Try not to change between python 2 and 3,
everybody has to relogin now.'''))
cherrypy.session.delete()
sessionUsername = None
if sessionUsername is None:
if self.autoLoginActive():
cherrypy.session['username'] = self.userdb.getNameById(1)
cherrypy.session['userid'] = 1
cherrypy.session['admin'] = True
return True
else:
return False
elif sessionUsername != nameById:
self.api_logout(value=None)
return False
return True
def autoLoginActive(self):
is_loopback = cherrypy.request.remote.ip in ('127.0.0.1', '::1')
if is_loopback and cherry.config['server.localhost_auto_login']:
return True
return False
def session_auth(self, username, password):
user = self.userdb.auth(username, password)
allow_remote = cherry.config['server.permit_remote_admin_login']
is_loopback = cherrypy.request.remote.ip in ('127.0.0.1', '::1')
if not is_loopback and user.isadmin and not allow_remote:
log.i(_('Rejected remote admin login from user: {name}').format(name=user.name))
user = userdb.User.nobody()
cherrypy.session['username'] = user.name
cherrypy.session['userid'] = user.uid
cherrypy.session['admin'] = user.isadmin
def getUserId(self):
try:
return cherrypy.session['userid']
except KeyError:
cherrypy.lib.sessions.expire()
cherrypy.HTTPRedirect(cherrypy.url(), 302)
return ''
def trans(self, newformat, *path, **params):
''' Transcodes the track given as ``path`` into ``newformat``.
Streams the response of the corresponding
``audiotranscode.AudioTranscode().transcodeStream()`` call.
params:
bitrate: int for kbps. None or < 1 for default
'''
if not self.isAuthorized():
raise cherrypy.HTTPRedirect(self.getBaseUrl(), 302)
cherrypy.session.release_lock()
if cherry.config['media.transcode'] and path:
# bitrate
bitrate = params.pop('bitrate', None) or None # catch empty strings
if bitrate:
try:
bitrate = max(0, int(bitrate)) or None # None if < 1
except (TypeError, ValueError):
raise cherrypy.HTTPError(400, "Bad query: "
"bitrate ({0!r}) must be an integer".format(str(bitrate)))
# path
path = os.path.sep.join(path)
if sys.version_info < (3, 0): # workaround for #327 (cherrypy issue)
path = path.decode('utf-8') # make it work with non-ascii
elif cherry.needs_serve_file_utf8_fix:
path = codecs.decode(codecs.encode(path, 'latin1'), 'utf-8')
fullpath = os.path.join(cherry.config['media.basedir'], path)
starttime = int(params.pop('starttime', 0))
transcoder = audiotranscode.AudioTranscode()
mimetype = audiotranscode.mime_type(newformat)
cherrypy.response.headers["Content-Type"] = mimetype
try:
return transcoder.transcode_stream(fullpath, newformat,
bitrate=bitrate, starttime=starttime)
except (audiotranscode.TranscodeError, IOError) as e:
raise cherrypy.HTTPError(404, e.value)
trans.exposed = True
trans._cp_config = {'response.stream': True}
def api(self, *args, **kwargs):
"""calls the appropriate handler from the handlers
dict, if available. handlers having noauth set to
true do not need authentification to work.
"""
#check action
action = args[0] if args else ''
if not action in self.handlers:
return "Error: no such action. '%s'" % action
#authorize if not explicitly deactivated
handler = self.handlers[action]
needsAuth = not ('noauth' in dir(handler) and handler.noauth)
if needsAuth and not self.isAuthorized():
raise cherrypy.HTTPError(401, 'Unauthorized')
handler_args = {}
if 'data' in kwargs:
handler_args = json.loads(kwargs['data'])
is_binary = ('binary' in dir(handler) and handler.binary)
if is_binary:
return handler(**handler_args)
else:
return json.dumps({'data': handler(**handler_args)})
api.exposed = True
def download_check_files(self, filelist):
# only admins and allowed users may download
if not cherrypy.session['admin']:
uo = self.useroptions.forUser(self.getUserId())
if not uo.getOptionValue('media.may_download'):
return 'not_permitted'
# make sure nobody tries to escape from basedir
for f in filelist:
# don't allow to traverse up in the file system
if '/../' in f or f.startswith('../'):
return 'invalid_file'
# CVE-2015-8309: do not allow absolute file paths
if os.path.isabs(f):
return 'invalid_file'
# make sure all files are smaller than maximum download size
size_limit = cherry.config['media.maximum_download_size']
try:
if self.model.file_size_within_limit(filelist, size_limit):
return 'ok'
else:
return 'too_big'
except OSError as e: # use OSError for python2 compatibility
return str(e)
def api_downloadcheck(self, filelist):
status = self.download_check_files(filelist)
if status == 'not_permitted':
return """You are not allowed to download files."""
elif status == 'invalid_file':
return "Error: invalid filename found in {list}".format(list=filelist)
elif status == 'too_big':
size_limit = cherry.config['media.maximum_download_size']
return """Can't download: Playlist is bigger than {maxsize} mB.
The server administrator can change this configuration.
""".format(maxsize=size_limit/1024/1024)
elif status == 'ok':
return status
else:
message = "Error status check for download: {status!r}".format(status=status)
log.e(message)
return message
def download(self, value):
if not self.isAuthorized():
raise cherrypy.HTTPError(401, 'Unauthorized')
filelist = [filepath for filepath in json.loads(unquote(value))]
dlstatus = self.download_check_files(filelist)
if dlstatus == 'ok':
_save_and_release_session()
zipmime = 'application/x-zip-compressed'
cherrypy.response.headers["Content-Type"] = zipmime
zipname = 'attachment; filename="CherryMusic-archive.zip"'
cherrypy.response.headers['Content-Disposition'] = zipname
basedir = cherry.config['media.basedir']
fullpath_filelist = [os.path.join(basedir, f) for f in filelist]
return zipstream.ZipStream(fullpath_filelist)
else:
return dlstatus
download.exposed = True
download._cp_config = {'response.stream': True}
def api_getuseroptions(self):
uo = self.useroptions.forUser(self.getUserId())
uco = uo.getChangableOptions()
if cherrypy.session['admin']:
uco['media'].update({'may_download': True})
else:
uco['media'].update({'may_download': uo.getOptionValue('media.may_download')})
return uco
def api_heartbeat(self):
uo = self.useroptions.forUser(self.getUserId())
uo.setOption('last_time_online', int(time.time()))
def api_setuseroption(self, optionkey, optionval):
uo = self.useroptions.forUser(self.getUserId())
uo.setOption(optionkey, optionval)
return "success"
def api_setuseroptionfor(self, userid, optionkey, optionval):
if cherrypy.session['admin']:
uo = self.useroptions.forUser(userid)
uo.setOption(optionkey, optionval)
return "success"
else:
return "error: not permitted. Only admins can change other users options"
def api_fetchalbumarturls(self, searchterm, method=None):
if not cherrypy.session['admin']:
raise cherrypy.HTTPError(401, 'Unauthorized')
_save_and_release_session()
fetch_args = {} if method is None else {'method': method}
fetcher = albumartfetcher.AlbumArtFetcher(**fetch_args)
imgurls = fetcher.fetchurls(searchterm)
# show no more than 10 images
return imgurls[:min(len(imgurls), 10)]
def api_albumart_set(self, directory, imageurl):
if not cherrypy.session['admin']:
raise cherrypy.HTTPError(401, 'Unauthorized')
b64imgpath = albumArtFilePath(directory)
fetcher = albumartfetcher.AlbumArtFetcher()
data, header = fetcher.retrieveData(imageurl)
self.albumartcache_save(b64imgpath, data)
def api_fetchalbumart(self, directory):
_save_and_release_session()
default_folder_image = "../res/img/folder.png"
log.i('Fetching album art for: %s' % directory)
filepath = os.path.join(cherry.config['media.basedir'], directory)
if os.path.isfile(filepath):
# if the given path is a file, try to get the image from ID3
tag = TinyTag.get(filepath, image=True)
image_data = tag.get_image()
if image_data:
log.d('Image found in tag.')
header = {'Content-Type': 'image/jpg', 'Content-Length': len(image_data)}
cherrypy.response.headers.update(header)
return image_data
else:
# if the file does not contain an image, display the image of the
# parent directory
directory = os.path.dirname(directory)
#try getting a cached album art image
b64imgpath = albumArtFilePath(directory)
img_data = self.albumartcache_load(b64imgpath)
if img_data:
cherrypy.response.headers["Content-Length"] = len(img_data)
return img_data
#try getting album art inside local folder
fetcher = albumartfetcher.AlbumArtFetcher()
localpath = os.path.join(cherry.config['media.basedir'], directory)
header, data, resized = fetcher.fetchLocal(localpath)
if header:
if resized:
#cache resized image for next time
self.albumartcache_save(b64imgpath, data)
cherrypy.response.headers.update(header)
return data
elif cherry.config['media.fetch_album_art']:
# maximum of files to try to fetch metadata for albumart keywords
METADATA_ALBUMART_MAX_FILES = 10
#fetch album art from online source
try:
foldername = os.path.basename(directory)
keywords = foldername
# remove any odd characters from the folder name
keywords = re.sub('[^A-Za-z\s]', ' ', keywords)
# try getting metadata from files in the folder for a more
# accurate match
files = os.listdir(localpath)
for i, filename in enumerate(files):
if i >= METADATA_ALBUMART_MAX_FILES:
break
path = os.path.join(localpath, filename)
metadata = metainfo.getSongInfo(path)
if metadata.artist and metadata.album:
keywords = '{} - {}'.format(metadata.artist, metadata.album)
break
log.i(_("Fetching album art for keywords {keywords!r}").format(keywords=keywords))
header, data = fetcher.fetch(keywords)
if header:
cherrypy.response.headers.update(header)
self.albumartcache_save(b64imgpath, data)
return data
else:
# albumart fetcher failed, so we serve a standard image
raise cherrypy.HTTPRedirect(default_folder_image, 302)
except:
# albumart fetcher threw exception, so we serve a standard image
raise cherrypy.HTTPRedirect(default_folder_image, 302)
else:
# no local album art found, online fetching deactivated, show default
raise cherrypy.HTTPRedirect(default_folder_image, 302)
api_fetchalbumart.noauth = True
api_fetchalbumart.binary = True
def albumartcache_load(self, imgb64path):
if os.path.exists(imgb64path):
with open(imgb64path, 'rb') as f:
return f.read()
def albumartcache_save(self, path, data):
with open(path, 'wb') as f:
f.write(data)
def api_compactlistdir(self, directory, filterstr=None):
try:
files_to_list = self.model.listdir(directory, filterstr)
except ValueError:
raise cherrypy.HTTPError(400, 'Bad Request')
return [entry.to_dict() for entry in files_to_list]
def api_listdir(self, directory):
try:
return [entry.to_dict() for entry in self.model.listdir(directory)]
except ValueError:
raise cherrypy.HTTPError(400, 'Bad Request')
def api_search(self, searchstring):
if not searchstring.strip():
jsonresults = '[]'
else:
with Performance(_('processing whole search request')):
searchresults = self.model.search(searchstring.strip())
with Performance(_('rendering search results as json')):
jsonresults = [entry.to_dict() for entry in searchresults]
return jsonresults
def api_rememberplaylist(self, playlist):
cherrypy.session['playlist'] = playlist
def api_saveplaylist(self, playlist, public, playlistname, overwrite=False):
res = self.playlistdb.savePlaylist(
userid=self.getUserId(),
public=1 if public else 0,
playlist=playlist,
playlisttitle=playlistname,
overwrite=overwrite)
if res == "success":
return res
else:
raise cherrypy.HTTPError(400, res)
def api_deleteplaylist(self, playlistid):
res = self.playlistdb.deletePlaylist(playlistid,
self.getUserId(),
override_owner=False)
if res == "success":
return res
else:
# not the ideal status code but we don't know the actual
# cause without parsing res
raise cherrypy.HTTPError(400, res)
def api_loadplaylist(self, playlistid):
return [entry.to_dict() for entry in self.playlistdb.loadPlaylist(
playlistid=playlistid,
userid=self.getUserId()
)]
def api_generaterandomplaylist(self):
return [entry.to_dict() for entry in self.model.randomMusicEntries(50)]
def api_changeplaylist(self, plid, attribute, value):
if attribute == 'public':
is_valid = type(value) == bool and type(plid) == int
if is_valid:
return self.playlistdb.setPublic(userid=self.getUserId(),
plid=plid,
public=value)
def api_getmotd(self):
if cherrypy.session['admin'] and cherry.config['general.update_notification']:
_save_and_release_session()
new_versions = self.model.check_for_updates()
if new_versions:
newest_version = new_versions[0]['version']
features = []
fixes = []
for version in new_versions:
for update in version['features']:
if update.startswith('FEATURE:'):
features.append(update[len('FEATURE:'):])
elif update.startswith('FIX:'):
fixes.append(update[len('FIX:'):])
elif update.startswith('FIXED:'):
fixes.append(update[len('FIXED:'):])
retdata = {'type': 'update', 'data': {}}
retdata['data']['version'] = newest_version
retdata['data']['features'] = features
retdata['data']['fixes'] = fixes
return retdata
return {'type': 'wisdom', 'data': self.model.motd()}
def api_restoreplaylist(self):
session_playlist = cherrypy.session.get('playlist', [])
return session_playlist
def api_getplayables(self):
"""DEPRECATED"""
return json.dumps(cherry.config['media.playable'])
def api_getuserlist(self):
if cherrypy.session['admin']:
userlist = self.userdb.getUserList()
for user in userlist:
if user['id'] == cherrypy.session['userid']:
user['deletable'] = False
user_options = self.useroptions.forUser(user['id'])
t = user_options.getOptionValue('last_time_online')
may_download = user_options.getOptionValue('media.may_download')
user['last_time_online'] = t
user['may_download'] = may_download
sortfunc = lambda user: user['last_time_online']
userlist = sorted(userlist, key=sortfunc, reverse=True)
return json.dumps({'time': int(time.time()),
'userlist': userlist})
else:
return json.dumps({'time': 0, 'userlist': []})
def api_adduser(self, username, password, isadmin):
if cherrypy.session['admin']:
if self.userdb.addUser(username, password, isadmin):
return 'added new user: %s' % username
else:
return 'error, cannot add new user!' % username
else:
return "You didn't think that would work, did you?"
def api_userchangepassword(self, oldpassword, newpassword, username=''):
isself = username == ''
if isself:
username = cherrypy.session['username']
authed_user = self.userdb.auth(username, oldpassword)
is_authenticated = userdb.User.nobody() != authed_user
if not is_authenticated:
raise cherrypy.HTTPError(403, "Forbidden")
if isself or cherrypy.session['admin']:
return self.userdb.changePassword(username, newpassword)
else:
raise cherrypy.HTTPError(403, "Forbidden")
def api_userdelete(self, userid):
is_self = cherrypy.session['userid'] == userid
if cherrypy.session['admin'] and not is_self:
deleted = self.userdb.deleteUser(userid)
return 'success' if deleted else 'failed'
else:
return "You didn't think that would work, did you?"
def api_showplaylists(self, sortby="created", filterby=''):
playlists = self.playlistdb.showPlaylists(self.getUserId(), filterby)
curr_time = int(time.time())
is_reverse = False
#translate userids to usernames:
for pl in playlists:
pl['username'] = self.userdb.getNameById(pl['userid'])
pl['type'] = 'playlist'
pl['age'] = curr_time - pl['created']
if sortby[0] == '-':
is_reverse = True
sortby = sortby[1:]
if not sortby in ('username', 'age', 'title', 'default'):
sortby = 'created'
if sortby == 'default':
sortby = 'age'
is_reverse = False
playlists = sorted(playlists, key=lambda x: x[sortby], reverse = is_reverse)
return playlists
def api_logout(self):
cherrypy.session['username'] = None
cherrypy.session['userid'] = None
cherrypy.session['admin'] = None
api_logout.no_auth = True
def api_downloadpls(self, plid, hostaddr):
userid = self.getUserId()
pls = self.playlistdb.createPLS(plid=plid, userid=userid, addrstr=hostaddr)
name = self.playlistdb.getName(plid, userid)
if pls and name:
return self.serve_string_as_file(pls, name+'.pls')
api_downloadpls.binary = True
def api_downloadm3u(self, plid, hostaddr):
userid = self.getUserId()
pls = self.playlistdb.createM3U(plid=plid, userid=userid, addrstr=hostaddr)
name = self.playlistdb.getName(plid, userid)
if pls and name:
return self.serve_string_as_file(pls, name+'.m3u')
api_downloadm3u.binary = True
def export_playlists(self, format, all=False, hostaddr=''):
userid = self.getUserId()
if not userid:
raise cherrypy.HTTPError(401, _("Please log in"))
hostaddr = (hostaddr.strip().rstrip('/') + cherry.config['server.rootpath']).rstrip('/')
format = format.lower()
if format == 'm3u':
filemaker = self.playlistdb.createM3U
elif format == 'pls':
filemaker = self.playlistdb.createPLS
else:
raise cherrypy.HTTPError(400,
_('Unknown playlist format: {format!r}').format(format=format))
playlists = self.playlistdb.showPlaylists(userid, include_public=all)
if not playlists:
raise cherrypy.HTTPError(404, _('No playlists found'))
with MemoryZipFile() as zip:
for pl in playlists:
plid = pl['plid']
plstr = filemaker(plid=plid, userid=userid, addrstr=hostaddr)
name = self.playlistdb.getName(plid, userid) + '.' + format
if not pl['owner']:
username = self.userdb.getNameById(pl['userid'])
name = username + '/' + name
zip.writestr(name, plstr)
zipmime = 'application/x-zip-compressed'
zipname = 'attachment; filename="playlists.zip"'
cherrypy.response.headers["Content-Type"] = zipmime
cherrypy.response.headers['Content-Disposition'] = zipname
return zip.getbytes()
export_playlists.exposed = True
def api_getsonginfo(self, path):
basedir = cherry.config['media.basedir']
abspath = os.path.join(basedir, path)
return json.dumps(metainfo.getSongInfo(abspath).dict())
def api_getencoders(self):
return json.dumps(audiotranscode.getEncoders())
def api_getdecoders(self):
return json.dumps(audiotranscode.getDecoders())
def api_transcodingenabled(self):
return json.dumps(cherry.config['media.transcode'])
def api_updatedb(self):
self.model.updateLibrary()
return 'success'
def api_getconfiguration(self):
clientconfigkeys = {
'transcodingenabled': cherry.config['media.transcode'],
'fetchalbumart': cherry.config['media.fetch_album_art'],
'isadmin': cherrypy.session['admin'],
'username': cherrypy.session['username'],
'servepath': 'serve/',
'transcodepath': 'trans/',
'auto_login': self.autoLoginActive(),
'rootpath': cherry.config['server.rootpath'],
'version': cherry.REPO_VERSION or cherry.VERSION,
'albumart_search_methods': list(
albumartfetcher.AlbumArtFetcher.methods),
}
if cherry.config['media.transcode']:
decoders = list(self.model.transcoder.available_decoder_formats())
clientconfigkeys['getdecoders'] = decoders
encoders = list(self.model.transcoder.available_encoder_formats())
clientconfigkeys['getencoders'] = encoders
else:
clientconfigkeys['getdecoders'] = []
clientconfigkeys['getencoders'] = []
return clientconfigkeys
def serve_string_as_file(self, string, filename):
content_disposition = 'attachment; filename="'+filename+'"'
cherrypy.response.headers["Content-Type"] = "application/x-download"
cherrypy.response.headers["Content-Disposition"] = content_disposition
return codecs.encode(string, "UTF-8")
def _save_and_release_session():
""" workaround to cleanly release FileSessions in Cherrypy >= 3.3
From https://github.com/devsnd/cherrymusic/issues/483:
> CherryPy >=3.3.0 (up to current version, 3.6) makes it impossible to
> explicitly release FileSession locks, because:
> 1. FileSession.save() asserts that the session is locked; and
> 2. _cptools.SessionTool always adds a hook to call sessions.save
> before the response is finalized.
> If we still want to release the session in a controller, I guess the
> best way to work around this is to remove the hook before the
> controller returns:
"""
cherrypy.session.save()
hooks = cherrypy.serving.request.hooks['before_finalize']
forbidden = cherrypy.lib.sessions.save
hooks[:] = [h for h in hooks if h.callback is not forbidden]
# there's likely only one hook, since a 2nd call to save would always fail;
# but let's be safe, and block all calls to save :)
| 32,963 | Python | .py | 698 | 35.909742 | 107 | 0.60877 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,369 | resultorder.py | devsnd_cherrymusic/cherrymusicserver/resultorder.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""This class determines the order of the results
fetched from the database by some mystic-voodoo-
hocuspocus heuristics"""
try:
from imp import reload
except ModuleNotFoundError:
from importlib import reload
from cherrymusicserver import pathprovider
from cherrymusicserver import log
import cherrymusicserver.tweak
from cherrymusicserver.util import Performance
class ResultOrder:
def __init__(self, searchword, debug=False):
self.debug = debug
self.fullsearchterm = searchword.lower()
self.searchwords = searchword.lower().split(' ')
reload(cherrymusicserver.tweak)
self.perfect_match_bonus = cherrymusicserver.tweak.ResultOrderTweaks.perfect_match_bonus
self.partial_perfect_match_bonus = cherrymusicserver.tweak.ResultOrderTweaks.partial_perfect_match_bonus
self.starts_with_bonus = cherrymusicserver.tweak.ResultOrderTweaks.starts_with_bonus
self.folder_bonus = cherrymusicserver.tweak.ResultOrderTweaks.folder_bonus
self.word_in_file_name_bonus = cherrymusicserver.tweak.ResultOrderTweaks.word_in_file_name_bonus
self.word_not_in_file_name_penalty = cherrymusicserver.tweak.ResultOrderTweaks.word_not_in_file_name_penalty
self.word_in_file_path_bonus = cherrymusicserver.tweak.ResultOrderTweaks.word_in_file_path_bonus
self.word_not_in_file_path_penalty = cherrymusicserver.tweak.ResultOrderTweaks.word_not_in_file_path_penalty
def __call__(self,element):
file = element.path
isdir = element.dir
fullpath = file.lower()
filename = pathprovider.filename(file).lower()
filename_words = filename.split(' ')
bias = 0
occurences_bias = 0
perfect_match_bias = 0
partial_perfect_match_bias = 0
folder_bias = 0
starts_with_bias = 0
starts_with_no_track_number_bias = 0
#count occurences of searchwords
occurences=0
for searchword in self.searchwords:
if searchword in fullpath:
occurences_bias += self.word_in_file_path_bonus
else:
occurences_bias += self.word_not_in_file_path_penalty
if searchword in filename:
occurences_bias += self.word_in_file_name_bonus
else:
occurences_bias += self.word_not_in_file_name_penalty
#perfect match?
if filename == self.fullsearchterm or self.noThe(filename) == self.fullsearchterm:
perfect_match_bias += self.perfect_match_bonus
filename = pathprovider.stripext(filename)
#partial perfect match?
for searchword in self.searchwords:
if searchword in filename_words:
partial_perfect_match_bias += self.partial_perfect_match_bonus
if isdir:
folder_bias += self.folder_bonus
#file starts with match?
for searchword in self.searchwords:
if filename.startswith(searchword):
starts_with_bias += self.starts_with_bonus
#remove possible track number
while len(filename)>0 and '0' <= filename[0] <= '9':
filename = filename[1:]
filename = filename.strip()
for searchword in self.searchwords:
if filename == searchword:
starts_with_no_track_number_bias += self.starts_with_bonus
bias = occurences_bias + perfect_match_bias + partial_perfect_match_bias + folder_bias + starts_with_bias + starts_with_no_track_number_bias
if self.debug:
element.debugOutputSort = '''
fullsearchterm: %s
searchwords: %s
filename: %s
filepath: %s
occurences_bias %d
perfect_match_bias %d
partial_perfect_match_bias %d
folder_bias %d
starts_with_bias %d
starts_with_no_track_number_bias %d
------------------------------------
total bias %d
''' % (
self.fullsearchterm,
self.searchwords,
filename,
fullpath,
occurences_bias,
perfect_match_bias,
partial_perfect_match_bias,
folder_bias,
starts_with_bias,
starts_with_no_track_number_bias,
bias)
return bias
def noThe(self,a):
if a.lower().endswith((', the',', die')):
return a[:-5]
return a
| 5,467 | Python | .py | 132 | 34.787879 | 148 | 0.670489 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,370 | i18n_client.py | devsnd_cherrymusic/cherrymusicserver/i18n_client.py | def get():
return {
'track has no path set!': _('track has no path set!'),
'track has no label set!': _('track has no label set!'),
} | 157 | Python | .py | 5 | 25.8 | 64 | 0.535948 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,371 | playlistdb.py | devsnd_cherrymusic/cherrymusicserver/playlistdb.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2016 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from cherrymusicserver import database
from cherrymusicserver import log
from cherrymusicserver.cherrymodel import MusicEntry
from cherrymusicserver.database.connect import BoundConnector
try:
from urllib.parse import unquote
except ImportError:
from backport.urllib.parse import unquote
DBNAME = 'playlist'
class PlaylistDB:
def __init__(self, connector=None):
database.require(DBNAME, version='1')
self.conn = BoundConnector(DBNAME, connector).connection()
def deletePlaylist(self, plid, userid, override_owner=False):
cursor = self.conn.cursor()
ownerid = cursor.execute(
"SELECT userid FROM playlists WHERE rowid = ?", (plid,)).fetchone()
if not ownerid:
return _("This playlist doesn't exist! Nothing deleted!")
if userid != ownerid[0] and not override_owner:
return _("This playlist belongs to another user! Nothing deleted.")
cursor.execute("""DELETE FROM playlists WHERE rowid = ?""", (plid,))
self.conn.commit()
return 'success'
def savePlaylist(self, userid, public, playlist, playlisttitle, overwrite=False):
if not len(playlist):
return _('I will not create an empty playlist. sorry.')
duplicate_playlist = self.conn.execute(
"""SELECT rowid, public FROM playlists WHERE userid = ? AND title = ?""",
(userid, playlisttitle)
).fetchone()
if duplicate_playlist:
if overwrite:
old_playlist_id, old_public_state = duplicate_playlist
# saving an existing playlist should keep the same public state:
public = old_public_state
self.deletePlaylist(old_playlist_id, userid)
duplicate_playlist = False
else:
return _("This playlist name already exists! Nothing saved.")
cursor = self.conn.cursor()
cursor.execute("""INSERT INTO playlists
(title, userid, public) VALUES (?,?,?)""",
(playlisttitle, userid, 1 if public else 0))
playlistid = cursor.lastrowid;
#put tracknumber to each track
numberedplaylist = []
for track, song in enumerate(playlist):
numberedplaylist.append((playlistid, track, song['url'], song['title']))
cursor.executemany("""INSERT INTO tracks (playlistid, track, url, title)
VALUES (?,?,?,?)""", numberedplaylist)
self.conn.commit()
return "success"
def loadPlaylist(self, playlistid, userid):
cursor = self.conn.cursor()
cursor.execute("""SELECT rowid FROM playlists WHERE
rowid = ? AND (public = 1 OR userid = ?) LIMIT 0,1""",
(playlistid, userid));
result = cursor.fetchone()
if result:
cursor.execute("""SELECT title, url FROM tracks WHERE
playlistid = ? ORDER BY track ASC""", (playlistid,))
alltracks = cursor.fetchall()
apiplaylist = []
for track in alltracks:
#TODO ugly hack: playlistdb saves the "serve" dir as well...
trackurl = unquote(track[1])
if trackurl.startswith('/serve/'):
trackurl = trackurl[7:]
elif trackurl.startswith('serve/'):
trackurl = trackurl[6:]
apiplaylist.append(MusicEntry(path=trackurl, repr=unquote(track[0])))
return apiplaylist
def getName(self, plid, userid ):
cur = self.conn.cursor()
cur.execute("""SELECT rowid as id,title FROM playlists WHERE
(public = 1 OR userid = ?) and rowid=?""", (userid,plid));
result = cur.fetchall()
if result:
return result[0][1]
return 'playlist'
def setPublic(self, userid, plid, public):
ispublic = 1 if public else 0
cur = self.conn.cursor()
cur.execute("""UPDATE playlists SET public = ? WHERE rowid = ? AND userid = ?""", (ispublic, plid, userid))
self.conn.commit()
def _searchPlaylist(self, searchterm):
q = '''SELECT DISTINCT playlists.rowid FROM playlists, tracks
WHERE ( tracks.playlistid = playlists.rowid
AND tracks.title LIKE ? )
OR
playlists.title LIKE ?'''
cur = self.conn.cursor()
res = cur.execute(q, ('%'+searchterm+'%', '%'+searchterm+'%'))
return [row[0] for row in res.fetchall()]
def showPlaylists(self, userid, filterby='', include_public=True):
filtered = None
if filterby != '':
filtered = self._searchPlaylist(filterby)
cur = self.conn.cursor()
select = "SELECT rowid, title, userid, public, _created FROM playlists"
if include_public:
where = """ WHERE public=:public OR userid=:userid"""
else:
where = """ WHERE userid=:userid"""
cur.execute(select + where, {'public': True, 'userid': userid});
results = cur.fetchall()
playlists = []
for result in results:
if not filtered is None and result[0] not in filtered:
continue
playlists.append({'plid': result[0],
'title': result[1],
'userid': result[2],
'public': bool(result[3]),
'owner': bool(userid==result[2]),
'created': result[4]
})
return playlists
def createPLS(self,userid,plid, addrstr):
pl = self.loadPlaylist(userid=userid, playlistid=plid)
if pl:
plsstr = '''[playlist]
NumberOfEntries={}
'''.format(len(pl))
for i,track in enumerate(pl):
trinfo = { 'idx':i+1,
'url':addrstr+'/serve/'+track.path,
'name':track.repr,
'length':-1,
}
plsstr += '''
File{idx}={url}
Title{idx}={name}
Length{idx}={length}
'''.format(**trinfo)
return plsstr
def createM3U(self,userid,plid,addrstr):
pl = self.loadPlaylist(userid=userid, playlistid=plid)
if pl:
trackpaths = map(lambda x: addrstr+'/serve/'+x.path,pl)
return '\n'.join(trackpaths)
| 7,535 | Python | .py | 172 | 33.819767 | 115 | 0.596434 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,372 | test_playlistdb.py | devsnd_cherrymusic/cherrymusicserver/test/test_playlistdb.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
from cherrymusicserver import log
log.setTest()
from cherrymusicserver import database
from cherrymusicserver import service
from cherrymusicserver.playlistdb import *
_DEFAULT_USERID = 1
def setup():
service.provide('dbconnector', database.sql.TmpConnector)
database.ensure_current_version(DBNAME)
def teardown():
service.provide('dbconnector', None)
def create_playlist(name, titles):
pldb = PlaylistDB()
public = True
userid = _DEFAULT_USERID
songs = [dict(title=t, url="url(" + t + ")") for t in titles]
pldb.savePlaylist(userid, public, songs, name, overwrite=True)
playlist = get_playlist(name)
assert playlist
return playlist
def test_delete_playlist():
pldb = PlaylistDB()
create_playlist('deleteme', ['delete', 'me'])
pl = get_playlist('deleteme')
assert pldb.deletePlaylist(pl['plid'], None, override_owner=True) == 'success'
assert pldb.deletePlaylist(pl['plid'], None, override_owner=True) == "This playlist doesn't exist! Nothing deleted!"
def get_playlist(name):
pldb = PlaylistDB()
for p in pldb.showPlaylists(_DEFAULT_USERID):
if p['title'] == name:
return p
def test_set_public():
pl = create_playlist('some_title', list('abc'))
assert get_playlist('some_title')['public']
PlaylistDB().setPublic(_DEFAULT_USERID, pl['plid'], False)
assert not get_playlist('some_title')['public']
if __name__ == '__main__':
nose.runmodule()
| 2,613 | Python | .py | 71 | 33.943662 | 120 | 0.726408 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,373 | test_httphandler.py | devsnd_cherrymusic/cherrymusicserver/test/test_httphandler.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from mock import *
import unittest
import json
from contextlib import contextmanager
import cherrymusicserver as cherry
from cherrymusicserver import configuration
cherry.config = configuration.from_defaults()
from cherrymusicserver import httphandler
from cherrymusicserver import service
from cherrymusicserver.cherrymodel import CherryModel, MusicEntry
class MockAction(Exception):
pass
class MockModel:
def __init__(self):
pass
def search(self,value,isFastSearch=False):
if isFastSearch:
return [MusicEntry('fast mock result','fast mock result')]
else:
return [MusicEntry('mock result','mock result')]
def motd(self):
return "motd"
def updateLibrary(self):
raise MockAction('updateLibrary')
service.provide('cherrymodel', MockModel)
class CherryPyMock:
def __init__(self):
self.session = {'admin': False}
from cherrymusicserver.playlistdb import PlaylistDB
MockPlaylistDB = Mock(spec=PlaylistDB)
service.provide('playlist', MockPlaylistDB)
@contextmanager
def mock_auth():
''' Context where user 1 is logged in '''
always_auth = lambda _: True
root_id = lambda _: 1
with patch('cherrymusicserver.httphandler.HTTPHandler.isAuthorized', always_auth):
with patch('cherrymusicserver.httphandler.HTTPHandler.getUserId', root_id):
yield
class TestHTTPHandler(unittest.TestCase):
def setUp(self):
self.http = httphandler.HTTPHandler(cherry.config)
for apicall, func in self.http.handlers.items():
try:
getattr(self,'test_'+func.__name__)
except AttributeError:
print('Missing test for api handler %s!' % func.__name__)
def tearDown(self):
pass
def call_api(self, action, **data):
with mock_auth():
return self.http.api(action, data=json.dumps(data))
def test_api_search(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'search')
def test_api_fastsearch(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'search')
def test_api_rememberplaylist(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'rememberplaylist')
def test_api_saveplaylist(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'saveplaylist')
def test_api_deleteplaylist(self):
try:
print(self.call_api('deleteplaylist', playlistid=13))
except httphandler.cherrypy.HTTPError as e:
print(e)
MockPlaylistDB.deletePlaylist.assert_called_with(13, ANY, override_owner=False)
def test_api_loadplaylist(self):
pass #needs to be tested in playlistdb
def test_api_getmotd(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getmotd')
def test_api_restoreplaylist(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'restoreplaylist')
def test_api_getplayables(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getplayables')
def test_api_getuserlist(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getuserlist')
def test_api_adduser(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'adduser')
def test_api_showplaylists(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'showplaylists')
def test_api_logout(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'logout')
def test_api_downloadpls(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'downloadpls')
def test_api_downloadm3u(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'downloadm3u')
def test_api_downloadpls_call(self):
MockPlaylistDB.getName.return_value = 'some_playlist_name'
MockPlaylistDB.createPLS.return_value = 'some_pls_string'
self.call_api('downloadpls', plid=13, hostaddr='host')
MockPlaylistDB.createPLS.assert_called_with(userid=ANY, plid=13, addrstr='host')
def test_api_downloadm3u_call(self):
MockPlaylistDB.getName.return_value = 'some_playlist_name'
MockPlaylistDB.createM3U.return_value = 'some_m3u_string'
self.call_api('downloadm3u', plid=13, hostaddr='host')
MockPlaylistDB.createM3U.assert_called_with(userid=ANY, plid=13, addrstr='host')
def test_api_export_playlists(self):
from collections import defaultdict
MockPlaylistDB.showPlaylists.return_value = [defaultdict(MagicMock)]
MockPlaylistDB.getName.return_value = 'some_playlist_name'
MockPlaylistDB.createM3U.return_value = 'some_m3u_string'
with patch('cherrypy.session', {'userid': 1}, create=True):
bytestr = self.http.export_playlists(hostaddr='hostaddr', format='m3u')
import io, zipfile
zip = zipfile.ZipFile(io.BytesIO(bytestr), 'r')
try:
badfile = zip.testzip()
assert badfile is None
filenames = zip.namelist()
assert ['some_playlist_name.m3u'] == filenames, filenames
content = zip.read('some_playlist_name.m3u')
assert 'some_m3u_string'.encode('ASCII') == content, content
finally:
zip.close()
def test_api_getsonginfo(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getsonginfo')
def test_api_getencoders(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getencoders')
def test_api_getdecoders(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getdecoders')
def test_api_transcodingenabled(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'transcodingenabled')
def test_api_updatedb(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'updatedb')
def test_api_compactlistdir(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'compactlistdir')
def test_api_getconfiguration(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getconfiguration')
def test_api_getuseroptions(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'getuseroptions')
def test_api_userdelete_needs_auth(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'userdelete')
def test_api_userdelete_call(self):
session = {'userid': 1, 'admin': True}
userdb = Mock()
with patch('cherrypy.session', session, create=True):
with patch('cherrymusicserver.service.get') as service:
service.return_value = userdb
self.call_api('userdelete', userid=13)
userdb.deleteUser.assert_called_with(13)
def test_api_heartbeat(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'heartbeat')
def test_api_fetchalbumart(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
if not self.http.handlers['fetchalbumart'].noauth:
self.assertRaises(AttributeError, self.http.api, 'fetchalbumart')
def test_api_setuseroption(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'setuseroption')
def test_api_changeplaylist(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
if not self.http.handlers['fetchalbumart'].noauth:
self.assertRaises(AttributeError, self.http.api, 'fetchalbumart')
def test_api_listdir(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'listdir')
def test_api_listdir_must_call_cherrymodel_listdir(self):
mock = MagicMock(spec=CherryModel)
oldservice = service.get('cherrymodel')
service.provide('cherrymodel', mock)
self.http.api_listdir('dir')
mock.listdir.assert_called_with('dir')
service.provide('cherrymodel', oldservice)
def test_api_compactlistdir_must_call_cherrymodel_listdir(self):
mock = MagicMock(spec=CherryModel)
oldservice = service.get('cherrymodel')
service.provide('cherrymodel', mock)
self.http.api_compactlistdir('dir', filterstr='x')
mock.listdir.assert_called_with('dir', 'x')
service.provide('cherrymodel', oldservice)
def test_api_userchangepassword(self):
"""when attribute error is raised, this means that cherrypy
session is used to authenticate the http request."""
self.assertRaises(AttributeError, self.http.api, 'userchangepassword')
def test_trans():
import sys
if sys.version_info < (3, 0):
test_paths = ['path', ('p\xc3\xb6th', u'pöth')]
elif cherry.needs_serve_file_utf8_fix:
test_paths = ['path',
('pöth'.encode('utf-8').decode('latin-1'), 'pöth')]
else:
test_paths = ['path', 'pöth']
for path in test_paths:
yield check_trans, path
def check_trans(path):
import os
path, expectPath = (path, path) if isinstance(path, str) else path
config = {'media.basedir': 'BASEDIR', 'media.transcode': True}
with mock_auth():
with patch('cherrymusicserver.httphandler.cherry.config', config):
with patch('cherrymusicserver.httphandler.cherrypy'):
with patch('cherrymusicserver.httphandler.audiotranscode.AudioTranscode') as transcoder:
transcoder.return_value = transcoder
expectPath = os.path.join(config['media.basedir'], expectPath)
httphandler.HTTPHandler(config).trans('newformat', path, bitrate=111)
transcoder.transcode_stream.assert_called_with(expectPath, 'newformat', bitrate=111, starttime=0)
if __name__ == "__main__":
unittest.main()
| 14,071 | Python | .py | 279 | 42.860215 | 117 | 0.694282 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,374 | test_useroptiondb.py | devsnd_cherrymusic/cherrymusicserver/test/test_useroptiondb.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from nose.tools import *
from cherrymusicserver import service
from cherrymusicserver import database
from cherrymusicserver import log
log.setTest()
from cherrymusicserver import useroptiondb
from cherrymusicserver.useroptiondb import UserOptionDB
def setup_module():
service.provide('dbconnector', database.sql.MemConnector)
database.ensure_current_version(useroptiondb.DBNAME, autoconsent=True)
def test_constructor():
UserOptionDB()
if __name__ == '__main__':
nose.runmodule()
| 1,601 | Python | .py | 45 | 34.044444 | 74 | 0.773902 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,375 | test_pathprovider.py | devsnd_cherrymusic/cherrymusicserver/test/test_pathprovider.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
import os.path
import re
from cherrymusicserver import log
log.setTest()
from cherrymusicserver import pathprovider
def test_absOrConfigPath():
relpath = 'relpath'
abspath = os.path.abspath(relpath)
ok_(pathprovider.absOrConfigPath(relpath).startswith(pathprovider.getConfigPath()))
eq_(abspath, pathprovider.absOrConfigPath(abspath))
def test_albumArtFilePath():
"""albumArtFilePath contains md5-filename, or no filename with empty argument"""
testpath = pathprovider.albumArtFilePath('a/s/d')
artfolder, filename = os.path.split(testpath)
ok_(re.match(r'^[0-9a-fA-F]{32}\.thumb$', filename), filename)
eq_(artfolder, pathprovider.albumArtFilePath('').rstrip(os.path.sep))
if __name__ == '__main__':
nose.runmodule()
| 1,907 | Python | .py | 51 | 35.431373 | 87 | 0.753113 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,376 | test_albumartfetcher.py | devsnd_cherrymusic/cherrymusicserver/test/test_albumartfetcher.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from __future__ import unicode_literals
import nose
from mock import *
from nose.tools import *
from cherrymusicserver.test import helpers
from binascii import unhexlify
from cherrymusicserver import log
log.setTest()
from cherrymusicserver import albumartfetcher
def test_methods():
for method in albumartfetcher.AlbumArtFetcher.methods:
yield try_method, method
def try_method(method, timeout=15):
fetcher = albumartfetcher.AlbumArtFetcher(method=method, timeout=timeout)
results = fetcher.fetchurls('best of')
results += fetcher.fetchurls('best of') # once is not enough sometimes (?)
ok_(results, "method {0!r} results: {1}".format(method, results))
def test_fetchLocal_id3():
"""Album art can be fetched with tinytag"""
# PNG image data, 1 x 1, 1-bit grayscale, non-interlaced
_PNG_IMG_DATA = unhexlify(b''.join(b"""
8950 4e47 0d0a 1a0a 0000 000d 4948 4452
0000 0001 0000 0001 0100 0000 0037 6ef9
2400 0000 1049 4441 5478 9c62 6001 0000
00ff ff03 0000 0600 0557 bfab d400 0000
0049 454e 44ae 4260 82""".split()))
fetcher = albumartfetcher.AlbumArtFetcher()
with patch('cherrymusicserver.albumartfetcher.TinyTag') as TinyTagMock:
TinyTagMock.get().get_image.return_value = _PNG_IMG_DATA
with helpers.tempdir('test_albumartfetcher') as tmpd:
artpath = helpers.mkpath('test.mp3', parent=tmpd)
fetcher.fetchLocal(tmpd)
TinyTagMock.get.assert_called_with(artpath, image=True)
assert TinyTagMock.get().get_image.called
if __name__ == '__main__':
nose.runmodule()
| 2,683 | Python | .py | 66 | 37.621212 | 81 | 0.739915 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,377 | __init__.py | devsnd_cherrymusic/cherrymusicserver/test/__init__.py | import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
if __name__ == '__main__':
unittest.main()
| 150 | Python | .py | 7 | 18.428571 | 32 | 0.652482 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,378 | test_database.py | devsnd_cherrymusic/cherrymusicserver/test/test_database.py | #!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import unittest
import sqlite3
import os
from cherrymusicserver import log
log.setTest()
import cherrymusicserver.database.defs as defs
from cherrymusicserver.database.sql import MemConnector
class TestDefs(unittest.TestCase):
def setUp(self):
defdir = os.path.dirname(defs.__file__)
dbnames = tuple(n for n in os.listdir(defdir) if not n.startswith('__'))
self.defs = dict((name, defs.get(name)) for name in dbnames)
def test_all_databases_are_defined(self):
required = ('cherry.cache', 'playlist', 'user', 'useroptions')
missing = set(required) - set(self.defs)
assert not missing, "no database definitions must be missing " + str(missing)
def test_versionnames_are_all_ints(self):
def check(dbdef):
"version names must all be integers"
nonint = [version for version in dbdef if not version.isdigit()]
if nonint:
yield nonint
self.run_forall_defs(check)
def test_versionnames_are_consecutive_starting_0_or_1(self):
def check(dbdef):
"versions must be consecutive and start with 0 or 1"
versions = sorted(int(v) for v in dbdef)
min, max = versions[0], versions[-1]
expected = list(range(1 if min else 0, max + 1))
if expected != versions:
yield versions
self.run_forall_defs(check)
def test_versionkeys_required_are_present(self):
required = ('create.sql', 'drop.sql', 'update.sql')
initially_ok = ('update.sql',)
def check(dbdef):
"database version must include required keys"
for vnum, vdef in dbdef.items():
missing = set(required) - set(vdef)
if vnum == min(dbdef):
missing -= set(initially_ok)
if missing:
yield vnum, missing
self.run_forall_defs(check)
def test_versionpermutations_are_updatable(self):
def check(dbdef):
"incremental updates must work for all versions"
start, stop = int(min(dbdef)), int(max(dbdef)) + 1
program = ((i, range(i + 1, stop)) for i in range(start, stop))
for base, updates in program:
connector = MemConnector().bound(None) # new MemConnector for fresh db
try:
create(dbdef, base, connector)
update(dbdef, updates, connector)
except AssertionError as e:
yield 'base version: {0} {1}'.format(base, e.args[0])
break # don't accumulate errors
self.run_forall_defs(check)
def test_versionremoval_drop_clears_db(self):
def check(dbdef):
"drop script must clear the database"
for version in dbdef:
connector = MemConnector().bound(None)
create(dbdef, version, connector)
drop(dbdef, version, connector)
remaining = connector.execute(
"SELECT * FROM sqlite_master WHERE name NOT LIKE 'sqlite_%'"
).fetchall()
if remaining:
yield '{0}:drop.sql'.format(version), remaining
self.run_forall_defs(check)
def run_forall_defs(self, check):
errors = []
for dbname, dbdef in self.defs.items():
for error in check(dbdef):
errors += ['{0}: {1}: {2}'.format(check.__doc__, dbname, error)]
assert not errors, os.linesep + os.linesep.join(errors)
def create(dbdef, vnum, connector):
with connector.connection() as c:
runscript(dbdef, vnum, 'create.sql', c)
runscript(dbdef, vnum, 'after.sql', c, missing_ok=True)
def drop(dbdef, vnum, connector):
with connector.connection() as c:
runscript(dbdef, vnum, 'drop.sql', c)
def update(dbdef, vnums, connector):
for vnum in vnums:
with connector.connection() as c:
runscript(dbdef, vnum, 'update.sql', c)
runscript(dbdef, vnum, 'after.sql', c, missing_ok=True)
def runscript(dbdef, vnum, scriptname, conn, missing_ok=False):
'''Run an SQL script, statement per statement, and give a helpful
message on error.
'''
try:
script = dbdef[str(vnum)][scriptname]
except KeyError:
if missing_ok:
return
raise
lno = 1
for stmt in split_sqlscript(script):
linecount = stmt.count('\n') # yeah, linux linesep.
try:
cursor = conn.cursor()
cursor.execute(stmt.strip())
except sqlite3.Error as e:
if stmt.splitlines() and not stmt.splitlines()[0].strip(): # skip 1st line if empty
lno += 1
linecount -= 1
msg = '{br}{script}:{br}{listing}{br}{br}{error}'.format(
script='{0}:{1}:{2}'.format(vnum, scriptname, lno),
listing=os.linesep.join(script_lines(script, lno, linecount + 1)),
error=e,
br=os.linesep)
raise AssertionError(msg)
else:
lno += linecount
finally:
cursor.close()
def split_sqlscript(script):
import re
stmts = [x + ';' for x in script.split(';')]
i = 0
while i < len(stmts):
if re.search(r'\bBEGIN\b', stmts[i], re.I):
while (i + 1) < len(stmts) and not re.search(r'\bEND\b', stmts[i], re.I):
stmts[i] += stmts[i + 1]
del stmts[i + 1]
if re.search(r'\bEND\b', stmts[i], re.I):
break
i += 1
return stmts
def script_lines(script, start=1, length=0):
'''A range of lines from a text file, including line number prefix.'''
stop = start + length
gutterwidth = len(str(stop)) + 1
i = 0
for line in script.splitlines()[start - 1:stop - 1]:
yield '{n:{w}}| {line}'.format(
n=start + i,
w=gutterwidth,
line=line
)
i += 1
if __name__ == '__main__':
unittest.main()
| 7,167 | Python | .py | 175 | 32.04 | 96 | 0.600316 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,379 | test_userdb.py | devsnd_cherrymusic/cherrymusicserver/test/test_userdb.py | #!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import unittest
from cherrymusicserver import log
log.setTest()
from cherrymusicserver import database
from cherrymusicserver import service
from cherrymusicserver import userdb
from cherrymusicserver.database.sql import MemConnector, TmpConnector
class TestAuthenticate(unittest.TestCase):
'''test authentication functions of userdb'''
def setUp(self, dbconnector=MemConnector):
service.provide('dbconnector', dbconnector)
database.ensure_current_version(userdb.DBNAME)
self.users = userdb.UserDB()
self.users.addUser('user', 'password', False)
#unittest2 compability
if not hasattr(self,'assertTupleEqual'):
def assertTupEq(t1,t2,msg):
if not all(i==j for i,j in zip(t1,t2)):
raise AssertionError(msg)
self.assertTupleEqual = assertTupEq
#end of workaround
def tearDown(self):
pass
def testRegisteredUserCanLogin(self):
'''successful authentication must return authenticated user'''
authuser = self.users.auth('user', 'password')
self.assertEqual('user', authuser.name,
'authentication must return authenticated user')
def testNoLoginWithWrongPassword(self):
'''valid username and invalid password = authentication failure'''
authuser = self.users.auth('user', 'passwordtypo')
self.assertTupleEqual(userdb.User.nobody(), authuser,
'authentication failure must return invalid user')
def testNoLoginWithInvalidUser(self):
'''invalid username = authentication failure'''
authuser = self.users.auth('!@#$%^&*(', ')(*&^%$#')
self.assertTupleEqual(userdb.User.nobody(), authuser,
'authentication failure must return invalid user')
def testChangePassword(self):
connector = TmpConnector() # use different connections, don't share
self.setUp(connector)
#create new user
self.users.addUser('newpwuser', 'password', False)
msg = self.users.changePassword('newpwuser', 'newpassword')
self.assertEqual(msg, "success")
authuser = self.users.auth('newpwuser', 'password')
self.assertTupleEqual(userdb.User.nobody(), authuser,
'authentication with old password after change must fail')
self.users = userdb.UserDB() # force different DB connection
authuser = self.users.auth('newpwuser', 'newpassword')
self.assertEqual('newpwuser', authuser.name,
'authentication with new password failed')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 3,802 | Python | .py | 84 | 38.666667 | 83 | 0.691849 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,380 | test_configuration.py | devsnd_cherrymusic/cherrymusicserver/test/test_configuration.py | #!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import unittest
from nose.tools import raises
try:
from collections import OrderedDict
except ImportError:
from backport.collections import OrderedDict
import cherrymusicserver.configuration as cfg
from cherrymusicserver.configuration import Key, Property, Configuration
from cherrymusicserver import log
log.setTest()
class TestKey(object):
def testConstructor(self):
assert '' == str(Key())
assert '' == str(Key(None))
assert '' == str(Key(str()))
assert 'a.b' == str(Key('a.b'))
assert 'a.b' == str(Key('A.B'))
def testValidation(self):
for name in '. 1 _ $'.split() + [object()]:
try:
Key(name)
except cfg.ConfigError:
pass
else:
assert False, 'must not accept {0} as Key name'.format(name)
def testEquals(self):
assert None == Key()
assert '' == Key()
assert Key() == Key()
assert Key('a') == Key('A')
assert 'a' == Key('a')
assert Key('a') != Key('b')
assert not('a' != Key('A'))
def testAdd(self):
assert '' == Key() + Key()
assert '' == Key() + None
assert 'string' == Key() + 'string'
assert 'a.b' == Key('a') + 'b'
def testRightAdd(self):
assert 'a.b' == 'a' + Key('b')
def testAssignAdd(self):
key = Key('a')
second_key = key
second_key += 'b'
assert key is not second_key
assert 'a.b' == second_key, (second_key)
assert 'a' == key, (key)
class TestProperty(object):
attributes = 'key value type valid readonly hidden doc'.split()
def test_tupleness_attributes_and_defaults(self):
"""A property is a tuple with named values."""
default = OrderedDict.fromkeys(self.attributes, None)
default['key'] = ''
p = Property()
assert tuple(default.values()) == p, p
for attrname, value in default.items():
assert getattr(p, attrname) == value
def test_key_is_normalized(self):
assert 'x.y' == Property('X.Y').key
def test_type_inferrence(self):
for T in (bool, int, float, str, type(''),):
assert T.__name__ == Property(value=T()).type, T
class UnknownType:
pass
assert None == Property(value=UnknownType()).type
assert 'float' == Property('', 4, float).type
def test_autocast(self):
assert 13 == Property('', '13', int).value
@raises(cfg.ConfigValueError)
def test_bad_value_for_type(self):
Property('', 'a', int)
@raises(cfg.ConfigValueError)
def test_validation_by_regex(self):
assert 0 == Property('', 0, valid='[0-9]').value
Property('', ['x'], valid='[0-9]')
@raises(cfg.ConfigValueError)
def test_validation_by_callable(self):
Property('', False, valid=lambda v: v)
def test_None_value_is_not_cast_or_validated(self):
assert None == Property(type=bool, valid=lambda v: v is not None).value
def test_to_dict(self):
p = Property('bla', 12, int, '\d+', True, True, '')
assert p == Property(**p.to_dict())
def test_replace_without_values(self):
p = Property('a', 5, int, '\d+', False, False, 'doc')
assert p == p.replace()
assert p == p.replace(**dict.fromkeys(self.attributes))
@raises(cfg.ConfigWriteError)
def test_cannot_replace_if_readonly(self):
Property(readonly=True).replace()
@raises(cfg.ConfigWriteError)
def test_replace_key(self):
assert 'different.key' == Property().replace(key='different.key').key
Property('some.key').replace(key='different.key')
def test_replace_value(self):
p = Property(value='original')
assert 'new' == p.replace(value='new').value
assert 'original' == p.replace(value=None).value
def test_replace_type(self):
assert 'int' == Property().replace(type=int).type
assert 'int' == Property(type=int).replace(type=str).type
def test_replace_attributes_only_overridden_if_None(self):
for attrname in self.attributes[3:]:
good = {attrname: ''} # a False value != None to make readonly work
bad = {attrname: 'unwanted'}
assert '' == getattr(Property().replace(**good), attrname)
assert '' == getattr(Property(**good).replace(**bad), attrname)
def test_immutable(self):
p = Property()
assert p is not p.replace(**p.to_dict())
for attrname in self.attributes:
try:
setattr(p, attrname, None)
except AttributeError:
pass
else:
assert False, 'must not be able to change %r ' % (attrname,)
class TestConfiguration:
def test_constructor(self):
from collections import Mapping
assert isinstance(Configuration(), Mapping)
assert not len(Configuration())
def test_equals_works_with_dict(self):
assert {} == Configuration()
assert {'a': 1} != Configuration()
def test_from_and_to_properties(self):
properties = [Property('a'),
Property('a.b', 5, int, '\d+', True, True, 'doc'),
Property('b', 5, int, '\d+', True, True, 'doc')]
conf = Configuration.from_properties(properties)
assert properties == list(conf.to_properties())
assert 'a' in conf
assert 'a.b' in conf
assert 'b' in conf
def test_from_mapping(self):
mapping = {'a': None, 'a.b': 5, 'b': 7}
assert mapping == Configuration.from_mapping(mapping)
def test_attribute_access(self):
p = Property('b', 5, int, '\d+', True, True, 'doc')
conf = Configuration.from_properties([p])
assert 5 == conf['b']
assert p == conf.property('b')
def test_builder(self):
properties = [Property('a', 5), Property('a.b', 6, int, '6.*', True, True, 'doc')]
cb = cfg.ConfigBuilder()
with cb['a'] as a:
a.value = 5
with a['b'] as ab:
ab.value = 6
ab.valid = '6.*'
ab.readonly = True
ab.hidden = True
ab.doc = 'doc'
assert properties == list(cb.to_configuration().to_properties())
def test_inheritance_of_property_attributes(self):
cb = cfg.ConfigBuilder()
with cb['parent'] as parent:
parent.valid = '.*'
parent.readonly = True
parent.hidden = True
with parent['child'] as child:
child.value = 4
childprop = cb.to_configuration().property('parent.child')
assert '.*' == childprop.valid
assert childprop.readonly
assert childprop.hidden
def test_update(self):
conf = Configuration.from_properties([Property('b', 'old')])
newvalues = {'b': 'replaced', 'c': 'new'}
assert newvalues == conf.update(newvalues)
def test_replace_changes_existing(self):
conf = Configuration.from_properties([Property('b', 'old')])
newvalues = {'b': 'replaced'}
assert newvalues == conf.replace(newvalues)
@raises(cfg.ConfigKeyError)
def test_replace_cannot_add_new(self):
Configuration().replace({'new': None})
class TestTransformers(unittest.TestCase):
def test_value_conversions(self):
def assert_value_conversion(kind, testvalue, expected):
p = Property('test', testvalue, type=kind)
actual = p.value
self.assertEqual(
expected, actual,
('Bad %s conversion for value: %r! expect: %r, actual: %r'
% (kind, p.value, expected, actual)))
def assert_value_conversions(kind, val_exp_pairs):
for testvalue, expected in val_exp_pairs:
assert_value_conversion(kind, testvalue, expected)
assert_value_conversions('str', ((' ', ''),
(None, None),
))
assert_value_conversions('int', (('99', 99),
('-1', -1),
(None, None),
))
assert_value_conversions('float', (('99', 99),
('1.2', 1.2),
('1.2e3', 1200),
(None, None),
))
assert_value_conversions('bool', (('1', True),
('0', False),
('Yes', True),
('Y', True),
('NO', False),
('N', False),
('truE', True),
('False', False),
('', False),
(None, None),
))
if __name__ == "__main__":
unittest.main()
| 10,335 | Python | .py | 243 | 31.650206 | 90 | 0.551342 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,381 | test_util.py | devsnd_cherrymusic/cherrymusicserver/test/test_util.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from nose.tools import *
from cherrymusicserver import util
from cherrymusicserver import log
log.setTest()
def test_maxlen_trim():
assert util.trim_to_maxlen(7, 'abcdefghi') == 'a ... i'
def test_phrase_to_lines():
phrase = '''qwertyui9o0 sdfghjk dfghjk dfghj fghjk dfghjk fghj fghj
ghjfdkj ahg jkdgf sjkdfhg skjfhg sjkfh sjkd fhgsjd hgf sdjhgf skjg
fg hjkfghjk fghjk gfhjk fghj fghjk ghj fghj gfhjk fghj ghj
asd'''
lines = util.phrase_to_lines(phrase, length=80)
assert 60 < len(lines[0]) < 80
assert 60 < len(lines[1]) < 80
assert len(lines[1]) < 80
def test_moving_average():
mov = util.MovingAverage(size=2)
assert mov.avg == 0
assert mov.min == 0
assert mov.max == 0
mov.feed(2)
assert mov.avg == 1
assert mov.min == 0
assert mov.max == 2
def test_time2text():
assert util.time2text(0) == 'just now'
for mult in [60, 60*60, 60*60*24, 60*60*24*31, 60*60*24*365]:
for i in [-1, -3, 1, 3]:
assert util.time2text(i * mult)
def test_performance_logger():
with util.Performance('potato head') as p:
p.log('elephant')
if __name__ == '__main__':
nose.runmodule()
| 2,279 | Python | .py | 65 | 32.184615 | 71 | 0.705082 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,382 | test_sqlitecache.py | devsnd_cherrymusic/cherrymusicserver/test/test_sqlitecache.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
import nose
import unittest
from nose.tools import *
from cherrymusicserver.test.helpers import cherrytest, tempdir, symlinktest
import os
import re
import shutil
import sys
import tempfile
import cherrymusicserver as cherry
from cherrymusicserver import configuration
from cherrymusicserver import database
from cherrymusicserver import log
from cherrymusicserver import sqlitecache
from cherrymusicserver import service
sqlitecache.debug = True
from cherrymusicserver.database.sql import MemConnector
log.setTest()
class TestFile(object):
def __init__(self, fullpath, parent=None, isdir=None, uid=None):
self.uid = uid if uid else -1
self.fullpath = fullpath if not parent else os.path.join(parent.fullpath, fullpath)
self.parent = parent
self.isdir = fullpath.endswith(os.path.sep) if (isdir is None) else isdir
if self.isdir:
self.fullpath = fullpath[:-1]
self.name = os.path.basename(self.fullpath)
self.ext = ''
else:
self.name, self.ext = os.path.splitext(os.path.basename(fullpath))
def __repr__(self):
return '[%d] %s%s (-> %d)' % (self.uid,
self.name + self.ext,
'*' if self.isdir else '',
- 1 if self.parent is None
else self.parent.uid)
@property
def exists(self):
return os.path.exists(self.fullpath)
@classmethod
def enumerate_files_in(cls, somewhere, sort):
raise NotImplementedError("%s.%s.enumerate_files_in(cls, paths, sort)"
% (__name__, cls.__name__))
tmpdir = None
def setUpModule():
global tmpdir
tmpdir = tempfile.mkdtemp(suffix='-test_sqlitecache', prefix='tmp-cherrymusic-')
if sys.version_info < (2, 7): # hack to support python 2.6 which doesn't setUpModule()
setUpModule()
def tearDownModule():
shutil.rmtree(tmpdir, ignore_errors=False, onerror=None)
def getAbsPath(*relpath):
'returns the absolute path for a path relative to the global testdir'
return os.path.join(tmpdir, *relpath)
def setupTestfile(testfile):
if testfile.isdir:
setupDir(testfile.fullpath)
# os.makedirs(testfile.fullpath, exist_ok=True)
else:
if not os.path.exists(testfile.fullpath):
open(testfile.fullpath, 'w').close()
assert testfile.exists
def setupTestfiles(testdir, testfiles):
testdir = os.path.join(tmpdir, testdir, '')
setupTestfile(TestFile(testdir))
for filename in testfiles:
filename = os.path.join(testdir, filename)
setupTestfile(TestFile(filename))
def setupDir(testdir):
import errno
try:
os.makedirs(testdir) #, exist_ok=True) # py2 compatibility
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(testdir):
pass
else:
raise
def removeTestfile(testfile):
if testfile.isdir:
os.rmdir(testfile.fullpath)
else:
os.remove(testfile.fullpath)
def removeTestfiles(testdir, testfiles):
testdir = os.path.join(tmpdir, testdir)
shutil.rmtree(testdir, ignore_errors=True, onerror=None)
class AddFilesToDatabaseTest(unittest.TestCase):
testdirname = 'empty'
def setupConfig(self):
cherry.config = configuration.from_defaults()
cherry.config = cherry.config.replace({'media.basedir': self.testdir})
def setUp(self):
self.testdir = getAbsPath(self.testdirname)
setupTestfiles(self.testdir, ())
self.setupConfig()
service.provide('dbconnector', MemConnector)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
self.Cache = sqlitecache.SQLiteCache()
self.Cache.full_update()
def tearDown(self):
removeTestfiles(self.testdir, ())
self.Cache.conn.close()
def test_add_to_file_table(self):
parent = TestFile('test/', parent=None, isdir=True)
parent.uid = 42
file = TestFile('test/filename.extension', parent=parent, isdir=False)
# RUN
self.Cache.add_to_file_table(parent)
self.Cache.add_to_file_table(file)
self.assertTrue(file.uid >= 0, "file must have valid rowid")
colnames = ('parent', 'filename', 'filetype', 'isdir')
res = self.Cache.conn.execute('SELECT %s from files WHERE rowid=?'%(', '.join(colnames),), (file.uid,)).fetchall()
self.assertTrue(1 == len(res), "expect exactly one file with that uid")
self.assertTrue(len(colnames) == len(res[0]), "expect exactly %s colums stored per file, got %s" % (len(colnames),len(res[0])))
resdict = {}
i=0
for k in colnames:
resdict[k] = res[0][i]
i+=1
self.assertTrue(parent.uid == resdict['parent'], "correct parent id must be saved")
self.assertTrue('filename' == resdict['filename'], "filename must be saved without extension")
self.assertTrue('.extension' == resdict['filetype'], "extension must be saved with leading .")
self.assertFalse(resdict['isdir'], 'isdir must not be set in files table')
isdir = self.Cache.conn.execute('SELECT isdir from files WHERE rowid=?', (parent.uid,)).fetchone()[0]
self.assertTrue(isdir, "isdir must be saved correctly")
def test_add_to_dictionary_table(self):
"""searchable parts of a filename must be added to the dictionary as
words, and a list of unique word ids returned"""
filename = 'abc ÖÄUßé.wurst_-_blablabla.nochmal.wurst'
words = sqlitecache.SQLiteCache.searchterms(filename)
ids = self.Cache.add_to_dictionary_table(filename)
idset = set(ids)
self.assertTrue(len(ids) == len(idset), "there must be no duplicate ids")
for word in words:
cursor = self.Cache.conn.execute('SELECT rowid FROM dictionary WHERE word=?', (word,))
res = cursor.fetchall()
self.assertTrue(len(res) == 1, "there must be exactly one matching row per word")
self.assertTrue(res[0][0] in idset, "the wordid must be returned by the function")
idset.remove(res[0][0]) # make sure no other tested word can use that id to pass
self.assertTrue(len(idset) == 0, "there must not be more ids than unique words")
def test_add_to_search_table(self):
fileid = 99
wordids = (13, 42)
self.Cache.add_to_search_table(fileid, wordids)
for wid in wordids:
found = self.Cache.conn.execute('SELECT frowid FROM search WHERE drowid=?', (wid,)).fetchone()[0]
self.assertTrue(fileid == found, 'fileid must be associated with wordid')
def test_register_file_with_db(self):
testnames = (
'SUCHMICH',
'findmich suchmich',
'suchMICH blablub',
'wurst-mit-Suchmich.doch-schinken',
)
for filename in testnames:
self.Cache.register_file_with_db(TestFile(filename))
found = self.Cache.searchfor('SUCHMICH', 100)
#map musicentries to string
found = list(map(lambda x : x.path, found))
for filename in testnames:
self.assertTrue(filename in found, "all added files must be findable by cache search")
class FileTest(unittest.TestCase):
testdir = 'filetest'
testfiles = (
os.path.join('rootlevelfile'),
os.path.join('firstdir', ''),
os.path.join('firstdir', 'firstlevelfile'),
os.path.join('firstdir', 'seconddir', ''),
os.path.join('firstdir', 'seconddir', 'secondlevelfile'),
os.path.join('nonASCIItest', ''),
os.path.join('nonASCIItest', 'öäßÖÄÉ'),
)
def setUp(self):
setupTestfiles(self.testdir, self.testfiles)
def tearDown(self):
removeTestfiles(self.testdir, self.testfiles)
def assertFilesEqual(self, expected, actual):
self.assertTrue(actual.exists)
self.assertTrue(expected.fullpath == actual.fullpath, "equal fullpath %s vs %s" % (expected.fullpath, actual.fullpath))
self.assertTrue(expected.name == actual.name, "equal name %s vs %s " % (expected.name, actual.name))
self.assertTrue(expected.ext == actual.ext, 'equal extension %s vs %s' % (expected.ext, actual.ext))
self.assertTrue(expected.isdir == actual.isdir, 'equal dir flag %s vs %s (%s)' % (expected.isdir, actual.isdir, expected.fullpath))
def testFileClass(self):
for filename in self.testfiles:
filename = os.path.join(tmpdir, self.testdir, filename)
expected = TestFile(filename)
if filename.endswith(os.path.sep):
filename = filename[:-1]
actual = sqlitecache.File(filename)
self.assertFilesEqual(expected, actual)
class RemoveFilesFromDatabaseTest(unittest.TestCase):
testdirname = 'deltest'
testfiles = (
os.path.join('root_file'),
os.path.join('root_dir', ''),
os.path.join('root_dir', 'first_file'),
os.path.join('root_dir', 'first_dir', ''),
os.path.join('root_dir', 'first_dir', 'first_file'),
os.path.join('commonName', ''),
os.path.join('commonName', 'commonname_uniquename'),
)
fileobjects = {}
def setupConfig(self):
cherry.config = configuration.from_defaults()
cherry.config = cherry.config.replace({
'media.basedir': self.testdir,
})
def setupFileObjects(self):
testpath = os.path.abspath(self.testdir)
root = sqlitecache.File(testpath)
self.fileobjects[''] = root
for path in self.testfiles:
self.addPathToFileObjects(path, root)
def addPathToFileObjects(self, path, root):
path = path.rstrip(os.path.sep)
ref, base = os.path.split(path)
if ref:
if not ref in self.fileobjects:
self.addPathToFileObjects(ref, root)
parent = self.fileobjects[ref]
else:
parent = root
fob = sqlitecache.File(base, parent=parent)
self.id_fileobj(fob)
self.fileobjects[path] = fob
def setUp(self):
self.testdir = getAbsPath(self.testdirname)
setupTestfiles(self.testdir, self.testfiles)
self.setupConfig()
service.provide('dbconnector', MemConnector)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
self.Cache = sqlitecache.SQLiteCache()
self.Cache.full_update()
self.setupFileObjects()
assert self.fileobjects[''].fullpath == os.path.abspath(self.testdir), \
'precondition: test rootdir has correct fullpath'
def tearDown(self):
removeTestfiles(self.testdir, self.testfiles)
self.Cache.conn.close()
def lookup_filename(self, filename, parentid):
return self.Cache.conn.execute(
'SELECT rowid FROM files WHERE parent=? AND filename=?',
(parentid, filename,))\
.fetchone()
def fileid_in_db(self, fileid):
return self.Cache.conn.execute('SELECT COUNT(*) FROM files'\
' WHERE rowid=?', (fileid,))\
.fetchone()[0]
def id_fileobj(self, fileobj):
'''fetches the db id for fileobj and saves it in fileobj.uid'''
if fileobj.parent is None:
pid = -1
else:
if fileobj.parent.uid == -1:
self.id_fileobj(fileobj.parent)
pid = fileobj.parent.uid
res = self.lookup_filename(fileobj.basename, pid)
if res is None:
if fileobj != fileobj.root: # testdir itself is not in db
log.w('fileobj not in database: %s', fileobj)
return
uid = res[0]
fileobj.uid = uid
def db_count(self, tablename):
query = 'SELECT COUNT(*) FROM ' + tablename
return self.Cache.conn.execute(query).fetchone()[0]
def testMissingFileIsRemovedFromDb(self):
fob = self.fileobjects['root_file']
removeTestfile(fob)
assert not fob.exists
assert self.fileid_in_db(fob.uid)
self.Cache.full_update()
self.assertFalse(self.fileid_in_db(fob.uid),
'file entry must be removed from db')
def testFilesWithSameNameAsMissingAreNotRemoved(self):
fob = self.fileobjects['root_dir/first_dir/first_file']
removeTestfile(fob)
beforecount = self.db_count('files')
self.Cache.full_update()
self.assertEqual(beforecount - 1, self.db_count('files'),
'exactly one file entry must be removed')
def get_fileobjects_for(self, dirname):
return [self.fileobjects[key] for key
in sorted(self.fileobjects.keys())
if key.startswith(dirname)]
def testMissingDirIsRemovedRecursively(self):
removelist = self.get_fileobjects_for('root_dir')
for fob in reversed(removelist):
removeTestfile(fob)
self.Cache.full_update()
for fob in removelist:
self.assertFalse(self.fileid_in_db(fob.uid),
'all children entries from removed dir must be removed')
def testRemoveFileAlsoRemovesSearchIndexes(self):
fob = self.fileobjects['root_file']
removeTestfile(fob)
self.Cache.full_update()
searchids = self.Cache.conn.execute('SELECT count(*) FROM search'
' WHERE frowid=?', (fob.uid,)) \
.fetchone()[0]
self.assertEqual(0, searchids,
'all search indexes referencing removed file must also be removed')
def testRemoveAllIndexesForWordRemovesWord(self):
fob = self.fileobjects[os.path.join('commonName', 'commonname_uniquename')]
removeTestfile(fob)
self.Cache.full_update()
unique = self.Cache.conn.execute('SELECT COUNT(*) FROM dictionary'
' WHERE word=?', ('uniquename',)) \
.fetchone()[0]
common = self.Cache.conn.execute('SELECT COUNT(*) FROM dictionary'
' WHERE word=?', ('commonname',)) \
.fetchone()[0]
self.assertEqual(0, unique,
'orphaned words must be removed')
self.assertEqual(1, common,
'words still referenced elsewhere must not be removed')
def testRollbackOnException(self):
class BoobytrappedConnector(MemConnector):
exceptcount = 0
def __init__(self):
super(self.__class__, self).__init__()
self.Connection = type(
str('%s.BoobytrappedConnection' % (self.__class__.__module__)),
(self.Connection,),
{'execute': self.__execute})
def __execute(connector, stmt, *parameters):
'''triggers an Exception when the 'undeletable' item should be
removed. relies on way too much knowledge of Cache internals. :(
'''
if stmt.lower().startswith('delete from files') \
and parameters[0][0] == undeletable.uid:
connector.exceptcount += 1
raise Exception("boom goes the dynamite")
return super(
connector.Connection,
connector.connection(sqlitecache.DBNAME)).execute(stmt, *parameters)
# SPECIAL SETUP
connector = BoobytrappedConnector()
service.provide('dbconnector', connector)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
self.Cache = sqlitecache.SQLiteCache()
self.Cache.full_update()
removelist = self.get_fileobjects_for('root_dir')
for fob in removelist:
self.id_fileobj(fob)
for fob in reversed(removelist):
removeTestfile(fob)
undeletable = self.fileobjects[os.path.join('root_dir',
'first_dir',
'first_file')]
deletable = [self.fileobjects[os.path.join('root_dir',
'first_file')]]
# RUN
self.Cache.full_update()
removed = [f for f in removelist if not self.fileid_in_db(f.uid)]
# ASSERT
self.assertTrue(1 <= connector.exceptcount,
'test must have raised at least one exception')
self.assertEqual(deletable, removed,
# self.assertListEqual(deletable, removed,
'complete rollback must restore all deleted entries.')
class RandomEntriesTest(unittest.TestCase):
testdirname = 'randomFileEntries'
def setUp(self):
self.testdir = getAbsPath(self.testdirname)
setupTestfiles(self.testdir, ())
cherry.config = cherry.config.replace({'media.basedir': self.testdir})
service.provide('dbconnector', MemConnector)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
self.Cache = sqlitecache.SQLiteCache()
return self
def register_files(self, *paths):
''' paths = ('dir/file', 'dir/subdir/') will register
- directories:
- dir/
- dir/subdir/
- files:
- /dir/file '''
files = {}
for path in paths:
previous = ''
for element in re.findall('\w+/?', path):
fullpath = previous + element
if fullpath not in files:
parent = files.get(previous, None)
fileobj = TestFile(element, parent=parent, isdir=element.endswith('/'))
self.Cache.register_file_with_db(fileobj)
files[fullpath] = fileobj
previous = fullpath
return files
def test_should_return_empty_sequence_when_no_files(self):
entries = self.Cache.randomFileEntries(10)
eq_(0, len(entries), entries)
def test_should_return_empty_sequence_when_zero_count(self):
entries = self.Cache.randomFileEntries(0)
eq_(0, len(entries), entries)
def test_should_return_all_entries_when_fewer_than_count(self):
self.register_files('a', 'b')
entries = self.Cache.randomFileEntries(10)
eq_(2, len(entries), entries)
def test_should_not_return_deleted_entries(self):
files = self.register_files('a', 'b', 'c')
self.Cache.remove_file(files['b'])
entries = self.Cache.randomFileEntries(10)
eq_(2, len(entries), entries)
def test_should_not_return_more_than_count_entries(self):
self.register_files('a', 'b', 'c')
entries = self.Cache.randomFileEntries(2)
ok_(2 >= len(entries), entries)
def test_should_not_return_dir_entries(self):
self.register_files('a_dir/a_subdir/')
entries = self.Cache.randomFileEntries(10)
eq_(0, len(entries), entries)
def test_can_handle_entries_in_subdirs(self):
self.register_files('dir/subdir/file')
entries = self.Cache.randomFileEntries(10)
eq_(1, len(entries), entries)
eq_('dir/subdir/file', entries[0].path, entries[0])
class SymlinkTest(unittest.TestCase):
testdirname = 'linktest'
testfiles = (
os.path.join('root_file'),
os.path.join('root_dir', ''),
)
def setUp(self):
self.testdir = getAbsPath(self.testdirname)
setupTestfiles(self.testdir, self.testfiles)
cherry.config = cherry.config.replace({'media.basedir': self.testdir})
service.provide('dbconnector', MemConnector)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
self.Cache = sqlitecache.SQLiteCache()
def tearDown(self):
removeTestfiles(self.testdir, self.testfiles)
self.Cache.conn.close()
def enumeratedTestdir(self):
return [os.path.join(self.testdir, i.infs.relpath) for
i in self.Cache.enumerate_fs_with_db(self.testdir)]
@symlinktest
def testRootLinkOk(self):
link = os.path.join(self.testdir, 'link')
target = os.path.join(self.testdir, 'root_file')
os.symlink(target, link)
try:
self.assertTrue(link in self.enumeratedTestdir(),
'root level links must be returned')
finally:
os.remove(link)
@symlinktest
def testSkipDirSymlinksBelowBasedirRoot(self):
with tempdir('') as tmpd:
link = os.path.join(self.testdir, 'root_dir', 'link')
target = tmpd
os.symlink(target, link)
try:
self.assertFalse(link in self.enumeratedTestdir(),
'deeply nested dir link must not be returned')
finally:
os.remove(link)
@symlinktest
def testNoCyclicalSymlinks(self):
target = os.path.abspath(self.testdir)
link = os.path.join(self.testdir, 'link')
os.symlink(target, link)
try:
self.assertFalse(link in self.enumeratedTestdir(),
'cyclic link must not be returned')
finally:
os.remove(link)
class UpdateTest(unittest.TestCase):
testdirname = 'updatetest'
testfiles = (
os.path.join('root_file'),
os.path.join('root_dir', ''),
os.path.join('root_dir', 'first_file'),
)
def setupConfig(self):
cherry.config = configuration.from_defaults()
cherry.config = cherry.config.replace({
'media.basedir': self.testdir,
})
def setupCache(self):
service.provide('dbconnector', MemConnector)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
self.Cache = sqlitecache.SQLiteCache()
self.Cache.full_update()
def clearCache(self):
self.Cache.conn.execute('delete from files')
self.Cache.conn.execute('delete from dictionary')
self.Cache.conn.execute('delete from search')
def setUp(self):
self.testdir = getAbsPath(self.testdirname)
setupTestfiles(self.testdir, self.testfiles)
self.setupConfig()
self.setupCache()
def tearDown(self):
removeTestfiles(self.testdir, self.testfiles)
self.Cache.conn.close()
def test_enumerate_add(self):
'''items not in db must be enumerated'''
self.clearCache()
lister = self.Cache.enumerate_fs_with_db(self.testdir)
expected_files = [f.rstrip(os.path.sep) for f in self.testfiles]
lister.send(None) # skip first item
for item in lister:
self.assertEqual(None, item.indb, 'database part must be empty, found: %s' % item.indb)
self.assertTrue(item.infs.relpath in expected_files, '%s %s' % (item.infs.relpath, expected_files))
expected_files.remove(item.infs.relpath)
self.assertEqual(0, len(expected_files))
def test_enumerate_delete(self):
'''items not in fs must be enumerated'''
removeTestfiles(self.testdir, self.testfiles)
lister = self.Cache.enumerate_fs_with_db(self.testdir)
expected_files = [f.rstrip(os.path.sep) for f in self.testfiles]
lister.send(None) # skip first item
for item in lister:
self.assertEqual(None, item.infs, 'filesystem part must be empty, found: %s' % item.indb)
self.assertTrue(item.indb.relpath in expected_files, '%s %s' % (item.indb.relpath, expected_files))
expected_files.remove(item.indb.relpath)
self.assertEqual(0, len(expected_files))
def test_enumerate_same(self):
'''unchanged fs must have equal db'''
lister = self.Cache.enumerate_fs_with_db(self.testdir)
expected_files = [f.rstrip(os.path.sep) for f in self.testfiles]
lister.send(None) # skip first item
for item in lister:
self.assertEqual(item.infs.fullpath, item.indb.fullpath)
self.assertEqual(item.infs.isdir, item.indb.isdir)
self.assertTrue(item.indb.relpath in expected_files, '%s %s' % (item.indb.relpath, expected_files))
expected_files.remove(item.indb.relpath)
self.assertEqual(0, len(expected_files))
def test_new_file_in_known_dir(self):
newfile = os.path.join('root_dir', 'second_file')
setupTestfiles(self.testdir, (newfile,))
self.Cache.full_update()
self.assertNotEqual(None, self.Cache.db_find_file_by_path(getAbsPath(self.testdir, newfile)),
'file must have been added correctly to the database')
def test_partial_update(self):
newfiles = (
os.path.join('root_dir', 'sub_dir', ''),
os.path.join('root_dir', 'sub_dir', 'a_file'),
os.path.join('root_dir', 'sub_dir', 'another_file'),
)
setupTestfiles(self.testdir, newfiles)
path_to = lambda x: getAbsPath(self.testdir, x)
msg = 'after updating newpath, all paths in newpath must be in database'
self.Cache.partial_update(path_to(newfiles[0]))
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[0])), msg)
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[1])), msg)
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[2])), msg)
msg = 'after updating samepath, all paths in samepath must be in database'
self.Cache.partial_update(path_to(newfiles[0]))
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[0])), msg)
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[1])), msg)
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[2])), msg)
removeTestfiles(self.testdir, newfiles)
msg = 'after updating removedpath, all paths in reomevpath must be gone from database'
self.Cache.partial_update(path_to(newfiles[0]))
self.assertEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[0])), msg)
self.assertEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[1])), msg)
self.assertEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[2])), msg)
setupTestfiles(self.testdir, newfiles)
msg = 'after updating newpath/subpath, only newpath and subpath must be in database, not othersubpath'
self.Cache.partial_update(path_to(newfiles[1]))
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[0])), msg)
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[1])), msg)
self.assertEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[2])), msg)
removeTestfiles(self.testdir, newfiles)
msg = 'after updating removedpath/subpath, subpath most be gone from database, removedpath must still be there'
self.Cache.partial_update(path_to(newfiles[1]))
self.assertNotEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[0])), msg)
self.assertEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[1])), msg)
self.assertEqual(None, self.Cache.db_find_file_by_path(path_to(newfiles[2])), msg)
def setup_cache(testfiles=()):
""" Sets up a SQLiteCache instance bound to current `media.basedir`.
The basedir is assumed to exist (as it must) and can be initialized
with directories and (empty) files.
:param list testfiles: Strings of filenames. Names ending in '/' are directories.
"""
database.resetdb(sqlitecache.DBNAME)
database.ensure_current_version(sqlitecache.DBNAME, autoconsent=True)
cache = sqlitecache.SQLiteCache()
basedir = cherry.config['media.basedir']
assert not os.listdir(basedir)
for filename in testfiles:
fullpath = os.path.join(basedir, filename)
setupTestfile(TestFile(fullpath))
cache.full_update()
return cache
def cachetest(func):
""" Function decorator that provides a basic CherryMusic context, complete
with a temporary `media.basedir`.
"""
testname = '{0}.{1}'.format(func.__module__ , func.__name__)
def wrapper(*args, **kwargs):
with tempdir(testname) as basedir:
testfunc = cherrytest({'media.basedir': basedir})(func)
testfunc(*args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
@cachetest
def test_listdir():
basedir_contents = ['some_file']
cache = setup_cache(basedir_contents)
assert basedir_contents == cache.listdir('')
assert basedir_contents == cache.listdir('.')
assert basedir_contents == cache.listdir('./.')
assert [] == cache.listdir('/.')
assert [] == cache.listdir('..')
assert [] == cache.listdir('./..')
@cachetest
def test_search_nonascii():
""" searchfor can handle and find non-ascii """
basedir_contents = ['ä.mp3']
cache = setup_cache(basedir_contents)
found = cache.searchfor('ä')
assert len(found) == 1
assert found[0].path == basedir_contents[0] # found MusicEntry
@symlinktest
@cachetest
def test_symlinks_to_files_are_indexed():
""" deep file symlinks are indexed """
# create 'file', 'dir/link' --> 'file'
cache = setup_cache(['file', 'dir/'])
basedir = cherry.config['media.basedir']
src = os.path.join(basedir, 'file')
dst = os.path.join(basedir, 'dir', 'link')
os.symlink(src, dst)
assert os.path.isfile(dst)
cache.full_update()
assert cache.searchfor('link')
if __name__ == "__main__":
nose.runmodule()
| 31,659 | Python | .py | 664 | 37.463855 | 139 | 0.625163 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,383 | helpers.py | devsnd_cherrymusic/cherrymusicserver/test/helpers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Things that are helpful when testing the CherryMusic backend """
import os
import shutil
import tempfile
from contextlib import contextmanager
from mock import *
from nose.tools import *
import cherrymusicserver
from cherrymusicserver import configuration
from cherrymusicserver import database
from cherrymusicserver import service
_default_config = configuration.from_defaults() # load only once
@contextmanager
def cherryconfig(override=None):
""" Context manager providing a CherryMusic default configuration
that can be overridden.
:param dict override: The overridden config values
"""
override = override or {}
config = cherrymusicserver.config or configuration.from_defaults()
config = config.update(override)
with patch('cherrymusicserver.config', config):
yield config
@contextmanager
def tempdir(name_hint, keep=False):
""" Context manager providing a temp directory to do stuff in.
Yields the dir name. Deletes the directory on exit by default.
:param str name_hint: Part of the temp dir's name
:param bool keep: If true, don't delete the directory.
"""
try:
tmpdir = tempfile.mkdtemp(prefix=name_hint + '.')
yield tmpdir
finally:
if not keep:
shutil.rmtree(tmpdir, ignore_errors=False, onerror=None)
def mkpath(name, parent='.', content=''):
""" Creates a file or subdir in directory ``parent``.
If ``name.endswith('/')``, a directory is created, otherwise a file.
``content`` will be written to created files; specifying content
when trying to create a directory will raise an AssertionError.
"""
mkdir = name.endswith('/')
name = name.rstrip('/')
assert not (mkdir and content)
abspath = os.path.abspath(os.path.join(parent, name))
if mkdir:
os.mkdir(abspath)
assert os.path.isdir(abspath)
else:
with open(abspath, "w") as newfile:
if content:
newfile.write(content)
assert os.path.isfile(abspath)
if content:
assert bool(os.path.getsize(abspath))
return abspath
@contextmanager
def dbconnector(connector=None):
""" Context manager providing a 'dbconnector' service
:param database.AbstractConnector connector: Connector instance. MemConnector by default.
"""
connector = connector or database.sql.MemConnector()
real_get = service.get
fake_get = lambda handle: connector if handle == 'dbconnector' else real_get(handle)
with patch('cherrymusicserver.service.get', fake_get):
yield connector
_dbconnector = dbconnector
def cherrytest(config=None, dbconnector=None):
""" Function decorator that does some standard CherryMusic setup.
It wraps the function call into a :func:`cherryconfig` and
:func:`dbconnector` context.
"""
def decorator(func):
def wrapper(*args, **kwargs):
with cherryconfig(config):
with _dbconnector(dbconnector):
func(*args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
return decorator
def symlinktest(func):
''' Decorator that returns a no-op function if symlinks are not supported '''
if can_symlink():
return func
return lambda *a, **kw: None
def can_symlink():
''' Returns True if the OS environment allows symlinking '''
global __can_symlink
try:
ok = __can_symlink
except NameError:
# actually try to symlink instead of ruling out entire OS families
with tempdir('cherrymusic_symlinktest') as tmpd:
try:
os.symlink(tmpd, os.path.join(tmpd, 'symlink'))
except (NameError, NotImplementedError, OSError):
# Windows: older than Vista or no privileges to symlink
# https://docs.python.org/3.2/library/os.html#os.symlink
ok = __can_symlink = False
else:
ok = __can_symlink = True
return ok
| 5,186 | Python | .py | 134 | 33.074627 | 97 | 0.687749 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,384 | test_init.py | devsnd_cherrymusic/cherrymusicserver/test/test_init.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
from cherrymusicserver import log
log.setTest()
import cherrymusicserver as cherry
def setup():
cherry.CherryMusic.setup_services()
cherry.service.provide('dbconnector', cherry.database.sql.MemConnector)
def test_server_wont_start_without_valid_basedir():
target_cfg = {'media.basedir': None} # invalid basedir defaults to None
class StopException(Exception):
pass
mock_stop = Mock(side_effect=StopException)
with patch('cherrymusicserver.config', target_cfg):
with patch('cherrymusicserver.CherryMusic.setup_config') as mock_setup:
with patch('sys.exit', mock_stop):
assert_raises(StopException, cherry.CherryMusic)
assert mock_setup.called
if __name__ == '__main__':
nose.runmodule()
| 1,930 | Python | .py | 51 | 34.823529 | 79 | 0.738197 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,385 | test_cherrymodel.py | devsnd_cherrymusic/cherrymusicserver/test/test_cherrymodel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
import os
from mock import *
from nose.tools import *
from cherrymusicserver.test.helpers import cherrytest, tempdir, mkpath, cherryconfig, symlinktest
from cherrymusicserver import log
log.setTest()
from cherrymusicserver import cherrymodel
from cherrymusicserver import sqlitecache
def config(cfg=None):
c = {'media.basedir': os.path.join(os.path.dirname(__file__), 'data_files')}
if cfg:
c.update(cfg)
return c
@cherrytest(config({'browser.pure_database_lookup': False}))
def test_hidden_names_listdir():
import cherrymusicserver as cherry
basedir_listing = sorted(os.listdir(cherry.config['media.basedir']))
eq_(['.hidden.mp3', 'empty_file.mp3', 'not_hidden.mp3'], basedir_listing)
model = cherrymodel.CherryModel()
dir_listing = model.listdir('')
assert len(dir_listing) == 1, str(dir_listing)
assert dir_listing[0].path == 'not_hidden.mp3'
@raises(ValueError)
@cherrytest(config({'browser.pure_database_lookup': False}))
def test_listdir_in_filesystem_must_be_inside_basedir():
model = cherrymodel.CherryModel()
model.listdir('./../')
# sqlitecache is covered in test_sqlitecache.test_listdir()
@cherrytest(config({'search.maxresults': 10}))
@patch('cherrymusicserver.cherrymodel.CherryModel.cache')
@patch('cherrymusicserver.cherrymodel.cherrypy')
def test_hidden_names_search(cherrypy, cache):
model = cherrymodel.CherryModel()
cache.searchfor.return_value = [cherrymodel.MusicEntry('.hidden.mp3', dir=False)]
assert not model.search('something')
cache.searchfor.return_value = [cherrymodel.MusicEntry('not_hidden.mp3', dir=False)]
assert model.search('something')
@cherrytest(config({'browser.pure_database_lookup': True}))
@patch('cherrymusicserver.cherrymodel.CherryModel.cache')
def test_listdir_deleted_files(cache):
"cherrymodel.listdir should work when cached files don't exist anymore"
model = cherrymodel.CherryModel()
cache.listdir.return_value = ['deleted.mp3']
eq_([], model.listdir(''))
@symlinktest
@cherrytest(config({'browser.pure_database_lookup': False}))
def test_listdir_bad_symlinks():
"cherrymodel.listdir should work when cached files don't exist anymore"
model = cherrymodel.CherryModel()
with tempdir('test_listdir_bad_symlinks') as tmpdir:
with cherryconfig({'media.basedir': tmpdir}):
os.symlink('not_there', os.path.join(tmpdir, 'badlink'))
eq_([], model.listdir(''))
@cherrytest(config({'browser.pure_database_lookup': False}))
def test_listdir_unreadable():
"cherrymodel.listdir should return empty when dir is unreadable"
model = cherrymodel.CherryModel()
with tempdir('test_listdir_unreadable') as tmpdir:
with cherryconfig({'media.basedir': tmpdir}):
os.chmod(tmpdir, 0o311)
try:
open(os.path.join(tmpdir, 'file.mp3'), 'a').close()
eq_([], model.listdir(''))
finally:
# Ensure tmpdir can be cleaned up, even if test fails
os.chmod(tmpdir, 0o755)
@cherrytest(config({'media.transcode': False}))
def test_randomMusicEntries():
model = cherrymodel.CherryModel()
def makeMusicEntries(n):
return [cherrymodel.MusicEntry(str(i)) for i in range(n)]
with patch('cherrymusicserver.cherrymodel.CherryModel.cache') as mock_cache:
with patch('cherrymusicserver.cherrymodel.CherryModel.isplayable') as mock_playable:
mock_cache.randomFileEntries.side_effect = makeMusicEntries
mock_playable.return_value = True
eq_(2, len(model.randomMusicEntries(2)))
mock_playable.return_value = False
eq_(0, len(model.randomMusicEntries(2)))
@cherrytest({'media.transcode': False})
def test_isplayable():
""" existing, nonempty files of supported types should be playable """
model = cherrymodel.CherryModel()
with patch(
'cherrymusicserver.cherrymodel.CherryModel.supportedFormats', ['mp3']):
with tempdir('test_isplayable') as tmpdir:
mkfile = lambda name, content='': mkpath(name, tmpdir, content)
mkdir = lambda name: mkpath(name + '/', tmpdir)
with cherryconfig({'media.basedir': tmpdir}):
isplayable = model.isplayable
assert isplayable(mkfile('ok.mp3', 'content'))
assert not isplayable(mkfile('empty.mp3'))
assert not isplayable(mkfile('bla.unsupported', 'content'))
assert not isplayable(mkdir('directory.mp3'))
assert not isplayable('inexistant')
@cherrytest({'media.transcode': True})
def test_is_playable_by_transcoding():
""" filetypes should still be playable if they can be transcoded """
from audiotranscode import AudioTranscode
with patch('audiotranscode.AudioTranscode', spec=AudioTranscode) as ATMock:
ATMock.return_value = ATMock
ATMock.available_decoder_formats.return_value = ['xxx']
with tempdir('test_isplayable_by_transcoding') as tmpdir:
with cherryconfig({'media.basedir': tmpdir}):
track = mkpath('track.xxx', parent=tmpdir, content='xy')
model = cherrymodel.CherryModel()
ok_(model.isplayable(track))
@cherrytest({'media.transcode': False})
@patch('cherrymusicserver.cherrymodel.cherrypy', MagicMock())
def test_search_results_missing_in_filesystem():
"inexistent MusicEntries returned by sqlitecache search should be ignored"
cache_finds = [
cherrymodel.MusicEntry('i-dont-exist.dir', dir=True),
cherrymodel.MusicEntry('i-dont-exist.mp3', dir=False),
cherrymodel.MusicEntry('i-exist.dir', dir=True),
cherrymodel.MusicEntry('i-exist.mp3', dir=False),
]
mock_cache = Mock(spec=sqlitecache.SQLiteCache)
mock_cache.searchfor.return_value = cache_finds
model = cherrymodel.CherryModel()
model.cache = mock_cache
with tempdir('test_cherrymodel_search_missing_results') as tmpdir:
mkpath('i-exist.dir/', tmpdir)
mkpath('i-exist.mp3', tmpdir, 'some content')
with cherryconfig({'media.basedir': tmpdir}):
results = model.search('the query')
eq_(set(cache_finds[2:]), set(results))
if __name__ == '__main__':
nose.runmodule()
| 7,416 | Python | .py | 159 | 40.874214 | 97 | 0.701178 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,386 | test_service.py | devsnd_cherrymusic/cherrymusicserver/test/test_service.py | #!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import unittest
from cherrymusicserver import service
class TestService(unittest.TestCase):
def test_mutual_dependency(self):
@service.user(myfoo='fooservice')
class Reflecto(object):
def __init__(self):
service.provide('fooservice', self.__class__)
assert self.myfoo
self.assertRaises(service.MutualDependencyBreak, Reflecto)
| 1,466 | Python | .py | 39 | 34.564103 | 70 | 0.737324 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,387 | migration_0002.py | devsnd_cherrymusic/cherrymusicserver/migrations/migration_0002.py | # -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""
Moves cherrymusic.conf to correct location in OS X
~/Application Support/cherrymusic => ~/Library/Application Support/cherrymusic
See https://github.com/devsnd/cherrymusic/issues/459
"""
#python 2.6+ backward compability
from __future__ import unicode_literals
import os
import shutil
def migrate():
oldpath = os.path.join(os.path.expanduser('~'), 'Application Support', 'cherrymusic')
newpath = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support', 'cherrymusic')
oldpath_exists = os.path.exists(oldpath)
newpath_exists = os.path.exists(newpath)
if oldpath_exists:
if newpath_exists:
# two data/conf directories, just warn and skip.
print("""There are two different data/config directories,
but normally that shouldn't happen. The old and unused one is here:
%s
The currently used one is here:
%s
You can keep either one, and cherrymusic will figure it out on the next
start.""" % (oldpath, newpath))
else:
# standard migration case. old one exists, but new one
# does not
print('UPDATE: Moving config/data directory to new location...')
shutil.move(oldpath, newpath)
print('UPDATE: done.')
| 2,299 | Python | .py | 59 | 35.864407 | 100 | 0.724955 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,388 | __init__.py | devsnd_cherrymusic/cherrymusicserver/migrations/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Contains migrations of CherryMusic data; for database migrations, see
the database package.
A migration is a module named `migration_nnnn` , where `nnnn` is one of a
series of consecutive integers starting with `0001`.
A migration has a `migrate()` function which will be called to execute the
migration; the function is expected to decide whether the migration should
be carried out or not. If a migration requires manual intervention or needs
to be aborted for any reason, the function should print an actionable error
message and sys.exit with an error status.
"""
#python 2.6+ backward compability
from __future__ import unicode_literals
import os
import pkgutil
import re
from backport import callable # Py 3.0 and 3.1 compat
_MIGRATIONS_PATH = os.path.dirname(__file__)
_NAME_PATTERN = re.compile('^migration_\d{4}$')
def check_and_migrate_all():
""" Runs all necessary migrations in alphabetical order.
The program can sys.exit if manual intervention is required.
"""
for migration in iter_load_migrations():
migration.migrate()
def iter_load_migrations():
""" Generator which imports and returns all migration modules in
alphabetical order.
"""
itermods = pkgutil.iter_modules([_MIGRATIONS_PATH])
modnames = sorted(name for _, name, ispkg in itermods
if not ispkg and _NAME_PATTERN.match(name))
return (_import_migration(name) for name in modnames)
def _import_migration(modulename):
qualname = 'cherrymusicserver.migrations.' + modulename
mig = __import__(qualname, fromlist='dummy')
if not callable(getattr(mig, 'migrate', None)):
raise TypeError('Migration needs a `migrate` function: ' + mig.__name__)
return mig
| 2,847 | Python | .py | 68 | 38.897059 | 80 | 0.738879 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,389 | migration_0001.py | devsnd_cherrymusic/cherrymusicserver/migrations/migration_0001.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""Prompts for manual migration of CherryMusic config and data to XDG dirs
See https://github.com/devsnd/cherrymusic/issues/224
"""
#python 2.6+ backward compability
from __future__ import unicode_literals
import os
import sys
from cherrymusicserver import pathprovider
def migrate():
"""If config file is still in old location, print a message and exit(1)"""
if pathprovider.configurationFileExists() or not pathprovider.fallbackPathInUse():
return
_printMigrationNotice()
sys.exit(1)
def _printMigrationNotice():
print(_("""
==========================================================================
Oops!
CherryMusic changed some file locations while you weren't looking.
(To better comply with best practices, if you wanna know.)
To continue, please move the following:
$ mv {src} {tgt}""".format(
src=os.path.join(pathprovider.fallbackPath(), 'config'),
tgt=pathprovider.configurationFile()) + """
$ mv {src} {tgt}""".format(
src=os.path.join(pathprovider.fallbackPath(), '*'),
tgt=pathprovider.getUserDataPath()) + """
Thank you, and enjoy responsibly. :)
==========================================================================
"""))
| 2,297 | Python | .py | 60 | 36.083333 | 86 | 0.692584 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,390 | migration_0003.py | devsnd_cherrymusic/cherrymusicserver/migrations/migration_0003.py | # -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Migrate album art filenames from old base64 encoding to md5 hash """
#python 2.6+ backward compability
from __future__ import unicode_literals
import base64
import codecs
import errno
import logging
import os
import re
import shutil
import sys
from cherrymusicserver import pathprovider
log = logging.getLogger(__name__)
_DONE_FILE = '.hashpath'
_MIGRATION_SUFFIX = '-migrated'
_ERR_MSG = """Error while migrating saved album art.
Fix the cause of the error and restart to continue.
Unmigrated files are in your album art folder:
{{artfolder}}
Migrated files are in the migration folder:
{{artfolder}}{suffix}
To abort the migration and lose the unmigrated album art:
- delete or move the album art folder;
- rename the migration folder by removing "{suffix}";
- create a file "{donefile}" in the folder (file can be empty).
""".format(suffix=_MIGRATION_SUFFIX, donefile=_DONE_FILE)
def migrate():
""" Migrate if necessary, exit(1) on error """
artfolder = pathprovider.albumArtFilePath('')
hashpathfile = os.path.join(artfolder, _DONE_FILE)
if os.path.exists(hashpathfile):
log.debug('Saved album art filenames already migrated. Skipping.')
return
try:
log.info('Migrating saved album art filenames...')
_migrate(artfolder)
except:
errmsg = _ERR_MSG.format(artfolder=artfolder)
log.exception('Error during album art migration')
log.critical(errmsg)
sys.exit(1)
else:
open(hashpathfile, 'a').close()
log.info('Album art filename migration done.')
def _migrate(sourcedir):
""" migrate into different dir and then swap it in, to mitigate mishaps """
targetdir = sourcedir.rstrip(os.path.sep) + _MIGRATION_SUFFIX
try:
os.mkdir(targetdir)
except OSError as err:
# ignore if targetdir exists to allow restarting aborted migrations
if err.errno != errno.EEXIST:
raise
_base64_artfile_regex = re.compile(
'^'
'([\da-zA-Z+-]{4})*' # pathprovider uses '-' instead of '\'
'([\da-zA-Z+-]{3}=|[\da-zA-Z+-]{2}==)?'
'$')
is_base64 = lambda s: bool(_base64_artfile_regex.match(s))
all_filenames = os.listdir(sourcedir)
migratable_filenames = (f for f in all_filenames if is_base64(f))
unmigratable_filenames = (f for f in all_filenames if not is_base64(f))
# move any non-albumart files first (which shouldn't exist, but who knows)
for filename in unmigratable_filenames:
oldpath = os.path.join(sourcedir, filename)
newpath = os.path.join(targetdir, filename)
_move_if_exists(oldpath, newpath)
# migrate albumart files
for filename in migratable_filenames:
ownerpath = _base64decode(filename)
oldpath = os.path.join(sourcedir, filename)
newname = os.path.basename(pathprovider.albumArtFilePath(ownerpath))
newpath = os.path.join(targetdir, newname)
_move_if_exists(oldpath, newpath)
os.rmdir(sourcedir)
os.rename(targetdir, sourcedir)
def _move_if_exists(oldpath, newpath):
try:
shutil.move(oldpath, newpath)
except OSError as err:
# ignore errors from existing newpath and missing oldpath,
# which might occur if this migration is (concurrently) executed more
# than once by mistake
if err.errno not in (errno.EEXIST, errno.ENOENT):
raise
def _base64decode(s):
""" decode old albumart base64 encoding; copied code from pathprovider """
utf8_bytestr = codecs.encode(s, 'UTF-8')
utf8_altchar = codecs.encode('+-', 'UTF-8')
return codecs.decode(base64.b64decode(utf8_bytestr, utf8_altchar), 'UTF-8')
if __name__ == "__main__": # pragma: no cover
migrate()
| 4,843 | Python | .py | 121 | 35.512397 | 79 | 0.69917 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,391 | test_migration_0003.py | devsnd_cherrymusic/cherrymusicserver/migrations/test/test_migration_0003.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
from cherrymusicserver.test import helpers
import base64
import codecs
import hashlib
import os
from cherrymusicserver import pathprovider
from cherrymusicserver.migrations import migration_0003
def test_migration():
tests = [
['empty dir',
[],
['.hashpath']],
['migrated dir',
['.hashpath', _oldname('b/c')],
['.hashpath', _oldname('b/c')]],
['standard migration',
[_oldname('a'), _oldname('b/c')],
[_newname('a'), _newname('b/c'), '.hashpath']],
['hidden file',
['.foo', _oldname('b/c')],
['.foo', _newname('b/c'), '.hashpath']],
['invalid base64 encoding',
['badbase64='],
['badbase64=', '.hashpath']],
]
for description, startnames, wantnames in tests:
check_filelist.description = 'migration_0003 (albumart): ' + description
yield check_filelist, startnames, wantnames
def check_filelist(startnames, wantnames):
with helpers.tempdir('cherrymusic.test_migration_0003') as tmpd:
artfolder = helpers.mkpath('art/', tmpd)
for name in startnames:
helpers.mkpath(name, artfolder)
with patch('cherrymusicserver.pathprovider.albumArtFilePath', _mock_artpath(artfolder)):
migration_0003.migrate()
expected, result = sorted(wantnames), sorted(os.listdir(artfolder))
eq_(expected, result, '\n%r\n%r' % (expected, result))
def _oldname(s):
"copied from pathprovider"
utf8_bytestr = codecs.encode(s, 'UTF-8')
utf8_altchar = codecs.encode('+-', 'UTF-8')
return codecs.decode(base64.b64encode(utf8_bytestr, utf8_altchar), 'UTF-8')
def _newname(s):
utf8_bytestr = codecs.encode(s, 'UTF-8')
return hashlib.md5(utf8_bytestr).hexdigest() + '.thumb'
_real_artpath = pathprovider.albumArtFilePath
def _mock_artpath(tmpd):
return lambda s: os.path.join(tmpd, os.path.basename(_real_artpath(s)) if s else '')
if __name__ == '__main__':
nose.runmodule()
| 3,176 | Python | .py | 83 | 33.566265 | 96 | 0.675992 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,392 | zipstream.py | devsnd_cherrymusic/cherrymusicserver/ext/zipstream.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This library was created by SpiderOak, Inc. and is released under the GPLv3.
https://github.com/gourneau/SpiderOak-zipstream
Iterable ZIP archive genrator.
Derived directly from zipfile.py
"""
import struct, os, time, sys
import binascii
import codecs
try:
import zlib # We may need its compression method
except ImportError:
zlib = None
__all__ = ["ZIP_STORED", "ZIP_DEFLATED", "ZipStream"]
ZIP64_LIMIT= (1 << 31) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# Here are some struct module formats for reading headers
structEndArchive = b"<4s4H2lH" # 9 items, end of archive, 22 bytes
stringEndArchive = b"PK\005\006" # magic number for end of archive record
structCentralDir = b"<4s4B4HILL5HLI"# 19 items, central directory, 46 bytes
stringCentralDir = b"PK\001\002" # magic number for central directory
structFileHeader = b"<4s2B4HlLL2H" # 12 items, file header record, 30 bytes
stringFileHeader = b"PK\003\004" # magic number for file header
structEndArchive64Locator = b"<4slql" # 4 items, locate Zip64 header, 20 bytes
stringEndArchive64Locator = b"PK\x06\x07" # magic token for locator header
structEndArchive64 = b"<4sqhhllqqqq" # 10 items, end of archive (Zip64), 56 bytes
stringEndArchive64 = b"PK\x06\x06" # magic token for Zip64 header
stringDataDescriptor = b"PK\x07\x08" # magic number for data descriptor
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4 # is this meaningful?
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# indexes of entries in the local file header structure
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2 # is this meaningful?
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = codecs.encode(filename, 'UTF-8') # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = 20 # Version which created ZIP archive
self.extract_version = 20 # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0x08 # ZIP flag bits, bit 3 indicates presence of data descriptor
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def DataDescriptor(self):
if self.compress_size > ZIP64_LIMIT or self.file_size > ZIP64_LIMIT:
fmt = "<4sIQQ"
else:
fmt = "<4sILL"
return struct.pack(fmt, stringDataDescriptor, self.CRC, self.compress_size, self.file_size)
def FileHeader(self):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
fmt = '<hhqq'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
file_size = 0xffffffff # -1
compress_size = 0xffffffff # -1
self.extract_version = max(45, self.extract_version)
self.create_version = max(45, self.extract_version)
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, self.flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(self.filename), len(extra))
return header + self.filename + extra
class ZipStream:
"""
"""
def __init__(self, paths, arc_path='', compression=ZIP_DEFLATED):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError("Compression requires the (missing) zlib module")
else:
raise RuntimeError("That compression method is not supported")
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.paths = paths # source paths
self.arc_path = arc_path # top level path in archive
self.data_ptr = 0 # Keep track of location inside archive
def __iter__(self):
for path in self.paths:
for data in self.zip_path(path, self.arc_path):
yield data
yield self.archive_footer()
def update_data_ptr(self, data):
"""As data is added to the archive, update a pointer so we can determine
the location of various structures as they are generated.
data -- data to be added to archive
Returns data
"""
self.data_ptr += len(data)
return data
def zip_path(self, path, archive_dir_name):
"""Recursively generate data to add directory tree or file pointed to by
path to the archive. Results in archive containing
archive_dir_name/basename(path)
archive_dir_name/basename(path)/*
archive_dir_name/basename(path)/*/*
.
.
.
path -- path to file or directory
archive_dir_name -- name of containing directory in archive
"""
if os.path.isdir(path):
dir_name = os.path.basename(path)
for name in os.listdir(path):
r_path = os.path.join(path, name)
r_archive_dir_name = os.path.join(archive_dir_name, dir_name)
for data in self.zip_path(r_path, r_archive_dir_name):
yield data
else:
archive_path = os.path.join(archive_dir_name, os.path.basename(path))
for data in self.zip_file(path, archive_path):
yield data
def zip_file(self, filename, arcname=None, compress_type=None):
"""Generates data to add file at 'filename' to an archive.
filename -- path to file to add to arcive
arcname -- path of file inside the archive
compress_type -- unused in ZipStream, just use self.compression
This function generates the data corresponding to the fields:
[local file header n]
[file data n]
[data descriptor n]
as described in section V. of the PKZIP Application Note:
http://www.pkware.com/business_and_developers/developer/appnote/
"""
st = os.stat(filename)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.header_offset = self.data_ptr # Start of header bytes
fp = open(filename, "rb")
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
yield self.update_data_ptr(zinfo.FileHeader())
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = binascii.crc32(buf, CRC)
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
yield self.update_data_ptr(buf)
fp.close()
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
yield self.update_data_ptr(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = abs(CRC)
zinfo.file_size = file_size
yield self.update_data_ptr(zinfo.DataDescriptor())
self.filelist.append(zinfo)
def archive_footer(self):
"""Returns data to finish off an archive based on the files already
added via zip_file(...). The data returned corresponds to the fields:
[archive decryption header]
[archive extra data record]
[central directory]
[zip64 end of central directory record]
[zip64 end of central directory locator]
[end of central directory record]
as described in section V. of the PKZIP Application Note:
http://www.pkware.com/business_and_developers/developer/appnote/
"""
data = []
count = 0
pos1 = self.data_ptr
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff #-1
compress_size = 0xffffffff #-1
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = -1 # struct "l" format: 32 one bits
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack('<hh' + 'q'*len(extra),1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
data.append( self.update_data_ptr(centdir))
data.append( self.update_data_ptr(zinfo.filename))
data.append( self.update_data_ptr(extra_data))
data.append( self.update_data_ptr(zinfo.comment))
pos2 = self.data_ptr
# Write end-of-zip-archive record
if pos1 > ZIP64_LIMIT:
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, count, count, pos2 - pos1, pos1)
data.append( self.update_data_ptr(zip64endrec))
zip64locrec = struct.pack(structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
data.append( self.update_data_ptr(zip64locrec))
# XXX Why is `pos3` computed next? It's never referenced.
pos3 = self.data_ptr
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, count, count, pos2 - pos1, -1, 0)
data.append( self.update_data_ptr(endrec))
else:
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, count, count, pos2 - pos1, pos1, 0)
data.append( self.update_data_ptr(endrec))
return b''.join(data)
if __name__ == "__main__":
zipfile = sys.argv[1]
path = sys.argv[2]
zf = open(zipfile, 'wb')
for data in ZipStream(path):
zf.write(data)
zf.close()
| 15,807 | Python | .py | 353 | 34.68272 | 101 | 0.592154 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,393 | test_v1.py | devsnd_cherrymusic/cherrymusicserver/api/test/test_v1.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" CherryMusic Server API integration tests
Parse API spec file to obtain requests and expected responses, and test
them against the server.
Needs ``pyyaml`` for the spec file and ``/usr/bin/curl`` as a convenient
HTTP client.
"""
from __future__ import unicode_literals
import json
import os
import random
import re
import subprocess
import threading
import time
import cherrypy
import nose
import yaml
# http://pyyaml.org/ticket/59
# def __unicode_str_constructor(loader, node):
# value = loader.construct_scalar(node)
# if not isinstance(value, type('')):
# return value.decode('UTF-8')
# return value
# yaml.add_constructor('tag:yaml.org,2002:str', __unicode_str_constructor)
from mock import *
from nose.tools import *
from cherrymusicserver.test import helpers
import cherrymusicserver as cms
CHERRYMUSIC_URL = 'http://localhost:{port}/'
def default_specpath():
filename = 'spec.yml'
apidir = os.path.dirname(cms.api.v1.__file__)
return os.path.join(apidir, filename)
def load_spec(filepath=None):
filepath = filepath or default_specpath()
with open(filepath) as f:
rawspec = f.read()
return yaml.load(rawspec)
def setup_module():
global _server
global _spec
_spec = load_spec()
_server = APIServer()
_server.start()
time.sleep(0.3) # wait for server to start
def teardown_module():
_server.stop()
def test_resources():
srvurl = CHERRYMUSIC_URL.format(port=_server.port)
baseurl = srvurl.rstrip('/') + '/' + _spec['baseurl'].lstrip('/')
for respec in _spec['resources'].values():
for request, expect, response in query_resource(respec, baseurl):
yield assert_response, request, expect, response
def query_resource(respec, baseurl):
for case in respec['cases']:
request = httpblock(case['request'])
expected = httpblock(case['response'])
response = send(request, baseurl)
yield request, expected, response
def assert_response(request, expected, actual):
""" Assert actual response matches expected.
request is passed along for context.
"""
print(request, '\n', expected, '\n', actual)
eq_(expected.status, actual.status)
for header in expected.headers:
assert header in actual.headers, 'missing: ' + header
if expected.body:
eq_(json.loads(expected.body), json.loads(actual.body))
else:
eq_(expected.body, actual.body)
def send(block, baseurl=''):
""" Send request contained in httpblock object and return server response
as another httpblock.
"""
target = baseurl + block.target
print('target: %r' % target)
out, err = curl(target, X=block.method, H=block.headers)
if err:
raise RuntimeError(block.method + ' ' + target, err)
return httpblock(out)
class httpblock(object):
""" Representation of an HTTP request or response, or a template for one.
Server responses will be decoded assuming UTF-8 encoding.
"""
def __init__(self, txtblock):
self.type = 'Empty Block'
self.firstline = None
self.headers = []
self.body = None
self.method = None
self.status = -1
self.target = None
try:
try:
headers, body = re.split('\r\n\r\n|\n\n', txtblock, maxsplit=1)
except TypeError:
headers, body = re.split(b'\r\n\r\n|\n\n', txtblock, maxsplit=1)
headers = headers.decode('ascii')
body = body.decode('utf-8') # assume body is utf-8
except ValueError:
self.type = "Bodyless Block"
headers = txtblock
body = None
headers = [h for h in re.split('\r\n|\n', headers) if h.strip()]
firstline = headers.pop(0)
word = firstline.split(' ', 1)[0].upper() # py2-compatible split
if word.startswith('HTTP'):
self.type="Server Response"
self.status = int(firstline.split()[1])
elif word.isdigit():
self.type = "Response Template"
self.status = int(firstline)
else:
self.type = "Request Template"
self.method, self.target = firstline.split(' ', 1)
self.firstline = firstline
self.headers = headers
self.body = body
def __str__(self):
txt = ['--- ' + self.type + ' ---']
txt += [self.firstline] if self.firstline else []
txt += self.headers
txt += ['', self.body] if self.body else []
return '\n'.join(txt)
def __repr__(self):
return '%s("""%s""")' % (type(self).__name__, str(self))
def curl(url, **args):
""" Call ``curl`` with args via subprocess.
The command and parameters are assembled like this::
cmd = ['/user/bin/curl', '-i', '-s', '-S']
cmd.extend(convert(args))
cmd.append(url)
`args` get turned into additional command line arguments::
curl(url, key=v1, k=v2, other_key=v3, switch='')
will use these additional arguments::
['--key', v1, '-k', v2, '--other-key', v3, '--switch', '']
List values are expanded by repeating the argument:
args['H'] = [a, b] --> ['-H', a, '-H', b]
"""
cmd = ['/usr/bin/curl', '-i', '-s', '-S']
for arg, value in args.items():
arg = ('-' + arg) if len(arg) == 1 else ('--' + arg)
arg = arg.replace('_', '-')
if isinstance(value, (list, tuple)):
for val in value:
cmd.extend([arg, val])
else:
cmd.extend([arg, value])
cmd.append(url)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = proc.wait()
out, err = proc.communicate()
if exitcode != 0:
raise RuntimeError(exitcode, str(cmd), err, out)
return out, err
class APIServer(threading.Thread):
""" Wrapper that mounts API module to cherrypy and runs it in a thread
adapted from
http://peter.bourgon.org/blog/2009/07/15/a-nontrivial-cherrypy-server-example.html
"""
def __init__(self):
self.port = random.randint(2048, 65535)
threading.Thread.__init__(self)
self.sync = threading.Condition()
def run(self):
with self.sync:
cherrypy.server.socket_port = self.port
cherrypy.server.socket_host = 'localhost'
cms.api.v1.mount('/api/v1')
cherrypy.engine.start()
cherrypy.engine.block()
def stop(self):
with self.sync:
cherrypy.engine.exit()
cherrypy.server.stop()
if __name__ == '__main__':
nose.runmodule()
| 7,788 | Python | .py | 208 | 31.370192 | 90 | 0.633347 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,394 | users.py | devsnd_cherrymusic/cherrymusicserver/api/v1/users.py | # -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
import cherrypy
from cherrymusicserver.api.v1.models import Model
from cherrymusicserver.api.v1.resources import Resource
def get_resource():
return users()
class User(Model):
name = Model.Field(None)
roles = Model.Field([])
_userdb = {
'adm': User(id=1, name='adm', roles=('admin' 'user')),
'puh': User(id=22, name='puh', roles=('user' 'bear')),
}
class users(Resource):
def GET(self, name=None):
if name:
if not name in _userdb:
raise cherrypy.NotFound(name)
return _userdb[name]
return sorted(_userdb)
| 1,730 | Python | .py | 50 | 31.8 | 70 | 0.718563 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,395 | models.py | devsnd_cherrymusic/cherrymusicserver/api/v1/models.py | # -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#python 2.6+ backward compability
from __future__ import unicode_literals
from backport import callable
class Model(object):
""" Base class for CherryMusic business objects.
If offers three important features:
- The constructor will set all keyword arguments as attributes;
- a subset of attributes ("fields"), to represent the visible state
of a model object, with default values;
- an :meth:`as_dict` method that returns a dict representation of
an object's fields.
Therefore, it should be possible to recreate an object from its dict.
To make a field attribute, simply declare a class attribute with
a :cls:`Model.Field` value::
class Foo(Model):
bar = Model.Field(defaultvalue)
If ``defaultvalue`` is callable, it will be called with the model
object as its sole argument when the field is initialized, and the
return value will be used as the actual default.
Fields will be listed as "Data Descriptors" in the ``help()`` of
a model class or object.
"""
class Field(object):
@classmethod
def fields_as_dict(cls, model):
"get a copy of the model's field-value dict"
return cls._values(model).copy()
@classmethod
def _init_fields(cls, model, valuedict):
""" Initialize all fields defined for the model's class by
assigning them their attribute names as names and putting their
default values into valuedict.
"""
modelcls = type(model)
for name in (n for n in dir(modelcls) if not n.startswith('__')):
field = getattr(modelcls, name)
if not isinstance(field, Model.Field):
continue
if not getattr(field, 'name', None):
field.name = name
default = field.default
value = default(model) if callable(default) else default
valuedict[name] = value
@classmethod
def _values(cls, model):
""" Get the value dict used for all fields of the model.
If it doesn't exist, create it and initialize all fields.
"""
try:
return model.__values
except AttributeError:
model.__values = v = {}
cls._init_fields(model, v)
return v
def __init__(self, default):
self.default = default
def __get__(self, model, modelcls):
if model is None:
return self
try:
return self._values(model)[self.name]
except KeyError:
raise AttributeError(self.name) # field value has been deleted
def __set__(self, model, value):
self._values(model)[self.name] = value
def __delete__(self, model):
del self._values(model)[self.name]
def __repr__(self): # pragma: no cover
return '{cls}({default!r})'.format(
cls=type(self).__name__, default=self.default)
id = Field(None)
id.__doc__ = """
value to uniquely identify the model (given its class) on the
server, or None"""
cls = Field(lambda s: type(s).__name__)
cls.__doc__ = "string to identify the class of the model"
def __init__(self, **kwargs):
""" initializes the model and sets keyword arguments as attributes """
for name, value in kwargs.items():
setattr(self, name, value)
def as_dict(self):
""" a dict representation of the model's fields and their values """
return self.Field.fields_as_dict(self)
| 4,866 | Python | .py | 112 | 34.776786 | 79 | 0.618897 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,396 | resources.py | devsnd_cherrymusic/cherrymusicserver/api/v1/resources.py | # -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Abstract and common behavior of RESTlike resources """
#python 2.6+ backward compability
from __future__ import unicode_literals
import cherrypy
@cherrypy.expose()
class Resource(object):
""" Base class for API resources.
A resource wraps an underlying model, and can have an owner through
this model.
"""
owner = None
model = None
| 1,434 | Python | .py | 41 | 33.073171 | 75 | 0.744236 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,397 | jsontools.py | devsnd_cherrymusic/cherrymusicserver/api/v1/jsontools.py | # -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" Utilities for JSON encoding and decoding.
Hook into the CherryPy's handler tool chain.
"""
#python 2.6+ backward compability
from __future__ import unicode_literals
import json
import sys
import cherrypy
class JSONEncoder(json.JSONEncoder):
""" JSONEncoder with support for encoding model objects. """
def default(self, obj):
try:
return obj.as_dict()
except AttributeError: # pragma: no cover
raise TypeError("can't JSON encode %s object %r" % (type(obj), obj))
# see cherrypy._cpcompat.json_encode
_json_encode = JSONEncoder().iterencode
if sys.version_info > (3,): # pragma: no cover
def json_encode(value):
for chunk in _json_encode(value):
yield chunk.encode('UTF-8')
else:
json_encode = _json_encode # pragma: no cover
def json_error_handler(status, message, traceback, version):
""" CherryPy error handler; turns errors into JSON objects """
status = str(status)
code = int(status.split()[0])
return json_encode(dict(code=code, status=status, message=message, version=version))
def json_handler(*args, **kwargs):
""" JSON handler that works with cherrypy.tools.json_out """
value = cherrypy.serving.request._json_inner_handler(*args, **kwargs)
if value is None:
return None # return empty body, not "null"
return json_encode(value)
| 2,530 | Python | .py | 63 | 37.111111 | 88 | 0.69284 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,398 | __init__.py | devsnd_cherrymusic/cherrymusicserver/api/v1/__init__.py | #!/usr/bin/env python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
""" CherryMusic REST API version 1
(! under construction !)
"""
# __________
# || .------.|
# ||/ [|
# ||| /||
# |||\ | [|
# _ ________ _ |||.'___| |'---...__
# /o)===|________(o\ ||========| ``-..
# / / _.----'\ \ |'=.====.=' ________ \
# / | .-' ----. / | | |____| .'.-------.\ |
# \ \ .'_.----._ \ | _\_|____|.'.'_.----._ \\__|
# /\ \ .'.' __ `.\ |-_| |____| /.' __ '.\ |
# // \ \' / / \ \\|-_|_|____|// / \ \`--'
# // \ / .| | | | |____| | | | |
# // \ .'.' | \ __ / | | \ __ / |
# // /'.' '. .' '. .'
# //_____.'-' `-.__.-' `-.__.-' LGB
# http://www.ascii-art.de/ascii/pqr/roadworks.txt (brought to you by the 90s)
#python 2.6+ backward compability
from __future__ import unicode_literals
import sys
import cherrypy
from cherrymusicserver.api.v1 import jsontools
from cherrymusicserver.api.v1 import users
from cherrymusicserver.api.v1.resources import Resource
debug = True
def get_resource():
""" Assembles and return the API root resource """
root = ResourceRoot()
root.users = users.get_resource()
return root
def get_config():
""" Return the CherryPy config dict for the API mount point """
return {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'error_page.default': jsontools.json_error_handler,
'tools.json_in.on': True,
'tools.json_out.on': True,
'tools.json_out.handler': jsontools.json_handler,
'tools.sessions.on': False,
},
}
def mount(mountpath):
""" Mount and configure API root resource to cherrypy.tree """
cherrypy.tree.mount(get_resource(), mountpath, config=get_config())
if sys.version_info < (3,): # pragma: no cover
# Disable a check that crashes the server in python2.
# Our config keys are unicode, and this check exposes them to an
# incompatible .translate() call in _cpdispatch.find_handler.
# (This setting must happen globally through config.update().)
cherrypy.config.update({
'checker.check_static_paths': False,
})
class ResourceRoot(Resource):
""" Defines the behavior of the API root resource;
subresources can define their own behavior and should be attached
dynamically.
"""
def GET(self):
""" Returns a list of available subresources """
resources = []
for name, member in self.__dict__.items():
if getattr(member, 'exposed', False):
resources.append(name)
return sorted(resources)
| 4,107 | Python | .py | 97 | 38.268041 | 80 | 0.528307 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |
20,399 | test_models.py | devsnd_cherrymusic/cherrymusicserver/api/v1/test/test_models.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import nose
from mock import *
from nose.tools import *
from cherrymusicserver.api.v1.models import Model
def test_model_constructor():
"Model(**kwargs) should initialize object attributes from kwargs"
m = Model(id=12, not_a_field=13)
eq_(12, m.id)
eq_(13, m.not_a_field)
def test_model_field_defaults():
"Fix behaviour of default fields and their default values"
eq_({'id': None, 'cls': 'Model'}, Model().as_dict())
class Test(Model):
pass
eq_('Test', Test().cls)
def test_model_as_dict():
""" Model.as_dict() should only include field attributes and reflect their
current value"""
m = Model(a=11)
m.id = 12
m.b = 13
eq_({'id': 12, 'cls': 'Model'}, m.as_dict())
def test_model_del_field():
""" Deleting a model object's field attribute should behave like deleting
a regular attribute without affecting the class or sister objects """
m = Model()
del m.cls
assert_raises(AttributeError, getattr, m, 'cls')
ok_('cls' not in m.as_dict())
ok_(Model.cls)
eq_('Model', Model().cls)
if __name__ == '__main__':
nose.runmodule()
| 2,222 | Python | .py | 63 | 32.31746 | 78 | 0.701026 | devsnd/cherrymusic | 1,032 | 187 | 111 | GPL-3.0 | 9/5/2024, 5:12:30 PM (Europe/Amsterdam) |