Dataset schema (column dtype and value range / number of distinct classes):

| column | dtype | range / classes |
|---|---|---|
| id | int64 | 0 – 458k |
| file_name | string | length 4 – 119 |
| file_path | string | length 14 – 227 |
| content | string | length 24 – 9.96M |
| size | int64 | 24 – 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 – 219k |
| avg_line_length | float64 | 2.52 – 4.63M |
| max_line_length | int64 | 5 – 9.91M |
| alphanum_fraction | float64 | 0 – 1 |
| repo_name | string | length 7 – 101 |
| repo_stars | int64 | 100 – 139k |
| repo_forks | int64 | 0 – 26.4k |
| repo_open_issues | int64 | 0 – 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
| id: 8,600 | file_name: gctools.py | file_path: rembo10_headphones/lib/cherrypy/lib/gctools.py |
import gc
import inspect
import sys
import time
try:
import objgraph
except ImportError:
objgraph = None
import cherrypy
from cherrypy import _cprequest, _cpwsgi
from cherrypy.process.plugins import SimplePlugin
class ReferrerTree(object):
"""An object which gathers all referrers of an object to a given depth."""
peek_length = 40
def __init__(self, ignore=None, maxdepth=2, maxparents=10):
self.ignore = ignore or []
self.ignore.append(inspect.currentframe().f_back)
self.maxdepth = maxdepth
self.maxparents = maxparents
def ascend(self, obj, depth=1):
"""Return a nested list containing referrers of the given object."""
depth += 1
parents = []
# Gather all referrers in one step to minimize
# cascading references due to repr() logic.
refs = gc.get_referrers(obj)
self.ignore.append(refs)
if len(refs) > self.maxparents:
return [('[%s referrers]' % len(refs), [])]
try:
ascendcode = self.ascend.__code__
except AttributeError:
ascendcode = self.ascend.im_func.func_code
for parent in refs:
if inspect.isframe(parent) and parent.f_code is ascendcode:
continue
if parent in self.ignore:
continue
if depth <= self.maxdepth:
parents.append((parent, self.ascend(parent, depth)))
else:
parents.append((parent, []))
return parents
def peek(self, s):
"""Return s, restricted to a sane length."""
if len(s) > (self.peek_length + 3):
half = self.peek_length // 2
return s[:half] + '...' + s[-half:]
else:
return s
def _format(self, obj, descend=True):
"""Return a string representation of a single object."""
if inspect.isframe(obj):
filename, lineno, func, context, index = inspect.getframeinfo(obj)
return "<frame of function '%s'>" % func
if not descend:
return self.peek(repr(obj))
if isinstance(obj, dict):
return '{' + ', '.join(['%s: %s' % (self._format(k, descend=False),
self._format(v, descend=False))
for k, v in obj.items()]) + '}'
elif isinstance(obj, list):
return '[' + ', '.join([self._format(item, descend=False)
for item in obj]) + ']'
elif isinstance(obj, tuple):
return '(' + ', '.join([self._format(item, descend=False)
for item in obj]) + ')'
r = self.peek(repr(obj))
if isinstance(obj, (str, int, float)):
return r
return '%s: %s' % (type(obj), r)
def format(self, tree):
"""Return a list of string reprs from a nested list of referrers."""
output = []
def ascend(branch, depth=1):
for parent, grandparents in branch:
output.append((' ' * depth) + self._format(parent))
if grandparents:
ascend(grandparents, depth + 1)
ascend(tree)
return output
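
# Illustrative usage sketch (editor's addition, not upstream CherryPy code):
# dumping the referrer tree of a suspect object. 'victim' is a hypothetical
# stand-in for whatever object you believe is being kept alive.
def _example_referrer_tree(victim):
    rt = ReferrerTree(maxdepth=2)
    for line in rt.format(rt.ascend(victim)):
        print(line)
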
def get_instances(cls):
return [x for x in gc.get_objects() if isinstance(x, cls)]
class RequestCounter(SimplePlugin):
def start(self):
self.count = 0
def before_request(self):
self.count += 1
def after_request(self):
self.count -= 1
request_counter = RequestCounter(cherrypy.engine)
request_counter.subscribe()
def get_context(obj):
if isinstance(obj, _cprequest.Request):
return 'path=%s;stage=%s' % (obj.path_info, obj.stage)
elif isinstance(obj, _cprequest.Response):
return 'status=%s' % obj.status
elif isinstance(obj, _cpwsgi.AppResponse):
return 'PATH_INFO=%s' % obj.environ.get('PATH_INFO', '')
elif hasattr(obj, 'tb_lineno'):
return 'tb_lineno=%s' % obj.tb_lineno
return ''
class GCRoot(object):
"""A CherryPy page handler for testing reference leaks."""
classes = [
(_cprequest.Request, 2, 2,
'Should be 1 in this request thread and 1 in the main thread.'),
(_cprequest.Response, 2, 2,
'Should be 1 in this request thread and 1 in the main thread.'),
(_cpwsgi.AppResponse, 1, 1,
'Should be 1 in this request thread only.'),
]
@cherrypy.expose
def index(self):
return 'Hello, world!'
@cherrypy.expose
def stats(self):
output = ['Statistics:']
for trial in range(10):
if request_counter.count > 0:
break
time.sleep(0.5)
else:
output.append('\nNot all requests closed properly.')
# gc_collect isn't perfectly synchronous, because it may
# break reference cycles that then take time to fully
# finalize. Call it thrice and hope for the best.
gc.collect()
gc.collect()
unreachable = gc.collect()
if unreachable:
if objgraph is not None:
final = objgraph.by_type('Nondestructible')
if final:
objgraph.show_backrefs(final, filename='finalizers.png')
trash = {}
for x in gc.garbage:
trash[type(x)] = trash.get(type(x), 0) + 1
if trash:
output.insert(0, '\n%s unreachable objects:' % unreachable)
trash = [(v, k) for k, v in trash.items()]
trash.sort()
for pair in trash:
output.append(' ' + repr(pair))
# Check declared classes to verify uncollected instances.
# These don't have to be part of a cycle; they can be
# any objects that have unanticipated referrers that keep
# them from being collected.
allobjs = {}
for cls, minobj, maxobj, msg in self.classes:
allobjs[cls] = get_instances(cls)
for cls, minobj, maxobj, msg in self.classes:
objs = allobjs[cls]
lenobj = len(objs)
if lenobj < minobj or lenobj > maxobj:
if minobj == maxobj:
output.append(
'\nExpected %s %r references, got %s.' %
(minobj, cls, lenobj))
else:
output.append(
'\nExpected %s to %s %r references, got %s.' %
(minobj, maxobj, cls, lenobj))
for obj in objs:
if objgraph is not None:
ig = [id(objs), id(inspect.currentframe())]
fname = 'graph_%s_%s.png' % (cls.__name__, id(obj))
objgraph.show_backrefs(
obj, extra_ignore=ig, max_depth=4, too_many=20,
filename=fname, extra_info=get_context)
output.append('\nReferrers for %s (refcount=%s):' %
(repr(obj), sys.getrefcount(obj)))
t = ReferrerTree(ignore=[objs], maxdepth=3)
tree = t.ascend(obj)
output.extend(t.format(tree))
return '\n'.join(output)
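
# Illustrative sketch (editor's addition): mounting GCRoot during development
# so /gcroot/stats can be polled for leaked Request/Response objects. These
# are standard CherryPy calls; the '/gcroot' mount point is arbitrary.
def _example_serve_gcroot():
    cherrypy.tree.mount(GCRoot(), '/gcroot')
    cherrypy.engine.start()
    cherrypy.engine.block()
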
| size: 7,344 | language: Python | extension: .py | total_lines: 176 | avg_line_length: 29.948864 | max_line_length: 79 | alphanum_fraction: 0.545455 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,601 | file_name: cptools.py | file_path: rembo10_headphones/lib/cherrypy/lib/cptools.py |
"""Functions for builtin CherryPy tools."""
import logging
import re
from hashlib import md5
import urllib.parse
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import httputil as _httputil
from cherrypy.lib import is_iterator
# Conditional HTTP request support #
def validate_etags(autotags=False, debug=False):
"""Validate the current ETag against If-Match, If-None-Match headers.
If autotags is True, an ETag response-header value will be provided
from an MD5 hash of the response body (unless some other code has
already provided an ETag header). If False (the default), the ETag
will not be automatic.
    WARNING: the autotags feature is not designed for URLs which allow
methods other than GET. For example, if a POST to the same URL returns
no content, the automatic ETag will be incorrect, breaking a fundamental
use for entity tags in a possibly destructive fashion. Likewise, if you
raise 304 Not Modified, the response body will be empty, the ETag hash
will be incorrect, and your application will break.
See :rfc:`2616` Section 14.24.
"""
response = cherrypy.serving.response
# Guard against being run twice.
if hasattr(response, 'ETag'):
return
status, reason, msg = _httputil.valid_status(response.status)
etag = response.headers.get('ETag')
# Automatic ETag generation. See warning in docstring.
if etag:
if debug:
cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
elif not autotags:
if debug:
cherrypy.log('Autotags off', 'TOOLS.ETAGS')
elif status != 200:
if debug:
cherrypy.log('Status not 200', 'TOOLS.ETAGS')
else:
etag = response.collapse_body()
etag = '"%s"' % md5(etag).hexdigest()
if debug:
cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
response.headers['ETag'] = etag
response.ETag = etag
# "If the request would, without the If-Match header field, result in
# anything other than a 2xx or 412 status, then the If-Match header
# MUST be ignored."
if debug:
cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
if status >= 200 and status <= 299:
request = cherrypy.serving.request
conditions = request.headers.elements('If-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions and not (conditions == ['*'] or etag in conditions):
raise cherrypy.HTTPError(412, 'If-Match failed: ETag %r did '
'not match %r' % (etag, conditions))
conditions = request.headers.elements('If-None-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions == ['*'] or etag in conditions:
if debug:
cherrypy.log('request.method: %s' %
request.method, 'TOOLS.ETAGS')
if request.method in ('GET', 'HEAD'):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412, 'If-None-Match failed: ETag %r '
'matched %r' % (etag, conditions))
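
# Illustrative config sketch (editor's addition): validate_etags is registered
# as the 'etags' tool, so it is normally enabled via config rather than called
# directly. The '/static' path is a hypothetical example.
_EXAMPLE_ETAGS_CONFIG = {
    '/static': {
        'tools.etags.on': True,
        'tools.etags.autotags': True,
    },
}
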
def validate_since():
"""Validate the current Last-Modified against If-Modified-Since headers.
If no code has set the Last-Modified response header, then no
validation will be performed.
"""
response = cherrypy.serving.response
lastmod = response.headers.get('Last-Modified')
if lastmod:
status, reason, msg = _httputil.valid_status(response.status)
request = cherrypy.serving.request
since = request.headers.get('If-Unmodified-Since')
if since and since != lastmod:
if (status >= 200 and status <= 299) or status == 412:
raise cherrypy.HTTPError(412)
since = request.headers.get('If-Modified-Since')
if since and since == lastmod:
if (status >= 200 and status <= 299) or status == 304:
if request.method in ('GET', 'HEAD'):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412)
# Tool code #
def allow(methods=None, debug=False):
"""Raise 405 if request.method not in methods (default ['GET', 'HEAD']).
The given methods are case-insensitive, and may be in any order. If
only one method is allowed, you may supply a single string; if more
than one, supply a list of strings.
Regardless of whether the current method is allowed or not, this
also emits an 'Allow' response header, containing the given methods.
"""
if not isinstance(methods, (tuple, list)):
methods = [methods]
methods = [m.upper() for m in methods if m]
if not methods:
methods = ['GET', 'HEAD']
elif 'GET' in methods and 'HEAD' not in methods:
methods.append('HEAD')
cherrypy.response.headers['Allow'] = ', '.join(methods)
if cherrypy.request.method not in methods:
if debug:
cherrypy.log('request.method %r not in methods %r' %
(cherrypy.request.method, methods), 'TOOLS.ALLOW')
raise cherrypy.HTTPError(405)
else:
if debug:
cherrypy.log('request.method %r in methods %r' %
(cherrypy.request.method, methods), 'TOOLS.ALLOW')
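
# Illustrative sketch (editor's addition): the registered 'allow' tool used as
# a decorator to restrict a handler to GET/POST (HEAD is added automatically
# whenever GET is allowed).
def _example_allow_usage():
    class Handler(object):
        @cherrypy.expose
        @cherrypy.tools.allow(methods=['GET', 'POST'])
        def index(self):
            return 'ok'
    return Handler
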
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
scheme='X-Forwarded-Proto', debug=False):
"""Change the base URL (scheme://host[:port][/path]).
For running a CP server behind Apache, lighttpd, or other HTTP
server.
For Apache and lighttpd, you should leave the 'local' argument at
the default value of 'X-Forwarded-Host'. For Squid, you probably
want to set tools.proxy.local = 'Origin'.
If you want the new request.base to include path info (not just the
host), you must explicitly set base to the full base path, and ALSO
set 'local' to '', so that the X-Forwarded-Host request header
(which never includes path info) does not override it. Regardless,
the value for 'base' MUST NOT end in a slash.
cherrypy.request.remote.ip (the IP address of the client) will be
rewritten if the header specified by the 'remote' arg is valid. By
default, 'remote' is set to 'X-Forwarded-For'. If you do not want to
rewrite remote.ip, set the 'remote' arg to an empty string.
"""
request = cherrypy.serving.request
if scheme:
s = request.headers.get(scheme, None)
if debug:
cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
if s == 'on' and 'ssl' in scheme.lower():
# This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
scheme = 'https'
else:
# This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
scheme = s
if not scheme:
scheme = request.base[:request.base.find('://')]
if local:
lbase = request.headers.get(local, None)
if debug:
cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
if lbase is not None:
base = lbase.split(',')[0]
if not base:
default = urllib.parse.urlparse(request.base).netloc
base = request.headers.get('Host', default)
if base.find('://') == -1:
# add http:// or https:// if needed
base = scheme + '://' + base
request.base = base
if remote:
xff = request.headers.get(remote)
if debug:
cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
if xff:
if remote == 'X-Forwarded-For':
# Grab the first IP in a comma-separated list. Ref #1268.
xff = next(ip.strip() for ip in xff.split(','))
request.remote.ip = xff
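
# Illustrative config sketch (editor's addition): typical settings when running
# behind a reverse proxy that sets the X-Forwarded-* headers. The base URL is
# a hypothetical placeholder; leave 'local' at its default unless 'base'
# includes path info (see the docstring above).
_EXAMPLE_PROXY_CONFIG = {
    '/': {
        'tools.proxy.on': True,
        'tools.proxy.base': 'https://www.example.com',
    },
}
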
def ignore_headers(headers=('Range',), debug=False):
"""Delete request headers whose field names are included in 'headers'.
This is a useful tool for working behind certain HTTP servers; for
example, Apache duplicates the work that CP does for 'Range'
headers, and will doubly-truncate the response.
"""
request = cherrypy.serving.request
for name in headers:
if name in request.headers:
if debug:
cherrypy.log('Ignoring request header %r' % name,
'TOOLS.IGNORE_HEADERS')
del request.headers[name]
def response_headers(headers=None, debug=False):
"""Set headers on the response."""
if debug:
cherrypy.log('Setting response headers: %s' % repr(headers),
'TOOLS.RESPONSE_HEADERS')
for name, value in (headers or []):
cherrypy.serving.response.headers[name] = value
response_headers.failsafe = True
def referer(pattern, accept=True, accept_missing=False, error=403,
message='Forbidden Referer header.', debug=False):
"""Raise HTTPError if Referer header does/does not match the given pattern.
pattern
A regular expression pattern to test against the Referer.
accept
If True, the Referer must match the pattern; if False,
the Referer must NOT match the pattern.
accept_missing
If True, permit requests with no Referer header.
error
The HTTP error code to return to the client on failure.
message
A string to include in the response body on failure.
"""
try:
ref = cherrypy.serving.request.headers['Referer']
match = bool(re.match(pattern, ref))
if debug:
cherrypy.log('Referer %r matches %r' % (ref, pattern),
'TOOLS.REFERER')
if accept == match:
return
except KeyError:
if debug:
cherrypy.log('No Referer header', 'TOOLS.REFERER')
if accept_missing:
return
raise cherrypy.HTTPError(error, message)
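
# Illustrative config sketch (editor's addition): only accept requests whose
# Referer comes from example.com; the pattern is a hypothetical value.
_EXAMPLE_REFERER_CONFIG = {
    '/': {
        'tools.referer.on': True,
        'tools.referer.pattern': r'http://[^/]*example\.com',
    },
}
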
class SessionAuth(object):
"""Assert that the user is logged in."""
session_key = 'username'
debug = False
def check_username_and_password(self, username, password):
pass
def anonymous(self):
"""Provide a temporary user name for anonymous users."""
pass
def on_login(self, username):
pass
def on_logout(self, username):
pass
def on_check(self, username):
pass
def login_screen(self, from_page='..', username='', error_msg='',
**kwargs):
return (str("""<html><body>
Message: %(error_msg)s
<form method="post" action="do_login">
Login: <input type="text" name="username" value="%(username)s" size="10" />
<br />
Password: <input type="password" name="password" size="10" />
<br />
<input type="hidden" name="from_page" value="%(from_page)s" />
<br />
<input type="submit" />
</form>
</body></html>""") % vars()).encode('utf-8')
def do_login(self, username, password, from_page='..', **kwargs):
"""Login.
May raise redirect, or return True if request handled.
"""
response = cherrypy.serving.response
error_msg = self.check_username_and_password(username, password)
if error_msg:
body = self.login_screen(from_page, username, error_msg)
response.body = body
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
else:
cherrypy.serving.request.login = username
cherrypy.session[self.session_key] = username
self.on_login(username)
raise cherrypy.HTTPRedirect(from_page or '/')
def do_logout(self, from_page='..', **kwargs):
"""Logout.
May raise redirect, or return True if request handled.
"""
sess = cherrypy.session
username = sess.get(self.session_key)
sess[self.session_key] = None
if username:
cherrypy.serving.request.login = None
self.on_logout(username)
raise cherrypy.HTTPRedirect(from_page)
def do_check(self):
"""Assert username.
Raise redirect, or return True if request handled.
"""
sess = cherrypy.session
request = cherrypy.serving.request
response = cherrypy.serving.response
username = sess.get(self.session_key)
if not username:
sess[self.session_key] = username = self.anonymous()
self._debug_message('No session[username], trying anonymous')
if not username:
url = cherrypy.url(qs=request.query_string)
self._debug_message(
'No username, routing to login_screen with from_page %(url)r',
locals(),
)
response.body = self.login_screen(url)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
self._debug_message('Setting request.login to %(username)r', locals())
request.login = username
self.on_check(username)
def _debug_message(self, template, context={}):
if not self.debug:
return
cherrypy.log(template % context, 'TOOLS.SESSAUTH')
def run(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
path = request.path_info
if path.endswith('login_screen'):
self._debug_message('routing %(path)r to login_screen', locals())
response.body = self.login_screen()
return True
elif path.endswith('do_login'):
if request.method != 'POST':
response.headers['Allow'] = 'POST'
self._debug_message('do_login requires POST')
raise cherrypy.HTTPError(405)
self._debug_message('routing %(path)r to do_login', locals())
return self.do_login(**request.params)
elif path.endswith('do_logout'):
if request.method != 'POST':
response.headers['Allow'] = 'POST'
raise cherrypy.HTTPError(405)
self._debug_message('routing %(path)r to do_logout', locals())
return self.do_logout(**request.params)
else:
self._debug_message('No special path, running do_check')
return self.do_check()
def session_auth(**kwargs):
"""Session authentication hook.
Any attribute of the SessionAuth class may be overridden
via a keyword arg to this function:
""" + '\n' + '\n '.join(
'{!s}: {!s}'.format(k, type(getattr(SessionAuth, k)).__name__)
for k in dir(SessionAuth)
if not k.startswith('__')
)
sa = SessionAuth()
for k, v in kwargs.items():
setattr(sa, k, v)
return sa.run()
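
# Illustrative sketch (editor's addition): a hypothetical credential checker
# wired up through tool config. Per check_username_and_password's contract it
# returns an error string on failure and a falsy value on success; the
# sessions tool must also be enabled for SessionAuth to store the username.
def _example_check_credentials(username, password):
    if (username, password) != ('admin', 'secret'):  # hypothetical credentials
        return 'Wrong username or password.'


_EXAMPLE_SESSION_AUTH_CONFIG = {
    '/': {
        'tools.sessions.on': True,
        'tools.session_auth.on': True,
        'tools.session_auth.check_username_and_password':
            _example_check_credentials,
    },
}
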
def log_traceback(severity=logging.ERROR, debug=False):
"""Write the last error's traceback to the cherrypy error log."""
cherrypy.log('', 'HTTP', severity=severity, traceback=True)
def log_request_headers(debug=False):
"""Write request headers to the cherrypy error log."""
h = [' %s: %s' % (k, v) for k, v in cherrypy.serving.request.header_list]
cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), 'HTTP')
def log_hooks(debug=False):
"""Write request.hooks to the cherrypy error log."""
request = cherrypy.serving.request
msg = []
# Sort by the standard points if possible.
from cherrypy import _cprequest
points = _cprequest.hookpoints
for k in request.hooks.keys():
if k not in points:
points.append(k)
for k in points:
msg.append(' %s:' % k)
v = request.hooks.get(k, [])
v.sort()
for h in v:
msg.append(' %r' % h)
cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
':\n' + '\n'.join(msg), 'HTTP')
def redirect(url='', internal=True, debug=False):
"""Raise InternalRedirect or HTTPRedirect to the given url."""
if debug:
cherrypy.log('Redirecting %sto: %s' %
({True: 'internal ', False: ''}[internal], url),
'TOOLS.REDIRECT')
if internal:
raise cherrypy.InternalRedirect(url)
else:
raise cherrypy.HTTPRedirect(url)
def trailing_slash(missing=True, extra=False, status=None, debug=False):
"""Redirect if path_info has (missing|extra) trailing slash."""
request = cherrypy.serving.request
pi = request.path_info
if debug:
cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
(request.is_index, missing, extra, pi),
'TOOLS.TRAILING_SLASH')
if request.is_index is True:
if missing:
if not pi.endswith('/'):
new_url = cherrypy.url(pi + '/', request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
elif request.is_index is False:
if extra:
# If pi == '/', don't redirect to ''!
if pi.endswith('/') and pi != '/':
new_url = cherrypy.url(pi[:-1], request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
def flatten(debug=False):
"""Wrap response.body in a generator that recursively iterates over body.
This allows cherrypy.response.body to consist of 'nested
generators'; that is, a set of generators that yield generators.
"""
def flattener(input):
numchunks = 0
for x in input:
if not is_iterator(x):
numchunks += 1
yield x
else:
for y in flattener(x):
numchunks += 1
yield y
if debug:
cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')
response = cherrypy.serving.response
response.body = flattener(response.body)
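
# Illustrative sketch (editor's addition): the kind of nested-generator body
# that flatten() is designed to serve; each level may yield byte chunks or
# further iterators.
def _example_nested_body():
    def inner():
        yield b'world'
    yield b'hello '
    yield inner()
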
def accept(media=None, debug=False):
"""Return the client's preferred media-type (from the given Content-Types).
If 'media' is None (the default), no test will be performed.
If 'media' is provided, it should be the Content-Type value (as a string)
or values (as a list or tuple of strings) which the current resource
can emit. The client's acceptable media ranges (as declared in the
Accept request header) will be matched in order to these Content-Type
values; the first such string is returned. That is, the return value
will always be one of the strings provided in the 'media' arg (or None
if 'media' is None).
If no match is found, then HTTPError 406 (Not Acceptable) is raised.
Note that most web browsers send */* as a (low-quality) acceptable
media range, which should match any Content-Type. In addition, "...if
no Accept header field is present, then it is assumed that the client
accepts all media types."
Matching types are checked in order of client preference first,
and then in the order of the given 'media' values.
Note that this function does not honor accept-params (other than "q").
"""
if not media:
return
if isinstance(media, text_or_bytes):
media = [media]
request = cherrypy.serving.request
# Parse the Accept request header, and try to match one
# of the requested media-ranges (in order of preference).
ranges = request.headers.elements('Accept')
if not ranges:
# Any media type is acceptable.
if debug:
cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')
return media[0]
else:
# Note that 'ranges' is sorted in order of preference
for element in ranges:
if element.qvalue > 0:
if element.value == '*/*':
# Matches any type or subtype
if debug:
cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')
return media[0]
elif element.value.endswith('/*'):
# Matches any subtype
mtype = element.value[:-1] # Keep the slash
for m in media:
if m.startswith(mtype):
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return m
else:
# Matches exact value
if element.value in media:
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return element.value
# No suitable media-range found.
ah = request.headers.get('Accept')
if ah is None:
msg = 'Your client did not send an Accept header.'
else:
msg = 'Your client sent this Accept header: %s.' % ah
msg += (' But this resource only emits these media types: %s.' %
', '.join(media))
raise cherrypy.HTTPError(406, msg)
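
# Illustrative sketch (editor's addition): calling accept() from inside a
# running request to negotiate the response type. The media values are
# hypothetical; a 406 is raised automatically when nothing matches.
def _example_negotiate_media():
    best = accept(media=['application/json', 'text/html'])
    cherrypy.serving.response.headers['Content-Type'] = best
    return best
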
class MonitoredHeaderMap(_httputil.HeaderMap):
def transform_key(self, key):
self.accessed_headers.add(key)
return super(MonitoredHeaderMap, self).transform_key(key)
def __init__(self):
self.accessed_headers = set()
super(MonitoredHeaderMap, self).__init__()
def autovary(ignore=None, debug=False):
"""Auto-populate the Vary response header based on request.header access.
"""
request = cherrypy.serving.request
req_h = request.headers
request.headers = MonitoredHeaderMap()
request.headers.update(req_h)
if ignore is None:
ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
def set_response_header():
resp_h = cherrypy.serving.response.headers
v = set([e.value for e in resp_h.elements('Vary')])
if debug:
cherrypy.log(
'Accessed headers: %s' % request.headers.accessed_headers,
'TOOLS.AUTOVARY')
v = v.union(request.headers.accessed_headers)
v = v.difference(ignore)
v = list(v)
v.sort()
resp_h['Vary'] = ', '.join(v)
request.hooks.attach('before_finalize', set_response_header, 95)
def convert_params(exception=ValueError, error=400):
"""Convert request params based on function annotations.
This function also processes errors that are subclasses of ``exception``.
:param BaseException exception: Exception class to catch.
:type exception: BaseException
:param error: The HTTP status code to return to the client on failure.
:type error: int
"""
request = cherrypy.serving.request
types = request.handler.callable.__annotations__
with cherrypy.HTTPError.handle(exception, error):
for key in set(types).intersection(request.params):
request.params[key] = types[key](request.params[key])
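
# Illustrative sketch (editor's addition): convert_params is driven by handler
# annotations. Assuming it is registered as the 'params' tool (as in recent
# CherryPy releases), a non-integer 'n' here yields HTTP 400.
def _example_convert_params_usage():
    class Root(object):
        @cherrypy.expose
        @cherrypy.tools.params()
        def double(self, n: int):
            return str(n * 2)
    return Root
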
| size: 23,584 | language: Python | extension: .py | total_lines: 531 | avg_line_length: 35.039548 | max_line_length: 79 | alphanum_fraction: 0.611387 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,602 | file_name: caching.py | file_path: rembo10_headphones/lib/cherrypy/lib/caching.py |
"""
CherryPy implements a simple caching system as a pluggable Tool. This tool
tries to be an (in-process) HTTP/1.1-compliant cache. It's not quite there
yet, but it's probably good enough for most sites.
In general, GET responses are cached (along with selecting headers) and, if
another request arrives for the same resource, the caching Tool will return 304
Not Modified if possible, or serve the cached response otherwise. It also sets
request.cached to True if serving a cached representation, and sets
request.cacheable to False (so it doesn't get cached again).
If POST, PUT, or DELETE requests are made for a cached resource, they
invalidate (delete) any cached response.
Usage
=====
Configuration file example::
[/]
tools.caching.on = True
tools.caching.delay = 3600
You may use a class other than the default
:class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config
entry ``cache_class``; supply the full dotted name of the replacement class
as the config value. It must implement the basic methods ``get``, ``put``,
``delete``, and ``clear``.
You may set any attribute, including overriding methods, on the cache
instance by providing them in config. The above sets the
:attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example.
"""
import datetime
import sys
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
class Cache(object):
"""Base class for Cache implementations."""
def get(self):
"""Return the current variant if in the cache, else None."""
raise NotImplementedError
def put(self, obj, size):
"""Store the current variant in the cache."""
raise NotImplementedError
def delete(self):
"""Remove ALL cached variants of the current resource."""
raise NotImplementedError
def clear(self):
"""Reset the cache to its initial, empty state."""
raise NotImplementedError
# ------------------------------ Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
"""A storage system for cached items which reduces stampede collisions."""
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None, and the value is already being
calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires,
it is returned. If not, None is returned, and a sentinel placed
in the cache to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, threading.Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' %
timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
                cherrypy.log('No cached value; setting sentinel',
                             'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, threading.Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
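
# Illustrative sketch (editor's addition): the anti-stampede handshake as a
# caller would drive it. 'cache', 'key', and 'compute' are hypothetical. A
# None result means this thread should compute the value itself; storing the
# result wakes any threads blocked in wait().
def _example_antistampede(cache, key, compute):
    value = cache.wait(key, timeout=5)
    if value is None:
        value = compute()
        cache[key] = value  # __setitem__ sets Event.result and fires the Event
    return value
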
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in ``self.store[uri].selecting_headers``.
The items contained in ``self.store[uri]`` have keys which are tuples of
request header values (in the same order as the names in its
selecting_headers), and values which are the actual responses.
"""
maxobjects = 1000
"""The maximum number of cached objects; defaults to 1000."""
maxobj_size = 100000
"""The maximum size of each cached object in bytes; defaults to 100 KB."""
maxsize = 10000000
"""The maximum size of the entire cache in bytes; defaults to 10 MB."""
delay = 600
"""Seconds until the cached content expires; defaults to 600 (10 minutes).
"""
antistampede_timeout = 5
"""Seconds to wait for other threads to release a cache lock."""
expire_freq = 0.1
"""Seconds to sleep between cache expiration sweeps."""
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
t.daemon = True
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
"""Continuously examine cached objects, expiring stale ones.
This function is designed to be run in its own daemon thread,
referenced at ``self.expiration_thread``.
"""
# It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
for expiration_time, objects in self.expirations.copy().items():
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][tuple(sel_header_values)]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
variant = uricache.wait(key=tuple(sorted(header_values)),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
uricache[tuple(sorted(header_values))] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
def get(invalid_methods=('POST', 'PUT', 'DELETE'), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, '_cache'):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop('cache_class', MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(
400, 'Invalid Cache-Control header')
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log(
'Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See
# https://github.com/cherrypy/cherrypy/issues/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers['Age'] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect:
x = sys.exc_info()[1]
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
"""Tee response output to cache storage.
Internal.
"""
# Used by CachingTool by attaching to request.hooks
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# Save the cache data, but only if the body isn't empty.
# e.g. a 304 Not Modified on a static file response will
# have an empty body.
# If the body is empty, delete the cache because it
        # contains a stale threading.Event object that will
# stall all consecutive requests until the _Event times
# out
body = b''.join(output)
if not body:
cherrypy._cache.delete()
else:
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
secs
Must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to response.time + secs.
    If secs is zero, the 'Expires' header is set to a date in the past, and
    the following "cache prevention" headers are also set:
    * Pragma: no-cache
    * Cache-Control: no-cache, must-revalidate
force
If False, the following headers are checked:
* Etag
* Last-Modified
* Age
* Expires
If any are already present, none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ('Pragma' not in headers):
headers['Pragma'] = 'no-cache'
if cherrypy.serving.request.protocol >= (1, 1):
if force or 'Cache-Control' not in headers:
headers['Cache-Control'] = 'no-cache, must-revalidate'
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or 'Expires' not in headers:
headers['Expires'] = expiry
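
# Illustrative config sketch (editor's addition): a one-hour Expires header on
# static assets; the path and delay are hypothetical.
_EXAMPLE_EXPIRES_CONFIG = {
    '/static': {
        'tools.expires.on': True,
        'tools.expires.secs': 3600,
    },
}
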
| size: 17,523 | language: Python | extension: .py | total_lines: 395 | avg_line_length: 34.075949 | max_line_length: 79 | alphanum_fraction: 0.607885 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,603 | file_name: covercp.py | file_path: rembo10_headphones/lib/cherrypy/lib/covercp.py |
"""Code-coverage tools for CherryPy.
To use this module, or the coverage tools in the test suite,
you need to download 'coverage.py', either Gareth Rees' `original
implementation <http://www.garethrees.org/2001/12/04/python-coverage/>`_
or Ned Batchelder's `enhanced version:
<http://www.nedbatchelder.com/code/modules/coverage.html>`_
To turn on coverage tracing, use the following code::
cherrypy.engine.subscribe('start', covercp.start)
DO NOT subscribe anything on the 'start_thread' channel, as previously
recommended. Calling start once in the main thread should be sufficient
to start coverage on all threads. Calling start again in each thread
effectively clears any coverage data gathered up to that point.
Run your code, then use the ``covercp.serve()`` function to browse the
results in a web browser. If you run this module from the command line,
it will call ``serve()`` for you.
"""
import re
import sys
import html
import os
import os.path
import urllib.parse
import cherrypy
localFile = os.path.join(os.path.dirname(__file__), 'coverage.cache')
the_coverage = None
try:
from coverage import coverage
the_coverage = coverage(data_file=localFile)
def start():
the_coverage.start()
except ImportError:
# Setting the_coverage to None will raise errors
# that need to be trapped downstream.
the_coverage = None
import warnings
warnings.warn(
'No code coverage will be performed; '
'coverage.py could not be imported.')
def start():
pass
start.priority = 20
TEMPLATE_MENU = """<html>
<head>
<title>CherryPy Coverage Menu</title>
<style>
body {font: 9pt Arial, serif;}
#tree {
font-size: 8pt;
font-family: Andale Mono, monospace;
white-space: pre;
}
#tree a:active, a:focus {
background-color: black;
padding: 1px;
color: white;
border: 0px solid #9999FF;
-moz-outline-style: none;
}
.fail { color: red;}
.pass { color: #888;}
#pct { text-align: right;}
h3 {
font-size: small;
font-weight: bold;
font-style: italic;
margin-top: 5px;
}
input { border: 1px solid #ccc; padding: 2px; }
.directory {
color: #933;
font-style: italic;
font-weight: bold;
font-size: 10pt;
}
.file {
color: #400;
}
a { text-decoration: none; }
#crumbs {
color: white;
font-size: 8pt;
font-family: Andale Mono, monospace;
width: 100%;
background-color: black;
}
#crumbs a {
color: #f88;
}
#options {
line-height: 2.3em;
border: 1px solid black;
background-color: #eee;
padding: 4px;
}
#exclude {
width: 100%;
margin-bottom: 3px;
border: 1px solid #999;
}
#submit {
background-color: black;
color: white;
border: 0;
margin-bottom: -9px;
}
</style>
</head>
<body>
<h2>CherryPy Coverage</h2>"""
TEMPLATE_FORM = """
<div id="options">
<form action='menu' method=GET>
<input type='hidden' name='base' value='%(base)s' />
Show percentages
<input type='checkbox' %(showpct)s name='showpct' value='checked' /><br />
Hide files over
<input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br />
Exclude files matching<br />
<input type='text' id='exclude' name='exclude'
value='%(exclude)s' size='20' />
<br />
<input type='submit' value='Change view' id="submit"/>
</form>
</div>"""
TEMPLATE_FRAMESET = """<html>
<head><title>CherryPy coverage data</title></head>
<frameset cols='250, 1*'>
<frame src='menu?base=%s' />
<frame name='main' src='' />
</frameset>
</html>
"""
TEMPLATE_COVERAGE = """<html>
<head>
<title>Coverage for %(name)s</title>
<style>
h2 { margin-bottom: .25em; }
p { margin: .25em; }
.covered { color: #000; background-color: #fff; }
.notcovered { color: #fee; background-color: #500; }
.excluded { color: #00f; background-color: #fff; }
table .covered, table .notcovered, table .excluded
{ font-family: Andale Mono, monospace;
font-size: 10pt; white-space: pre; }
.lineno { background-color: #eee;}
.notcovered .lineno { background-color: #000;}
        table { border-collapse: collapse; }
</style>
</head>
<body>
<h2>%(name)s</h2>
<p>%(fullpath)s</p>
<p>Coverage: %(pc)s%%</p>"""
TEMPLATE_LOC_COVERED = """<tr class="covered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_NOT_COVERED = """<tr class="notcovered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_EXCLUDED = """<tr class="excluded">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_ITEM = (
"%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n"
)
def _percent(statements, missing):
s = len(statements)
e = s - len(missing)
if s > 0:
return int(round(100.0 * e / s))
return 0
def _show_branch(root, base, path, pct=0, showpct=False, exclude='',
coverage=the_coverage):
# Show the directory name and any of our children
dirs = [k for k, v in root.items() if v]
dirs.sort()
for name in dirs:
newpath = os.path.join(path, name)
if newpath.lower().startswith(base):
relpath = newpath[len(base):]
yield '| ' * relpath.count(os.sep)
yield (
"<a class='directory' "
"href='menu?base=%s&exclude=%s'>%s</a>\n" %
(newpath, urllib.parse.quote_plus(exclude), name)
)
for chunk in _show_branch(
root[name], base, newpath, pct, showpct,
exclude, coverage=coverage
):
yield chunk
# Now list the files
if path.lower().startswith(base):
relpath = path[len(base):]
files = [k for k, v in root.items() if not v]
files.sort()
for name in files:
newpath = os.path.join(path, name)
pc_str = ''
if showpct:
try:
_, statements, _, missing, _ = coverage.analysis2(newpath)
except Exception:
# Yes, we really want to pass on all errors.
pass
else:
pc = _percent(statements, missing)
pc_str = ('%3d%% ' % pc).replace(' ', ' ')
if pc < float(pct) or pc == -1:
pc_str = "<span class='fail'>%s</span>" % pc_str
else:
pc_str = "<span class='pass'>%s</span>" % pc_str
yield TEMPLATE_ITEM % ('| ' * (relpath.count(os.sep) + 1),
pc_str, newpath, name)
def _skip_file(path, exclude):
if exclude:
return bool(re.search(exclude, path))
def _graft(path, tree):
d = tree
p = path
atoms = []
while True:
p, tail = os.path.split(p)
if not tail:
break
atoms.append(tail)
atoms.append(p)
if p != '/':
atoms.append('/')
atoms.reverse()
for node in atoms:
if node:
d = d.setdefault(node, {})
def get_tree(base, exclude, coverage=the_coverage):
"""Return covered module names as a nested dict."""
tree = {}
runs = coverage.data.executed_files()
for path in runs:
if not _skip_file(path, exclude) and not os.path.isdir(path):
_graft(path, tree)
return tree
class CoverStats(object):
def __init__(self, coverage, root=None):
self.coverage = coverage
if root is None:
# Guess initial depth. Files outside this path will not be
# reachable from the web interface.
root = os.path.dirname(cherrypy.__file__)
self.root = root
@cherrypy.expose
def index(self):
return TEMPLATE_FRAMESET % self.root.lower()
@cherrypy.expose
def menu(self, base='/', pct='50', showpct='',
exclude=r'python\d\.\d|test|tut\d|tutorial'):
# The coverage module uses all-lower-case names.
base = base.lower().rstrip(os.sep)
yield TEMPLATE_MENU
yield TEMPLATE_FORM % locals()
# Start by showing links for parent paths
yield "<div id='crumbs'>"
path = ''
atoms = base.split(os.sep)
atoms.pop()
for atom in atoms:
path += atom + os.sep
yield ("<a href='menu?base=%s&exclude=%s'>%s</a> %s"
% (path, urllib.parse.quote_plus(exclude), atom, os.sep))
yield '</div>'
yield "<div id='tree'>"
# Then display the tree
tree = get_tree(base, exclude, self.coverage)
if not tree:
yield '<p>No modules covered.</p>'
else:
for chunk in _show_branch(tree, base, '/', pct,
showpct == 'checked', exclude,
coverage=self.coverage):
yield chunk
yield '</div>'
yield '</body></html>'
def annotated_file(self, filename, statements, excluded, missing):
with open(filename, 'r') as source:
lines = source.readlines()
buffer = []
for lineno, line in enumerate(lines):
lineno += 1
line = line.strip('\n\r')
empty_the_buffer = True
if lineno in excluded:
template = TEMPLATE_LOC_EXCLUDED
elif lineno in missing:
template = TEMPLATE_LOC_NOT_COVERED
elif lineno in statements:
template = TEMPLATE_LOC_COVERED
else:
empty_the_buffer = False
buffer.append((lineno, line))
if empty_the_buffer:
for lno, pastline in buffer:
                    yield template % (lno, html.escape(pastline, quote=False))
buffer = []
                yield template % (lineno, html.escape(line, quote=False))
@cherrypy.expose
def report(self, name):
filename, statements, excluded, missing, _ = self.coverage.analysis2(
name)
pc = _percent(statements, missing)
yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name),
fullpath=name,
pc=pc)
yield '<table>\n'
for line in self.annotated_file(filename, statements, excluded,
missing):
yield line
yield '</table>'
yield '</body>'
yield '</html>'
def serve(path=localFile, port=8080, root=None):
    if the_coverage is None:
raise ImportError('The coverage module could not be imported.')
from coverage import coverage
cov = coverage(data_file=path)
cov.load()
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': 'production',
})
cherrypy.quickstart(CoverStats(cov, root))
if __name__ == '__main__':
serve(*tuple(sys.argv[1:]))
| size: 11,599 | language: Python | extension: .py | total_lines: 334 | avg_line_length: 25.523952 | max_line_length: 78 | alphanum_fraction: 0.551838 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,604 | file_name: auth_basic.py | file_path: rembo10_headphones/lib/cherrypy/lib/auth_basic.py |
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
"""HTTP Basic Authentication tool.
This module provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in
:rfc:`2617`.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store::
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
'tools.auth_basic.accept_charset': 'UTF-8',
}
app_config = { '/' : basic_auth }
"""
import binascii
import unicodedata
import base64
import cherrypy
from cherrypy._cpcompat import ntou, tonative
__author__ = 'visteya'
__date__ = 'April 2009'
def checkpassword_dict(user_password_dict):
"""Returns a checkpassword function which checks credentials
against a dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, use
checkpassword_dict(my_credentials_dict) as the value for the
checkpassword argument to basic_auth().
"""
def checkpassword(realm, user, password):
p = user_password_dict.get(user)
return p and p == password or False
return checkpassword
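
# Illustrative sketch (editor's addition): building a checker and probing it
# directly; the credentials are hypothetical.
def _example_checkpassword():
    check = checkpassword_dict({'bird': 'bebop'})
    assert check('earth', 'bird', 'bebop') is True
    assert check('earth', 'bird', 'wayout') is False
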
def basic_auth(realm, checkpassword, debug=False, accept_charset='utf-8'):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in :rfc:`2617`
and :rfc:`7617`.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
realm
A string containing the authentication realm.
checkpassword
A callable which checks the authentication credentials.
Its signature is checkpassword(realm, username, password). where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
fallback_charset = 'ISO-8859-1'
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
        # split() error, base64.b64decode() error
msg = 'Bad Request'
with cherrypy.HTTPError.handle((ValueError, binascii.Error), 400, msg):
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
charsets = accept_charset, fallback_charset
decoded_params = base64.b64decode(params.encode('ascii'))
decoded_params = _try_decode(decoded_params, charsets)
decoded_params = ntou(decoded_params)
decoded_params = unicodedata.normalize('NFC', decoded_params)
decoded_params = tonative(decoded_params)
username, password = decoded_params.split(':', 1)
if checkpassword(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
charset = accept_charset.upper()
charset_declaration = (
(', charset="%s"' % charset)
if charset != fallback_charset
else ''
)
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = (
'Basic realm="%s"%s' % (realm, charset_declaration)
)
raise cherrypy.HTTPError(
401, 'You are not authorized to access that resource')
def _try_decode(subject, charsets):
for charset in charsets[:-1]:
try:
return tonative(subject, charset)
except ValueError:
pass
return tonative(subject, charsets[-1])
| size: 4,420 | language: Python | extension: .py | total_lines: 96 | avg_line_length: 38.364583 | max_line_length: 79 | alphanum_fraction: 0.671472 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,605 | file_name: cpstats.py | file_path: rembo10_headphones/lib/cherrypy/lib/cpstats.py |
"""CPStats, a package for collecting and reporting on program statistics.
Overview
========
Statistics about program operation are an invaluable monitoring and debugging
tool. Unfortunately, the gathering and reporting of these critical values is
usually ad-hoc. This package aims to add a centralized place for gathering
statistical performance data, a structure for recording that data which
provides for extrapolation of that data into more useful information,
and a method of serving that data to both human investigators and
monitoring software. Let's examine each of those in more detail.
Data Gathering
--------------
Just as Python's `logging` module provides a common importable for gathering
and sending messages, performance statistics would benefit from a similar
common mechanism, and one that does *not* require each package which wishes
to collect stats to import a third-party module. Therefore, we choose to
re-use the `logging` module by adding a `statistics` object to it.
That `logging.statistics` object is a nested dict. It is not a custom class,
because that would:
1. require libraries and applications to import a third-party module in
order to participate
2. inhibit innovation in extrapolation approaches and in reporting tools, and
3. be slow.
There are, however, some specifications regarding the structure of the dict.::
{
+----"SQLAlchemy": {
| "Inserts": 4389745,
| "Inserts per Second":
| lambda s: s["Inserts"] / (time() - s["Start"]),
| C +---"Table Statistics": {
| o | "widgets": {-----------+
N | l | "Rows": 1.3M, | Record
a | l | "Inserts": 400, |
m | e | },---------------------+
e | c | "froobles": {
s | t | "Rows": 7845,
p | i | "Inserts": 0,
a | o | },
c | n +---},
e | "Slow Queries":
| [{"Query": "SELECT * FROM widgets;",
| "Processing Time": 47.840923343,
| },
| ],
+----},
}
The `logging.statistics` dict has four levels. The topmost level is nothing
more than a set of names to introduce modularity, usually along the lines of
package names. If the SQLAlchemy project wanted to participate, for example,
it might populate the item `logging.statistics['SQLAlchemy']`, whose value
would be a second-layer dict we call a "namespace". Namespaces help multiple
packages to avoid collisions over key names, and make reports easier to read,
to boot. The maintainers of SQLAlchemy should feel free to use more than one
namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case
or other syntax constraints on the namespace names; they should be chosen
to be maximally readable by humans (neither too short nor too long).
Each namespace, then, is a dict of named statistical values, such as
'Requests/sec' or 'Uptime'. You should choose names which will look
good on a report: spaces and capitalization are just fine.
In addition to scalars, values in a namespace MAY be a (third-layer)
dict, or a list, called a "collection". For example, the CherryPy
:class:`StatsTool` keeps track of what each request is doing (or has most
recently done) in a 'Requests' collection, where each key is a thread ID; each
value in the subdict MUST be a fourth dict (whew!) of statistical data about
each thread. We call each subdict in the collection a "record". Similarly,
the :class:`StatsTool` also keeps a list of slow queries, where each record
contains data about each slow query, in order.
Values in a namespace or record may also be functions, which brings us to:
Extrapolation
-------------
The collection of statistical data needs to be fast, as close to unnoticeable
as possible to the host program. That requires us to minimize I/O, for example,
but in Python it also means we need to minimize function calls. So when you
are designing your namespace and record values, try to insert the most basic
scalar values you already have on hand.
When it comes time to report on the gathered data, however, we usually have
much more freedom in what we can calculate. Therefore, whenever reporting
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
of `logging.statistics` for reporting, they first call
`extrapolate_statistics` (passing the whole `statistics` dict as the only
argument). This makes a deep copy of the statistics dict so that the
reporting tool can iterate over it, and even change it, without harming
the original. But it also expands any functions in the dict by calling them.
For example, you might have a 'Current Time' entry in the namespace with the
value "lambda scope: time.time()". The "scope" parameter is the current
namespace dict (or record, if we're currently expanding one of those
instead), allowing you access to existing static entries. If you're truly
evil, you can even modify more than one entry at a time.
However, don't try to calculate an entry and then use its value in further
extrapolations; the order in which the functions are called is not guaranteed.
This can lead to a certain amount of duplicated work (or a redesign of your
schema), but that's better than complicating the spec.
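For illustration, a minimal sketch of what extrapolation does to a
namespace (the 'Demo' names here are hypothetical)::

    ns = {'Start': 1000.0,
          'Elapsed': lambda s: time.time() - s['Start']}
    flat = extrapolate_statistics({'Demo': ns})
    # flat['Demo']['Elapsed'] is now a plain float; the original ns
    # (and its lambda) is left untouched.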
After the whole thing has been extrapolated, it's time for:
Reporting
---------
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
it all, and then transforms it to HTML for easy viewing. Each namespace gets
its own header and attribute table, plus an extra table for each collection.
This is NOT part of the statistics specification; other tools can format how
they like.
You can control which columns are output and how they are formatted by updating
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
`logging.statistics`. The difference is that, instead of data values, it has
formatting values. Use None for a given key to indicate to the StatsPage that a
given column should not be output. Use a string with formatting
(such as '%.3f') to interpolate the value(s), or use a callable (such as
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
mentioned in the formatting dict is output unchanged.
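For example, a hypothetical tweak that hides one column and reformats
another (the namespace and key names are assumed, not part of the spec)::

    from cherrypy.lib import cpstats
    cpstats.StatsPage.formatting['My Stuff'] = {
        'Start Time': None,       # omit this column entirely
        'Events/Second': '%.1f',  # one decimal place
    }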
Monitoring
----------
Although the HTML output takes pains to assign unique IDs to each <td> with
statistical data, you're probably better off fetching /cpstats/data, which
outputs the whole (extrapolated) `logging.statistics` dict in JSON format.
That is probably easier to parse, and doesn't have any formatting controls,
so you get the "original" data in a consistently-serialized format.
Note: there's no treatment yet for datetime objects. Try time.time() instead
for now if you can. Nagios will probably thank you.
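For example, a minimal sketch of polling the JSON endpoint (assuming the
stats page is mounted at /cpstats on localhost:8080)::

    import json
    from urllib.request import urlopen

    with urlopen('http://localhost:8080/cpstats/data') as resp:
        stats = json.loads(resp.read().decode('utf-8'))
    print(stats['CherryPy Applications']['Requests/Second'])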
Turning Collection Off
----------------------
It is recommended each namespace have an "Enabled" item which, if False,
stops collection (but not reporting) of statistical data. Applications
SHOULD provide controls to pause and resume collection by setting these
entries to False or True, if present.
Usage
=====
To collect statistics on CherryPy applications::
from cherrypy.lib import cpstats
appconfig['/']['tools.cpstats.on'] = True
To collect statistics on your own code::
import logging
# Initialize the repository
if not hasattr(logging, 'statistics'): logging.statistics = {}
# Initialize my namespace
mystats = logging.statistics.setdefault('My Stuff', {})
# Initialize my namespace's scalars and collections
mystats.update({
'Enabled': True,
'Start Time': time.time(),
'Important Events': 0,
'Events/Second': lambda s: (
(s['Important Events'] / (time.time() - s['Start Time']))),
})
...
for event in events:
...
# Collect stats
if mystats.get('Enabled', False):
mystats['Important Events'] += 1
To report statistics::
root.cpstats = cpstats.StatsPage()
To format statistics reports::
See 'Reporting', above.
"""
import logging
import os
import sys
import threading
import time
import cherrypy
from cherrypy._json import json
# ------------------------------- Statistics -------------------------------- #
if not hasattr(logging, 'statistics'):
logging.statistics = {}
def extrapolate_statistics(scope):
"""Return an extrapolated copy of the given scope."""
c = {}
for k, v in scope.copy().items():
if isinstance(v, dict):
v = extrapolate_statistics(v)
elif isinstance(v, (list, tuple)):
v = [extrapolate_statistics(record) for record in v]
elif hasattr(v, '__call__'):
v = v(scope)
c[k] = v
return c
# -------------------- CherryPy Applications Statistics --------------------- #
appstats = logging.statistics.setdefault('CherryPy Applications', {})
appstats.update({
'Enabled': True,
'Bytes Read/Request': lambda s: (
s['Total Requests'] and
(s['Total Bytes Read'] / float(s['Total Requests'])) or
0.0
),
'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s),
'Bytes Written/Request': lambda s: (
s['Total Requests'] and
(s['Total Bytes Written'] / float(s['Total Requests'])) or
0.0
),
'Bytes Written/Second': lambda s: (
s['Total Bytes Written'] / s['Uptime'](s)
),
'Current Time': lambda s: time.time(),
'Current Requests': 0,
'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s),
'Server Version': cherrypy.__version__,
'Start Time': time.time(),
'Total Bytes Read': 0,
'Total Bytes Written': 0,
'Total Requests': 0,
'Total Time': 0,
'Uptime': lambda s: time.time() - s['Start Time'],
'Requests': {},
})
def proc_time(s):
return time.time() - s['Start Time']
class ByteCountWrapper(object):
"""Wraps a file-like object, counting the number of bytes read."""
def __init__(self, rfile):
self.rfile = rfile
self.bytes_read = 0
def read(self, size=-1):
data = self.rfile.read(size)
self.bytes_read += len(data)
return data
def readline(self, size=-1):
data = self.rfile.readline(size)
self.bytes_read += len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
    def __next__(self):
        data = next(self.rfile)
        self.bytes_read += len(data)
        return data

    next = __next__  # retained as a Python 2-style alias
def average_uriset_time(s):
return s['Count'] and (s['Sum'] / s['Count']) or 0
def _get_threading_ident():
if sys.version_info >= (3, 3):
return threading.get_ident()
return threading._get_ident()
class StatsTool(cherrypy.Tool):
"""Record various information about the current request."""
def __init__(self):
cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
if appstats.get('Enabled', False):
cherrypy.Tool._setup(self)
self.record_start()
def record_start(self):
"""Record the beginning of a request."""
request = cherrypy.serving.request
if not hasattr(request.rfile, 'bytes_read'):
request.rfile = ByteCountWrapper(request.rfile)
request.body.fp = request.rfile
r = request.remote
appstats['Current Requests'] += 1
appstats['Total Requests'] += 1
appstats['Requests'][_get_threading_ident()] = {
'Bytes Read': None,
'Bytes Written': None,
# Use a lambda so the ip gets updated by tools.proxy later
'Client': lambda s: '%s:%s' % (r.ip, r.port),
'End Time': None,
'Processing Time': proc_time,
'Request-Line': request.request_line,
'Response Status': None,
'Start Time': time.time(),
}
def record_stop(
self, uriset=None, slow_queries=1.0, slow_queries_count=100,
debug=False, **kwargs):
"""Record the end of a request."""
resp = cherrypy.serving.response
w = appstats['Requests'][_get_threading_ident()]
r = cherrypy.request.rfile.bytes_read
w['Bytes Read'] = r
appstats['Total Bytes Read'] += r
if resp.stream:
w['Bytes Written'] = 'chunked'
else:
cl = int(resp.headers.get('Content-Length', 0))
w['Bytes Written'] = cl
appstats['Total Bytes Written'] += cl
w['Response Status'] = \
getattr(resp, 'output_status', resp.status).decode()
w['End Time'] = time.time()
p = w['End Time'] - w['Start Time']
w['Processing Time'] = p
appstats['Total Time'] += p
appstats['Current Requests'] -= 1
if debug:
cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')
if uriset:
rs = appstats.setdefault('URI Set Tracking', {})
r = rs.setdefault(uriset, {
'Min': None, 'Max': None, 'Count': 0, 'Sum': 0,
'Avg': average_uriset_time})
if r['Min'] is None or p < r['Min']:
r['Min'] = p
if r['Max'] is None or p > r['Max']:
r['Max'] = p
r['Count'] += 1
r['Sum'] += p
if slow_queries and p > slow_queries:
sq = appstats.setdefault('Slow Queries', [])
sq.append(w.copy())
if len(sq) > slow_queries_count:
sq.pop(0)
cherrypy.tools.cpstats = StatsTool()
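# A sketch of enabling the tool via app config ('Root' and the threshold are
# hypothetical); 'slow_queries' is the seconds threshold passed to
# record_stop above:
#
#     config = {'/': {'tools.cpstats.on': True,
#                     'tools.cpstats.slow_queries': 0.5}}
#     cherrypy.quickstart(Root(), '/', config)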
# ---------------------- CherryPy Statistics Reporting ---------------------- #
thisdir = os.path.abspath(os.path.dirname(__file__))
missing = object()
def locale_date(v):
return time.strftime('%c', time.gmtime(v))
def iso_format(v):
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))
def pause_resume(ns):
def _pause_resume(enabled):
pause_disabled = ''
resume_disabled = ''
if enabled:
resume_disabled = 'disabled="disabled" '
else:
pause_disabled = 'disabled="disabled" '
return """
<form action="pause" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Pause" %s/>
</form>
<form action="resume" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Resume" %s/>
</form>
""" % (ns, pause_disabled, ns, resume_disabled)
return _pause_resume
class StatsPage(object):
formatting = {
'CherryPy Applications': {
'Enabled': pause_resume('CherryPy Applications'),
'Bytes Read/Request': '%.3f',
'Bytes Read/Second': '%.3f',
'Bytes Written/Request': '%.3f',
'Bytes Written/Second': '%.3f',
'Current Time': iso_format,
'Requests/Second': '%.3f',
'Start Time': iso_format,
'Total Time': '%.3f',
'Uptime': '%.3f',
'Slow Queries': {
'End Time': None,
'Processing Time': '%.3f',
'Start Time': iso_format,
},
'URI Set Tracking': {
'Avg': '%.3f',
'Max': '%.3f',
'Min': '%.3f',
'Sum': '%.3f',
},
'Requests': {
'Bytes Read': '%s',
'Bytes Written': '%s',
'End Time': None,
'Processing Time': '%.3f',
'Start Time': None,
},
},
'CherryPy WSGIServer': {
'Enabled': pause_resume('CherryPy WSGIServer'),
'Connections/second': '%.3f',
'Start time': iso_format,
},
}
@cherrypy.expose
def index(self):
# Transform the raw data into pretty output for HTML
yield """
<html>
<head>
<title>Statistics</title>
<style>
th, td {
padding: 0.25em 0.5em;
border: 1px solid #666699;
}
table {
border-collapse: collapse;
}
table.stats1 {
width: 100%;
}
table.stats1 th {
font-weight: bold;
text-align: right;
background-color: #CCD5DD;
}
table.stats2, h2 {
margin-left: 50px;
}
table.stats2 th {
font-weight: bold;
text-align: center;
background-color: #CCD5DD;
}
</style>
</head>
<body>
"""
for title, scalars, collections in self.get_namespaces():
yield """
<h1>%s</h1>
<table class='stats1'>
<tbody>
""" % title
for i, (key, value) in enumerate(scalars):
colnum = i % 3
if colnum == 0:
yield """
<tr>"""
yield (
"""
<th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" %
vars()
)
if colnum == 2:
yield """
</tr>"""
if colnum == 0:
yield """
<th></th><td></td>
<th></th><td></td>
</tr>"""
elif colnum == 1:
yield """
<th></th><td></td>
</tr>"""
yield """
</tbody>
</table>"""
for subtitle, headers, subrows in collections:
yield """
<h2>%s</h2>
<table class='stats2'>
<thead>
<tr>""" % subtitle
for key in headers:
yield """
<th>%s</th>""" % key
yield """
</tr>
</thead>
<tbody>"""
for subrow in subrows:
yield """
<tr>"""
for value in subrow:
yield """
<td>%s</td>""" % value
yield """
</tr>"""
yield """
</tbody>
</table>"""
yield """
</body>
</html>
"""
def get_namespaces(self):
"""Yield (title, scalars, collections) for each namespace."""
s = extrapolate_statistics(logging.statistics)
for title, ns in sorted(s.items()):
scalars = []
collections = []
ns_fmt = self.formatting.get(title, {})
for k, v in sorted(ns.items()):
fmt = ns_fmt.get(k, {})
if isinstance(v, dict):
headers, subrows = self.get_dict_collection(v, fmt)
collections.append((k, ['ID'] + headers, subrows))
elif isinstance(v, (list, tuple)):
headers, subrows = self.get_list_collection(v, fmt)
collections.append((k, headers, subrows))
else:
format = ns_fmt.get(k, missing)
if format is None:
# Don't output this column.
continue
if hasattr(format, '__call__'):
v = format(v)
elif format is not missing:
v = format % v
scalars.append((k, v))
yield title, scalars, collections
def get_dict_collection(self, v, formatting):
"""Return ([headers], [rows]) for the given collection."""
# E.g., the 'Requests' dict.
headers = []
vals = v.values()
for record in vals:
for k3 in record:
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if k3 not in headers:
headers.append(k3)
headers.sort()
subrows = []
for k2, record in sorted(v.items()):
subrow = [k2]
for k3 in headers:
v3 = record.get(k3, '')
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if hasattr(format, '__call__'):
v3 = format(v3)
elif format is not missing:
v3 = format % v3
subrow.append(v3)
subrows.append(subrow)
return headers, subrows
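    # Note on get_dict_collection (hypothetical data): with
    # v = {7: {'A': 1, 'B': 2}} and no formatting overrides, it returns
    # (['A', 'B'], [[7, 1, 2]]); the caller prepends an 'ID' header for
    # the key column.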
def get_list_collection(self, v, formatting):
"""Return ([headers], [subrows]) for the given collection."""
# E.g., the 'Slow Queries' list.
headers = []
for record in v:
for k3 in record:
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if k3 not in headers:
headers.append(k3)
headers.sort()
subrows = []
for record in v:
subrow = []
for k3 in headers:
v3 = record.get(k3, '')
format = formatting.get(k3, missing)
if format is None:
# Don't output this column.
continue
if hasattr(format, '__call__'):
v3 = format(v3)
elif format is not missing:
v3 = format % v3
subrow.append(v3)
subrows.append(subrow)
return headers, subrows
if json is not None:
@cherrypy.expose
def data(self):
s = extrapolate_statistics(logging.statistics)
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps(s, sort_keys=True, indent=4).encode('utf-8')
@cherrypy.expose
def pause(self, namespace):
logging.statistics.get(namespace, {})['Enabled'] = False
raise cherrypy.HTTPRedirect('./')
pause.cp_config = {'tools.allow.on': True,
'tools.allow.methods': ['POST']}
@cherrypy.expose
def resume(self, namespace):
logging.statistics.get(namespace, {})['Enabled'] = True
raise cherrypy.HTTPRedirect('./')
resume.cp_config = {'tools.allow.on': True,
'tools.allow.methods': ['POST']}
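# To expose the report pages, mount an instance somewhere in the tree, e.g.
# (mount point hypothetical):
#
#     cherrypy.tree.mount(StatsPage(), '/cpstats')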
# --------------------------------------------------------------------------- #
# rembo10_headphones/lib/cherrypy/process/win32.py
# --------------------------------------------------------------------------- #
"""Windows service.
Requires pywin32.
"""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=20)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=20)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=20)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=20)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event
objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
'WSPBus %s Event (pid=%r)' %
(state.name, os.getpid()))
self.events[state] = event
return event
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(
events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError('The given object could not be found: %r' % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
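# Example (service name hypothetical): ask a running service to restart
# gracefully via the user-defined control code registered above:
#
#     signal_child('Python Web Service', 'graceful')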
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = 'Python Web Service'
_svc_display_name_ = 'Python Web Service'
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = 'pywebsvc'
_exe_args_ = None # Default to no arguments
    # Only exists on Windows 2000 or later, ignored on Windows NT
_svc_description_ = 'Python Web Service'
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
def SvcOther(self, control):
from cherrypy import process
process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
# --------------------------------------------------------------------------- #
# rembo10_headphones/lib/cherrypy/process/plugins.py
# --------------------------------------------------------------------------- #
"""Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
import _thread
from cherrypy._cpcompat import text_or_bytes
from cherrypy._cpcompat import ntob
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file
# has "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
bus = None
"""A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine.
"""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
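# A minimal sketch of a custom plugin (names hypothetical; assumes a bus such
# as cherrypy.engine):
#
#     class Greeter(SimplePlugin):
#         def start(self):
#             self.bus.log('Greeter started')
#
#     Greeter(cherrypy.engine).subscribe()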
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
You can modify what signals your application listens for, and what it does
when it receives signals, by modifying :attr:`SignalHandler.handlers`,
a dict of {signal name: callback} pairs. The default set is::
handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
    The :func:`SignalHandler.handle_SIGHUP` method calls
:func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
if the process is daemonized, but
:func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
if the process is attached to a TTY. This is because Unix window
managers tend to send SIGHUP to terminal windows when the user closes them.
Feel free to add signals which are not available on every platform.
The :class:`SignalHandler` will ignore errors raised from attempting
to register handlers for unknown signals.
"""
handlers = {}
"""A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
signals = {}
"""A map from signal numbers to names."""
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
if sys.platform[:4] == 'java':
del self.handlers['SIGUSR1']
self.handlers['SIGUSR2'] = self.bus.graceful
self.bus.log('SIGUSR1 cannot be set on the JVM platform. '
'Using SIGUSR2 instead.')
self.handlers['SIGINT'] = self._jython_SIGINT_handler
self._previous_handlers = {}
        # used to determine if the process is a daemon in `self._is_daemonized`
self._original_pid = os.getpid()
def _jython_SIGINT_handler(self, signum=None, frame=None):
# See http://bugs.jython.org/issue1313
self.bus.log('Keyboard Interrupt: shutting down bus')
self.bus.exit()
def _is_daemonized(self):
"""Return boolean indicating if the current process is
running as a daemon.
The criteria to determine the `daemon` condition is to verify
if the current pid is not the same as the one that got used on
the initial construction of the plugin *and* the stdin is not
connected to a terminal.
The sole validation of the tty is not enough when the plugin
is executing inside other process like in a CI tool
(Buildbot, Jenkins).
"""
return (
self._original_pid != os.getpid() and
not os.isatty(sys.stdin.fileno())
)
def subscribe(self):
"""Subscribe self.handlers to signals."""
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
"""Unsubscribe self.handlers from signals."""
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log('Restoring %s handler to SIG_DFL.' % signame)
handler = _signal.SIG_DFL
else:
self.bus.log('Restoring %s handler %r.' % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log('Restored old %s handler %r, but our '
'handler was not registered.' %
(signame, handler), level=30)
except ValueError:
self.bus.log('Unable to restore %s handler %r.' %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the
current platform, ValueError is raised.
"""
if isinstance(signal, text_or_bytes):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError('No such signal: %r' % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError('No such signal: %r' % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log('Listening for %s.' % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log('Caught signal %s.' % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
"""Restart if daemonized, else exit."""
if self._is_daemonized():
self.bus.log('SIGHUP caught while daemonized. Restarting.')
self.bus.restart()
else:
# not daemonized (may be foreground or background)
self.bus.log('SIGHUP caught but not daemonized. Exiting.')
self.bus.exit()
try:
import pwd
import grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to `Gavin Baker
<http://antonym.org/2005/12/dropping-privileges-in-python.html>`_.
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
@property
def uid(self):
"""The uid under which to run.
Availability: Unix.
"""
return self._uid
@uid.setter
def uid(self, val):
if val is not None:
if pwd is None:
self.bus.log('pwd module not available; ignoring uid.',
level=30)
val = None
elif isinstance(val, text_or_bytes):
val = pwd.getpwnam(val)[2]
self._uid = val
@property
def gid(self):
"""The gid under which to run.
Availability: Unix.
"""
return self._gid
@gid.setter
def gid(self, val):
if val is not None:
if grp is None:
self.bus.log('grp module not available; ignoring gid.',
level=30)
val = None
elif isinstance(val, text_or_bytes):
val = grp.getgrnam(val)[2]
self._gid = val
@property
def umask(self):
"""The default permission mode for newly created files and directories.
Usually expressed in octal format, for example, ``0644``.
Availability: Unix, Windows.
"""
return self._umask
@umask.setter
def umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log('umask function not available; ignoring umask.',
level=30)
val = None
self._umask = val
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
os.setgroups([])
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
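# Typical usage (values hypothetical): bind as root, then shed privileges once
# the server has started on its low port:
#
#     DropPrivileges(cherrypy.engine, uid='nobody', gid='nogroup',
#                    umask=0o022).subscribe()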
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via::
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
indicates if the process successfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
        if self.finalized:
            self.bus.log('Already daemonized.')
            return
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.active_count() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
self.daemonize(self.stdin, self.stdout, self.stderr, self.bus.log)
self.finalized = True
start.priority = 65
@staticmethod
def daemonize(
stdin='/dev/null', stdout='/dev/null', stderr='/dev/null',
logger=lambda msg: None):
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
error_tmpl = (
'{sys.argv[0]}: fork #{n} failed: ({exc.errno}) {exc.strerror}\n'
)
for fork in range(2):
msg = ['Forking once.', 'Forking twice.'][fork]
try:
pid = os.fork()
if pid > 0:
# This is the parent; exit.
logger(msg)
os._exit(0)
except OSError as exc:
# Python raises OSError rather than returning negative numbers.
sys.exit(error_tmpl.format(sys=sys, exc=exc, n=fork + 1))
if fork == 0:
os.setsid()
os.umask(0)
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+')
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
logger('Daemonized to PID: %s' % os.getpid())
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
with open(self.pidfile, 'wb') as f:
f.write(ntob('%s\n' % pid, 'utf8'))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
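# Typical usage (path hypothetical):
#
#     PIDFile(cherrypy.engine, '/var/run/myapp.pid').subscribe()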
class PerpetualTimer(threading.Timer):
"""A responsive subclass of threading.Timer whose run() method repeats.
Use this timer only when you really need a very interruptible timer;
    this checks its 'finished' condition up to 20 times a second, which
    can result in pretty high CPU usage.
"""
def __init__(self, *args, **kwargs):
"Override parent constructor to allow 'bus' to be provided."
self.bus = kwargs.pop('bus', None)
super(PerpetualTimer, self).__init__(*args, **kwargs)
def run(self):
while True:
self.finished.wait(self.interval)
            if self.finished.is_set():
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log(
'Error in perpetual timer thread function %r.' %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class BackgroundTask(threading.Thread):
"""A subclass of threading.Thread whose run() method repeats.
Use this class for most repeating tasks. It uses time.sleep() to
wait for each interval, which isn't very responsive; that is, even
if you call self.cancel(), you'll have to wait until the sleep()
call finishes before the thread stops. To compensate, it defaults to
being daemonic, which means it won't delay stopping the whole
process.
"""
    def __init__(self, interval, function, args=None, kwargs=None, bus=None):
        super(BackgroundTask, self).__init__()
        self.interval = interval
        self.function = function
        # Avoid mutable default arguments, which would be shared
        # across all BackgroundTask instances.
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
self.running = False
self.bus = bus
# default to daemonic
self.daemon = True
def cancel(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.interval)
if not self.running:
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log('Error in background task thread function %r.'
% self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread."""
callback = None
"""The function to call at intervals."""
frequency = 60
"""The time in seconds between callback runs."""
thread = None
"""A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>`
thread.
"""
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own background thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = BackgroundTask(self.frequency, self.callback,
bus=self.bus)
self.thread.name = threadname
self.thread.start()
self.bus.log('Started monitor thread %r.' % threadname)
else:
self.bus.log('Monitor thread %r already started.' % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's background task thread."""
        if self.thread is None:
            self.bus.log('No thread running for %s.' %
                         (self.name or self.__class__.__name__))
else:
if self.thread is not threading.current_thread():
name = self.thread.name
self.thread.cancel()
if not self.thread.daemon:
self.bus.log('Joining %r' % name)
self.thread.join()
self.bus.log('Stopped thread %r.' % name)
self.thread = None
def graceful(self):
"""Stop the callback's background task thread and restart it."""
self.stop()
self.start()
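# Typical usage (callback and name hypothetical): run poll_db() every 30
# seconds in its own thread for the life of the bus:
#
#     Monitor(cherrypy.engine, poll_db, frequency=30,
#             name='DB Poller').subscribe()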
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change.
This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
if any of the files it monitors change (or is deleted). By default, the
autoreloader monitors all imported modules; you can add to the
set by adding to ``autoreload.files``::
cherrypy.engine.autoreload.files.add(myFile)
If there are imported files you do *not* wish to monitor, you can
adjust the ``match`` attribute, a regular expression. For example,
to stop monitoring cherrypy itself::
cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
the autoreload plugin takes a ``frequency`` argument. The default is
1 second; that is, the autoreloader will examine files once each second.
"""
files = None
"""The set of files to poll for modifications."""
frequency = 1
"""The interval in seconds at which to poll for modified files."""
match = '.*'
"""A regular expression by which to match filenames."""
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own background task thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of sys.modules filenames to monitor."""
search_mod_names = filter(
re.compile(self.match).match,
list(sys.modules.keys()),
)
mods = map(sys.modules.get, search_mod_names)
return set(filter(None, map(self._file_for_module, mods)))
@classmethod
def _file_for_module(cls, module):
"""Return the relevant file for the module."""
return (
cls._archive_for_zip_module(module)
or cls._file_for_file_module(module)
)
@staticmethod
def _archive_for_zip_module(module):
"""Return the archive filename for the module if relevant."""
try:
return module.__loader__.archive
except AttributeError:
pass
@classmethod
def _file_for_file_module(cls, module):
"""Return the file for the module."""
try:
return module.__file__ and cls._make_absolute(module.__file__)
except AttributeError:
pass
@staticmethod
def _make_absolute(filename):
"""Ensure filename is absolute to avoid effect of os.chdir."""
return filename if os.path.isabs(filename) else (
os.path.normpath(os.path.join(_module__file__base, filename))
)
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log('Restarting because %s changed.' %
filename)
self.thread.cancel()
self.bus.log('Stopped thread %r.' %
self.thread.name)
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each
thread). This will register/unregister the current thread and
publish to 'start_thread' and 'stop_thread' listeners in the bus as
needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether the
thread will be re-used or not. The bus will call 'stop_thread'
listeners for you when it stops.
"""
threads = None
"""A map of {thread ident: index number} pairs."""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('start_thread', set())
self.bus.listeners.setdefault('release_thread', set())
self.bus.listeners.setdefault('stop_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = _thread.get_ident()
if thread_ident not in self.threads:
# We can't just use get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = _thread.get_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
# --------------------------------------------------------------------------- #
# rembo10_headphones/lib/cherrypy/process/servers.py
# --------------------------------------------------------------------------- #
r"""
Starting in CherryPy 3.1, cherrypy.server is implemented as an
:ref:`Engine Plugin<plugins>`. It's an instance of
:class:`cherrypy._cpserver.Server`, which is a subclass of
:class:`cherrypy.process.servers.ServerAdapter`. The ``ServerAdapter`` class
is designed to control other servers, as well.
Multiple servers/ports
======================
If you need to start more than one HTTP server (to serve on multiple ports, or
protocols, etc.), you can manually register each one and then start them all
with engine.start::
s1 = ServerAdapter(
cherrypy.engine,
MyWSGIServer(host='0.0.0.0', port=80)
)
s2 = ServerAdapter(
cherrypy.engine,
another.HTTPServer(host='127.0.0.1', SSL=True)
)
s1.subscribe()
s2.subscribe()
cherrypy.engine.start()
.. index:: SCGI
FastCGI/SCGI
============
There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in
:mod:`cherrypy.process.servers`. To start an fcgi server, for example,
wrap an instance of it in a ServerAdapter::
addr = ('0.0.0.0', 4000)
f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr)
s = servers.ServerAdapter(cherrypy.engine, httpserver=f, bind_addr=addr)
s.subscribe()
The :doc:`cherryd</deployguide/cherryd>` startup script will do the above for
you via its `-f` flag.
Note that you need to download and install `flup <http://trac.saddi.com/flup>`_
yourself, whether you use ``cherryd`` or not.
.. _fastcgi:
.. index:: FastCGI
FastCGI
-------
A very simple setup lets your CherryPy application run with FastCGI.
You just need the flup library,
plus a running Apache server (with ``mod_fastcgi``) or lighttpd server.
CherryPy code
^^^^^^^^^^^^^
hello.py::
#!/usr/bin/python
import cherrypy
class HelloWorld:
'''Sample request handler class.'''
@cherrypy.expose
def index(self):
return "Hello world!"
cherrypy.tree.mount(HelloWorld())
# CherryPy autoreload must be disabled for the flup server to work
cherrypy.config.update({'engine.autoreload.on':False})
Then run :doc:`/deployguide/cherryd` with the '-f' arg::
cherryd -c <myconfig> -d -f -i hello.py
Apache
^^^^^^
At the top level in httpd.conf::
FastCgiIpcDir /tmp
FastCgiServer /path/to/cherry.fcgi -idle-timeout 120 -processes 4
And inside the relevant VirtualHost section::
# FastCGI config
AddHandler fastcgi-script .fcgi
ScriptAliasMatch (.*$) /path/to/cherry.fcgi$1
Lighttpd
^^^^^^^^
For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these
instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is
active within ``server.modules``. Then, within your ``$HTTP["host"]``
directive, configure your fastcgi script like the following::
$HTTP["url"] =~ "" {
fastcgi.server = (
"/" => (
"script.fcgi" => (
"bin-path" => "/path/to/your/script.fcgi",
"socket" => "/tmp/script.sock",
"check-local" => "disable",
"disable-time" => 1,
"min-procs" => 1,
"max-procs" => 1, # adjust as needed
),
),
)
} # end of $HTTP["url"] =~ "^/"
Please see `Lighttpd FastCGI Docs
<http://redmine.lighttpd.net/wiki/lighttpd/Docs:ModFastCGI>`_ for
an explanation of the possible configuration options.
"""
import os
import sys
import time
import warnings
import contextlib
import portend
class Timeouts:
occupied = 5
free = 1
class ServerAdapter(object):
"""Adapter for an HTTP server.
If you need to start more than one HTTP server (to serve on multiple
ports, or protocols, etc.), you can manually register each one and then
start them all with bus.start::
s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.start()
"""
def __init__(self, bus, httpserver=None, bind_addr=None):
self.bus = bus
self.httpserver = httpserver
self.bind_addr = bind_addr
self.interrupt = None
self.running = False
def subscribe(self):
self.bus.subscribe('start', self.start)
self.bus.subscribe('stop', self.stop)
def unsubscribe(self):
self.bus.unsubscribe('start', self.start)
self.bus.unsubscribe('stop', self.stop)
def start(self):
"""Start the HTTP server."""
if self.running:
self.bus.log('Already serving on %s' % self.description)
return
self.interrupt = None
if not self.httpserver:
raise ValueError('No HTTP server has been created.')
if not os.environ.get('LISTEN_PID', None):
# Start the httpserver in a new thread.
if isinstance(self.bind_addr, tuple):
portend.free(*self.bind_addr, timeout=Timeouts.free)
import threading
t = threading.Thread(target=self._start_http_thread)
t.name = 'HTTPServer ' + t.name
t.start()
self.wait()
self.running = True
self.bus.log('Serving on %s' % self.description)
start.priority = 75
@property
def description(self):
"""A description about where this server is bound."""
if self.bind_addr is None:
on_what = 'unknown interface (dynamic?)'
elif isinstance(self.bind_addr, tuple):
on_what = self._get_base()
else:
on_what = 'socket file: %s' % self.bind_addr
return on_what
def _get_base(self):
if not self.httpserver:
return ''
host, port = self.bound_addr
if getattr(self.httpserver, 'ssl_adapter', None):
scheme = 'https'
if port != 443:
host += ':%s' % port
else:
scheme = 'http'
if port != 80:
host += ':%s' % port
return '%s://%s' % (scheme, host)
def _start_http_thread(self):
"""HTTP servers MUST be running in new threads, so that the
main thread persists to receive KeyboardInterrupt's. If an
exception is raised in the httpserver's thread then it's
trapped here, and the bus (and therefore our httpserver)
are shut down.
"""
try:
self.httpserver.start()
except KeyboardInterrupt:
self.bus.log('<Ctrl-C> hit: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.exit()
except SystemExit:
self.bus.log('SystemExit raised: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.exit()
raise
except Exception:
self.interrupt = sys.exc_info()[1]
self.bus.log('Error in HTTP server: shutting down',
traceback=True, level=40)
self.bus.exit()
raise
def wait(self):
"""Wait until the HTTP server is ready to receive requests."""
while not getattr(self.httpserver, 'ready', False):
if self.interrupt:
raise self.interrupt
time.sleep(.1)
# bypass check when LISTEN_PID is set
if os.environ.get('LISTEN_PID', None):
return
# bypass check when running via socket-activation
# (for socket-activation the port will be managed by systemd)
if not isinstance(self.bind_addr, tuple):
return
# wait for port to be occupied
with _safe_wait(*self.bound_addr):
portend.occupied(*self.bound_addr, timeout=Timeouts.occupied)
@property
def bound_addr(self):
"""
The bind address, or if it's an ephemeral port and the
socket has been bound, return the actual port bound.
"""
host, port = self.bind_addr
if port == 0 and self.httpserver.socket:
# Bound to ephemeral port. Get the actual port allocated.
port = self.httpserver.socket.getsockname()[1]
return host, port
def stop(self):
"""Stop the HTTP server."""
if self.running:
# stop() MUST block until the server is *truly* stopped.
self.httpserver.stop()
# Wait for the socket to be truly freed.
if isinstance(self.bind_addr, tuple):
portend.free(*self.bound_addr, timeout=Timeouts.free)
self.running = False
self.bus.log('HTTP Server %s shut down' % self.httpserver)
else:
self.bus.log('HTTP Server %s already shut down' % self.httpserver)
stop.priority = 25
def restart(self):
"""Restart the HTTP server."""
self.stop()
self.start()
class FlupCGIServer(object):
"""Adapter for a flup.server.cgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the CGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.cgi import WSGIServer
self.cgiserver = WSGIServer(*self.args, **self.kwargs)
self.ready = True
self.cgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
class FlupFCGIServer(object):
"""Adapter for a flup.server.fcgi.WSGIServer."""
def __init__(self, *args, **kwargs):
if kwargs.get('bindAddress', None) is None:
import socket
if not hasattr(socket, 'fromfd'):
raise ValueError(
'Dynamic FCGI server not available on this platform. '
'You must use a static or external one by providing a '
'legal bindAddress.')
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the FCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.fcgi import WSGIServer
self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.fcgiserver._installSignalHandlers = lambda: None
self.fcgiserver._oldSIGs = []
self.ready = True
self.fcgiserver.run()
def stop(self):
"""Stop the HTTP server."""
# Forcibly stop the fcgi server main event loop.
self.fcgiserver._keepGoing = False
# Force all worker threads to die off.
self.fcgiserver._threadPool.maxSpare = (
self.fcgiserver._threadPool._idleCount)
self.ready = False
class FlupSCGIServer(object):
"""Adapter for a flup.server.scgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the SCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.scgi import WSGIServer
self.scgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.scgiserver._installSignalHandlers = lambda: None
self.scgiserver._oldSIGs = []
self.ready = True
self.scgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
# Forcibly stop the scgi server main event loop.
self.scgiserver._keepGoing = False
# Force all worker threads to die off.
self.scgiserver._threadPool.maxSpare = 0
@contextlib.contextmanager
def _safe_wait(host, port):
"""
On systems where a loopback interface is not available and the
server is bound to all interfaces, it's difficult to determine
whether the server is in fact occupying the port. In this case,
just issue a warning and move on. See issue #1100.
"""
try:
yield
except portend.Timeout:
if host == portend.client_host(host):
raise
msg = 'Unable to verify that the server is bound on %r' % port
warnings.warn(msg)
# --------------------------------------------------------------------------- #
# rembo10_headphones/lib/cherrypy/process/__init__.py
# --------------------------------------------------------------------------- #
"""Site container for an HTTP server.
A Web Site Process Bus object is used to connect applications, servers,
and frameworks with site-wide services such as daemonization, process
reload, signal handling, drop privileges, PID file management, logging
for all of these, and many more.
The 'plugins' module defines a few abstract and concrete services for
use with the bus. Some use tool-specific channels; see the documentation
for each class.
"""
from .wspbus import bus
from . import plugins, servers
__all__ = ('bus', 'plugins', 'servers')
# --------------------------------------------------------------------------- #
# rembo10_headphones/lib/cherrypy/process/wspbus.py
# --------------------------------------------------------------------------- #
r"""An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a
framework-neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.::
O
|
V
STOPPING --> STOPPED --> EXITING -> X
A A |
| \___ |
| \ |
| V V
STARTED <-- STARTING
"""
import atexit
try:
import ctypes
except ImportError:
"""Google AppEngine is shipped without ctypes.
:seealso: http://stackoverflow.com/a/6523777/70170
"""
ctypes = None
import operator
import os
import sys
import threading
import time
import traceback as _traceback
import warnings
import subprocess
import functools
from more_itertools import always_iterable
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
"""Exception raised during errors on Bus.publish()."""
delimiter = '\n'
def __init__(self, *args, **kwargs):
"""Initialize ChannelFailures errors wrapper."""
super(ChannelFailures, self).__init__(*args, **kwargs)
self._exceptions = list()
def handle_exception(self):
"""Append the current exception to self."""
self._exceptions.append(sys.exc_info()[1])
def get_instances(self):
"""Return a list of seen exception instances."""
return self._exceptions[:]
def __str__(self):
"""Render the list of errors, which happened in channel."""
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
__repr__ = __str__
def __bool__(self):
"""Determine whether any error happened in channel."""
return bool(self._exceptions)
__nonzero__ = __bool__
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return 'states.%s' % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
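# Example (minimal sketch): each State instance learns its attribute name
# via _StateEnum.__setattr__, so reprs read naturally, e.g.
# repr(states.STARTED) == 'states.STARTED'.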
try:
import fcntl
except ImportError:
max_files = 0
else:
try:
max_files = os.sysconf('SC_OPEN_MAX')
except AttributeError:
max_files = 1024
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop
the whole server.
"""
states = states
state = states.STOPPED
execv = False
max_cloexec_files = max_files
def __init__(self):
"""Initialize pub/sub bus."""
self.execv = False
self.state = states.STOPPED
channels = 'start', 'stop', 'exit', 'graceful', 'log', 'main'
self.listeners = dict(
(channel, set())
for channel in channels
)
self._priorities = {}
def subscribe(self, channel, callback=None, priority=None):
"""Add the given callback at the given channel (if not present).
If callback is None, return a partial suitable for decorating
the callback.
"""
if callback is None:
return functools.partial(
self.subscribe,
channel,
priority=priority,
)
ch_listeners = self.listeners.setdefault(channel, set())
ch_listeners.add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
raw_items = (
(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]
)
items = sorted(raw_items, key=operator.itemgetter(0))
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit:
e = sys.exc_info()[1]
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except Exception:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log('Error in %r listener %r' % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output
def _clean_exit(self):
"""Assert that the Bus is not running in atexit handler callback."""
if self.state != states.EXITING:
warnings.warn(
'The main thread is exiting, but the Bus is in the %r state; '
'shutting it down automatically now. You must either call '
'bus.block() after start(), or call bus.exit() before the '
'main thread exits.' % self.state, RuntimeWarning)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.log('Shutting down due to error in start listener:',
level=40, traceback=True)
e_info = sys.exc_info()[1]
try:
self.exit()
except Exception:
# Any stop/exit errors will be logged inside publish().
pass
# Re-raise the original error
raise e_info
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
EX_SOFTWARE = 70
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except Exception:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(EX_SOFTWARE)
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os._exit here.
os._exit(EX_SOFTWARE)
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling
thread; instead, it stops the bus and asks the main thread to
call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all
threads to terminate, and then calls os.execv if self.execv is
True. This design allows another thread to call bus.restart, yet
have the main thread perform the actual execv call (required on
some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See https://github.com/cherrypy/cherrypy/issues/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See https://github.com/cherrypy/cherrypy/issues/751.
self.log('Waiting for child threads to terminate...')
for t in threading.enumerate():
            # Validate that we're not trying to join the MainThread,
            # which would deadlock. This can happen when the engine is
            # implemented as a Windows service, or whenever another
            # thread executes cherrypy.engine.exit().
if (
t != threading.current_thread() and
not isinstance(t, threading._MainThread) and
# Note that any dummy (external) threads are
# always daemonic.
not t.daemon
):
self.log('Waiting for thread %s.' % t.name)
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Poll for the given state(s) at intervals; publish to channel."""
states = set(always_iterable(state))
while self.state not in states:
time.sleep(interval)
self.publish(channel)
def _do_execv(self):
"""Re-execute the current process.
        This must be called from the main thread, because certain
        platforms (OS X) don't handle execv well when it is called
        from a child thread.
"""
try:
args = self._get_true_argv()
except NotImplementedError:
"""It's probably win32 or GAE."""
args = [sys.executable] + self._get_interpreter_argv() + sys.argv
self.log('Re-spawning %s' % ' '.join(args))
self._extend_pythonpath(os.environ)
if sys.platform[:4] == 'java':
from _systemrestart import SystemRestart
raise SystemRestart
else:
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
if self.max_cloexec_files:
self._set_cloexec()
os.execv(sys.executable, args)
@staticmethod
def _get_interpreter_argv():
"""Retrieve current Python interpreter's arguments.
Returns empty tuple in case of frozen mode, uses built-in arguments
reproduction function otherwise.
Frozen mode is possible for the app has been packaged into a binary
executable using py2exe. In this case the interpreter's arguments are
already built-in into that executable.
:seealso: https://github.com/cherrypy/cherrypy/issues/1526
Ref: https://pythonhosted.org/PyInstaller/runtime-information.html
"""
return ([]
if getattr(sys, 'frozen', False)
else subprocess._args_from_interpreter_flags())
@staticmethod
def _get_true_argv():
"""Retrieve all real arguments of the python interpreter.
...even those not listed in ``sys.argv``
:seealso: http://stackoverflow.com/a/28338254/595220
:seealso: http://stackoverflow.com/a/6683222/595220
:seealso: http://stackoverflow.com/a/28414807/595220
"""
try:
char_p = ctypes.c_wchar_p
argv = ctypes.POINTER(char_p)()
argc = ctypes.c_int()
ctypes.pythonapi.Py_GetArgcArgv(
ctypes.byref(argc),
ctypes.byref(argv),
)
_argv = argv[:argc.value]
            # The code below tries to handle special cases correctly.
            # The argument that Python itself consumed after `-c` shows up
            # as a literal `-c` in the recovered argv; the same applies to
            # `-m`. This snippet tries to survive at least the `-m` case.
# Ref: https://github.com/cherrypy/cherrypy/issues/1545
# Ref: python/cpython@418baf9
argv_len, is_command, is_module = len(_argv), False, False
try:
m_ind = _argv.index('-m')
if m_ind < argv_len - 1 and _argv[m_ind + 1] in ('-c', '-m'):
"""
In some older Python versions `-m`'s argument may be
substituted with `-c`, not `-m`
"""
is_module = True
except (IndexError, ValueError):
m_ind = None
try:
c_ind = _argv.index('-c')
if c_ind < argv_len - 1 and _argv[c_ind + 1] == '-c':
is_command = True
except (IndexError, ValueError):
c_ind = None
if is_module:
"""It's containing `-m -m` sequence of arguments."""
if is_command and c_ind < m_ind:
"""There's `-c -c` before `-m`"""
raise RuntimeError(
"Cannot reconstruct command from '-c'. Ref: "
'https://github.com/cherrypy/cherrypy/issues/1545')
# Survive module argument here
original_module = sys.argv[0]
if not os.access(original_module, os.R_OK):
"""There's no such module exist."""
raise AttributeError(
"{} doesn't seem to be a module "
'accessible by current user'.format(original_module))
del _argv[m_ind:m_ind + 2] # remove `-m -m`
# ... and substitute it with the original module path:
_argv.insert(m_ind, original_module)
elif is_command:
"""It's containing just `-c -c` sequence of arguments."""
raise RuntimeError(
"Cannot reconstruct command from '-c'. "
'Ref: https://github.com/cherrypy/cherrypy/issues/1545')
except AttributeError:
"""It looks Py_GetArgcArgv's completely absent in some environments
It is known, that there's no Py_GetArgcArgv in MS Windows and
``ctypes`` module is completely absent in Google AppEngine
:seealso: https://github.com/cherrypy/cherrypy/issues/1506
:seealso: https://github.com/cherrypy/cherrypy/issues/1512
:ref: http://bit.ly/2gK6bXK
"""
raise NotImplementedError
else:
return _argv
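    # Example (minimal sketch): for 'python -m some.module arg', the argv
    # recovered above may look like ['python', '-m', '-m', 'arg'] (or with
    # '-c' in place of the second '-m' on some versions); the pair is then
    # replaced with the module path taken from sys.argv[0].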
@staticmethod
def _extend_pythonpath(env):
"""Prepend current working dir to PATH environment variable if needed.
If sys.path[0] is an empty string, the interpreter was likely
invoked with -m and the effective path is about to change on re-
exec. Add the current directory to $PYTHONPATH to ensure that
the new process sees the same path.
This issue cannot be addressed in the general case because
Python cannot reliably reconstruct the original command line (
http://bugs.python.org/issue14208).
(This idea filched from tornado.autoreload)
"""
path_prefix = '.' + os.pathsep
existing_path = env.get('PYTHONPATH', '')
needs_patch = (
sys.path[0] == '' and
not existing_path.startswith(path_prefix)
)
if needs_patch:
env['PYTHONPATH'] = path_prefix + existing_path
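    # Example (minimal sketch): if the process was started via 'python -m
    # app' so that sys.path[0] == '', the re-exec'd child is handed
    # PYTHONPATH='.' + os.pathsep + <existing value>, so it resolves
    # modules the same way the original process did.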
def _set_cloexec(self):
"""Set the CLOEXEC flag on all open files (except stdin/out/err).
If self.max_cloexec_files is an integer (the default), then on
platforms which support it, it represents the max open files
setting for the operating system. This function will be called
just before the process is restarted via os.execv() to prevent
open files from persisting into the new process.
Set self.max_cloexec_files to 0 to disable this behavior.
"""
for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.name = 'Bus Callback ' + t.name
t.start()
self.start()
return t
def log(self, msg='', level=20, traceback=False):
"""Log the given message.
Append the last traceback if requested.
"""
if traceback:
msg += '\n' + ''.join(_traceback.format_exception(*sys.exc_info()))
self.publish('log', msg, level)
bus = Bus()
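# Example (minimal sketch): wiring listeners onto a private Bus and driving
# it through a start/publish/exit cycle. The 'db' channel and the priority
# used here are arbitrary illustrations, not channels the bus predefines.
def _demo_bus_usage():
    demo = Bus()
    events = []
    # Lower priority numbers run first within a channel (default is 50).
    demo.subscribe('start', lambda: events.append('pool opened'), priority=10)
    demo.subscribe('db', lambda msg: 'handled %s' % msg)
    demo.start()                          # STOPPED -> STARTING -> STARTED
    results = demo.publish('db', 'ping')  # -> ['handled ping']
    demo.exit()                           # STOPPING -> STOPPED -> EXITING
    return events, results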
| 21,507 | Python | .py | 489 | 33.879346 | 79 | 0.607554 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,611 | tzfile.py | rembo10_headphones/lib/pytz/tzfile.py |
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from datetime import datetime
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('ASCII')
_NULL = _byte_string('\0')
def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('ASCII'))
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB' * typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i + 2]
if tzname_offset not in tznames:
nul = tznames_raw.find(_NULL, tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = _std_string(
tznames_raw[tzname_offset:nul])
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i + 1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(ttinfo) == 1 or len(transitions) == 0:
        ttinfo[0][0], ttinfo[0][2]  # fail fast if the first entry is malformed
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i - 1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
                # Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
if dst <= 0 or dst > 3600 * 3:
for j in range(i + 1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) // 60) * 60
dst = int((dst + 30) // 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
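# Example (minimal sketch): peeking at just the tzfile(5) header, using the
# same '>4s c 15x 6l' layout that build_tzinfo reads. The path argument is
# assumed to point at a compiled zone file from a zoneinfo directory.
def _read_tzfile_header(path):
    head_fmt = '>4s c 15x 6l'
    with open(path, 'rb') as fp:
        fields = unpack(head_fmt, fp.read(calcsize(head_fmt)))
    magic, version = fields[0], fields[1]
    names = ('ttisgmtcnt', 'ttisstdcnt', 'leapcnt',
             'timecnt', 'typecnt', 'charcnt')
    return magic, version, dict(zip(names, fields[2:]))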
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base, 'Australia', 'Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base, 'US', 'Eastern'), 'rb'))
pprint(tz._utc_transition_times)
| 4,723 | Python | .py | 112 | 31.6875 | 79 | 0.561438 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,612 | lazy.py | rembo10_headphones/lib/pytz/lazy.py |
from threading import RLock
try:
from collections.abc import Mapping as DictMixin
except ImportError: # Python < 3.3
try:
from UserDict import DictMixin # Python 2
except ImportError: # Python 3.0-3.3
from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
"""Dictionary populated on first use."""
data = None
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key.upper()]
def __contains__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return key in self.data
def __iter__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return iter(self.data)
def __len__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return len(self.data)
def keys(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data.keys()
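# Example (minimal sketch): a LazyDict subclass only needs to provide
# _fill(); the data is built once, under _fill_lock, on first access.
# The contents here are arbitrary.
class _DemoLazyDict(LazyDict):
    def _fill(self):
        self.data = {'EXAMPLE': 'value'}
# _DemoLazyDict()['example'] == 'value' (__getitem__ upper-cases the key).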
class LazyList(list):
"""List populated on first use."""
_props = [
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
def __new__(cls, fill_iter=None):
if fill_iter is None:
return list()
# We need a new class as we will be dynamically messing with its
# methods.
class LazyList(list):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
list.extend(self, fill_iter.pop())
for method_name in cls._props:
delattr(LazyList, method_name)
finally:
_fill_lock.release()
return getattr(list, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazyList, name, lazy(name))
new_list = LazyList()
return new_list
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
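# Example (minimal sketch): the fill iterable is not consumed until the
# LazyList is first touched; the first proxied call extends the underlying
# list and then strips the proxy methods from the instance's class.
def _demo_lazy_list():
    lazy = LazyList(iter(range(3)))  # nothing is consumed yet
    return len(lazy), list(lazy)     # first access fills: (3, [0, 1, 2])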
class LazySet(set):
"""Set populated on first use."""
_props = (
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__sub__', '__and__', '__xor__', '__or__',
'__rsub__', '__rand__', '__rxor__', '__ror__',
'__isub__', '__iand__', '__ixor__', '__ior__',
'add', 'clear', 'copy', 'difference', 'difference_update',
'discard', 'intersection', 'intersection_update', 'isdisjoint',
'issubset', 'issuperset', 'pop', 'remove',
'symmetric_difference', 'symmetric_difference_update',
'union', 'update')
def __new__(cls, fill_iter=None):
if fill_iter is None:
return set()
class LazySet(set):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
for i in fill_iter.pop():
set.add(self, i)
for method_name in cls._props:
delattr(LazySet, method_name)
finally:
_fill_lock.release()
return getattr(set, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazySet, name, lazy(name))
new_set = LazySet()
return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| 5,404 | Python | .py | 141 | 27.148936 | 75 | 0.489488 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,613 | __init__.py | rembo10_headphones/lib/pytz/__init__.py |
'''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
import sys
import datetime
import os.path
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet # noqa
from pytz.tzinfo import unpickler, BaseTzInfo
from pytz.tzfile import build_tzinfo
# The IANA (nee Olson) database is updated several times a year.
OLSON_VERSION = '2021c'
VERSION = '2021.3' # pip compatible version number.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
'BaseTzInfo', 'FixedOffset',
]
if sys.version_info[0] > 2: # Python 3.x
# Python 3.x doesn't have unicode(), making writing code
# for Python 2.3 and Python 3.x a pain.
unicode = str
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
if type(s) == bytes:
s = s.decode('ASCII')
else:
s.encode('ASCII') # Raise an exception if not ASCII
        return s  # Return the string, not a byte string.
else: # Python 2.x
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii(u'Hello')
'Hello'
>>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
return s.encode('ASCII')
def open_resource(name):
"""Open a resource from the zoneinfo subdir for reading.
    Uses the pkg_resources module if it is available and no standard
    file is found at the calculated location.
    It is possible to specify a different location for the zoneinfo
    subdir by using the PYTZ_TZDATADIR environment variable.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
zoneinfo_dir = os.environ.get('PYTZ_TZDATADIR', None)
if zoneinfo_dir is not None:
filename = os.path.join(zoneinfo_dir, *name_parts)
else:
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename):
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
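# Example (minimal sketch): opening a zone file and checking its magic
# number. This assumes the bundled zoneinfo data is present on disk.
def _demo_open_resource():
    fp = open_resource('US/Eastern')
    try:
        return fp.read(4)  # b'TZif' for a compiled tzfile(5) zone file
    finally:
        fp.close()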
def resource_exists(name):
"""Return true if the given resource exists"""
try:
if os.environ.get('PYTZ_SKIPEXISTSCHECK', ''):
# In "standard" distributions, we can assume that
# all the listed timezones are present. As an
# import-speed optimization, you can set the
# PYTZ_SKIPEXISTSCHECK flag to skip checking
# for the presence of the resource file on disk.
return True
open_resource(name).close()
return True
except IOError:
return False
_tzinfo_cache = {}
def timezone(zone):
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(unicode('US/Eastern')) is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> try:
... timezone('Asia/Shangri-La')
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
>>> try:
... timezone(unicode('\N{TRADE MARK SIGN}'))
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
'''
if zone is None:
raise UnknownTimeZoneError(None)
if zone.upper() == 'UTC':
return utc
try:
zone = ascii(zone)
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone)
zone = _case_insensitive_zone_lookup(_unmunge_zone(zone))
if zone not in _tzinfo_cache:
if zone in all_timezones_set: # noqa
fp = open_resource(zone)
try:
_tzinfo_cache[zone] = build_tzinfo(zone, fp)
finally:
fp.close()
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
_all_timezones_lower_to_standard = None
def _case_insensitive_zone_lookup(zone):
"""case-insensitively matching timezone, else return zone unchanged"""
global _all_timezones_lower_to_standard
if _all_timezones_lower_to_standard is None:
_all_timezones_lower_to_standard = dict((tz.lower(), tz) for tz in all_timezones) # noqa
return _all_timezones_lower_to_standard.get(zone.lower()) or zone # noqa
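# Example (minimal sketch): the two helpers above cooperate inside
# timezone(). 'Etc/GMT_plus_5' is the munged spelling used by old pytz
# releases, and lookups tolerate arbitrary casing (assuming the bundled
# zone data is available).
def _demo_zone_lookup():
    assert _unmunge_zone('Etc/GMT_plus_5') == 'Etc/GMT+5'
    return _case_insensitive_zone_lookup('us/eastern')  # 'US/Eastern'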
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(BaseTzInfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
"""Factory function for utc unpickling.
Makes sure that unpickling a utc instance always returns the same
module global.
    These examples belong in the UTC class above, but it is obscured there;
    they could also go in the README.rst, but integrating the README.rst
    examples with the unit tests is not trivial.
>>> import datetime, pickle
>>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p) - len(naive_p)
17
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> utc is UTC is timezone('UTC')
True
>>> utc is timezone('GMT')
False
"""
return utc
_UTC.__safe_for_unpickling__ = True
def _p(*args):
"""Factory function for unpickling pytz tzinfo instances.
Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
by shortening the path.
"""
return unpickler(*args)
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
"""Map ISO 3166 country code to a list of timezone names commonly used
in that country.
iso3166_code is the two letter code used to identify the country.
>>> def print_list(list_of_strings):
... 'We use a helper so doctests work under Python 2.3 -> 3.x'
... for s in list_of_strings:
... print(s)
>>> print_list(country_timezones['nz'])
Pacific/Auckland
Pacific/Chatham
>>> print_list(country_timezones['ch'])
Europe/Zurich
>>> print_list(country_timezones['CH'])
Europe/Zurich
>>> print_list(country_timezones[unicode('ch')])
Europe/Zurich
>>> print_list(country_timezones['XXX'])
Traceback (most recent call last):
...
KeyError: 'XXX'
Previously, this information was exposed as a function rather than a
dictionary. This is still supported::
>>> print_list(country_timezones('nz'))
Pacific/Auckland
Pacific/Chatham
"""
def __call__(self, iso3166_code):
"""Backwards compatibility."""
return self[iso3166_code]
def _fill(self):
data = {}
zone_tab = open_resource('zone.tab')
try:
for line in zone_tab:
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
if zone not in all_timezones_set: # noqa
continue
try:
data[code].append(zone)
except KeyError:
data[code] = [zone]
self.data = data
finally:
zone_tab.close()
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary providing ISO3166 code -> English name.
>>> print(country_names['au'])
Australia
'''
def _fill(self):
data = {}
zone_tab = open_resource('iso3166.tab')
try:
for line in zone_tab.readlines():
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, name = line.split(None, 1)
data[code] = name.strip()
self.data = data
finally:
zone_tab.close()
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return ZERO
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def FixedOffset(offset, _tzinfos={}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> str(one.utcoffset(datetime.datetime.now()))
'-1 day, 18:30:00'
>>> str(one.dst(datetime.datetime.now()))
'0:00:00'
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> str(two.utcoffset(datetime.datetime.now()))
'23:00:00'
>>> str(two.dst(datetime.datetime.now()))
'0:00:00'
    The offset must be strictly between -1 and +1 day, non-inclusive.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
        # We haven't seen this one before; we need to save it.
        # Use setdefault to avoid a race condition and make sure we
        # have only one instance.
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
FixedOffset.__safe_for_unpickling__ = True
def _test():
import doctest
sys.path.insert(0, os.pardir)
import pytz
return doctest.testmod(pytz)
if __name__ == '__main__':
_test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Nelson',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Nuuk',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qostanay',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kanton',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fort_Nelson',
'America/Fortaleza',
'America/Glace_Bay',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Nuuk',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qostanay',
'Asia/Qyzylorda',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Kanton',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = LazyList(
tz for tz in common_timezones if tz in all_timezones)
common_timezones_set = LazySet(common_timezones)
| 35,163 | Python | .py | 1,439 | 20.931897 | 97 | 0.679528 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,614 | tzinfo.py | rembo10_headphones/lib/pytz/tzinfo.py |
'''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
_timedelta_cache = {}
def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt
_ttinfo_cache = {}
def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
# Overridden in subclass
_utcoffset = None
_tzname = None
zone = None
def __str__(self):
return self.zone
class StaticTzInfo(BaseTzInfo):
'''A timezone that has a constant offset from UTC
These timezones are rare, as most locations have changed their
offset at some point in their history
'''
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if dt.tzinfo is not None and dt.tzinfo is not self:
raise ValueError('fromutc: dt.tzinfo is not self')
return (dt + self._utcoffset).replace(tzinfo=self)
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return _notime
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._tzname
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime.
This is normally a no-op, as StaticTzInfo timezones never have
ambiguous cases to correct:
>>> from pytz import timezone
>>> gmt = timezone('GMT')
>>> isinstance(gmt, StaticTzInfo)
True
>>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
>>> gmt.normalize(dt) is dt
True
The supported method of converting between timezones is to use
datetime.astimezone(). Currently normalize() also works:
>>> la = timezone('America/Los_Angeles')
>>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> gmt.normalize(dt).strftime(fmt)
'2011-05-07 08:02:03 GMT (+0000)'
'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return '<StaticTzInfo %r>' % (self.zone,)
def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope
        # with database changes.
return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight saving time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
# Sorted list of DST transition times, UTC
_utc_transition_times = None
# [(utcoffset, dstoffset, tzname)] corresponding to
# _utc_transition_times entries
_transition_info = None
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
if _inf:
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = _inf
else:
_tzinfos = {}
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = (
self._transition_info[0])
_tzinfos[self._transition_info[0]] = self
for inf in self._transition_info[1:]:
if inf not in _tzinfos:
_tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if (dt.tzinfo is not None and
getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
raise ValueError('fromutc: dt.tzinfo is not self')
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
        is_dst is used to determine the correct timezone in the ambiguous
period at the end of daylight saving time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight saving time
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight saving time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6),
is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occurring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
        # Filter out the possibilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
        # If we get this far, we are in a weird timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone if is_dst=True
# Choose the latest (by UTC) applicable timezone if is_dst=False
# i.e., behave like end-of-DST transition
dates = {} # utc -> local
for local_dt in filtered_possible_loc_dt:
utc_time = (
local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset)
assert utc_time not in dates
dates[utc_time] = local_dt
return dates[[min, max][not is_dst](dates)]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> str(tz.utcoffset(ambiguous, is_dst=False))
'-1 day, 20:30:00'
>>> str(tz.utcoffset(ambiguous, is_dst=True))
'-1 day, 21:30:00'
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> str(tz.dst(normal))
'1:00:00'
>>> str(tz.dst(normal, is_dst=False))
'1:00:00'
>>> str(tz.dst(normal, is_dst=True))
'1:00:00'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> str(tz.dst(ambiguous, is_dst=False))
'0:00:00'
>>> str(tz.dst(ambiguous, is_dst=True))
'1:00:00'
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
def __repr__(self):
if self._dst:
dst = 'DST'
else:
dst = 'STD'
if self._utcoffset > _notime:
return '<DstTzInfo %r %s+%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
else:
return '<DstTzInfo %r %s%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
# database changes.
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset and
localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf]
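# Editor's note: a minimal sketch (not part of pytz) of why the custom
# __reduce__ methods and unpickler() above exist. Pickling a pytz tzinfo
# and loading it back should hand you the very same singleton instance,
# even across timezone database updates. Assumes pytz is importable;
# call the function manually to try it.
def _example_pickle_roundtrip():
    import pickle
    import pytz
    eastern = pytz.timezone('US/Eastern')
    restored = pickle.loads(pickle.dumps(eastern))
    assert restored is eastern  # same singleton object, not a copy
    return restored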
| 19,272 | Python | .py | 465 | 32.453763 | 78 | 0.602193 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,615 | exceptions.py | rembo10_headphones/lib/pytz/exceptions.py |
'''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class Error(Exception):
'''Base class for all exceptions raised by the pytz library'''
class UnknownTimeZoneError(KeyError, Error):
'''Exception raised when pytz is passed an unknown timezone.
>>> isinstance(UnknownTimeZoneError(), LookupError)
True
This class is actually a subclass of KeyError to provide backwards
compatibility with code relying on the undocumented behavior of earlier
pytz releases.
>>> isinstance(UnknownTimeZoneError(), KeyError)
True
And also a subclass of pytz.exceptions.Error, as are other pytz
exceptions.
>>> isinstance(UnknownTimeZoneError(), Error)
True
'''
pass
class InvalidTimeError(Error):
'''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
'''Exception raised when attempting to create an ambiguous wallclock time.
At the end of a DST transition period, a particular wallclock time will
occur twice (once before the clocks are set back, once after). Both
possibilities may be correct, unless further information is supplied.
See DstTzInfo.normalize() for more info
'''
class NonExistentTimeError(InvalidTimeError):
'''Exception raised when attempting to create a wallclock time that
cannot exist.
At the start of a DST transition period, the wallclock time jumps forward.
The instants jumped over never occur.
'''
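# Editor's note: a minimal sketch (not part of pytz) showing the two
# InvalidTimeError subclasses above being raised. DstTzInfo.localize()
# raises them when is_dst=None and the wallclock time is ambiguous or
# never existed. Assumes pytz is importable; call manually to try it.
def _example_invalid_times():
    from datetime import datetime
    import pytz
    eastern = pytz.timezone('US/Eastern')
    try:
        # 01:30 occurred twice on 2002-10-27 (end of DST).
        eastern.localize(datetime(2002, 10, 27, 1, 30), is_dst=None)
    except AmbiguousTimeError:
        print('ambiguous wallclock time')
    try:
        # 02:30 never existed on 2002-04-07 (start of DST).
        eastern.localize(datetime(2002, 4, 7, 2, 30), is_dst=None)
    except NonExistentTimeError:
        print('non-existent wallclock time')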
| 1,571 | Python | .py | 39 | 35.794872 | 78 | 0.751323 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,616 | reference.py | rembo10_headphones/lib/pytz/reference.py |
'''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''
from datetime import tzinfo, timedelta, datetime
from pytz import HOUR, ZERO, UTC
__all__ = [
'FixedOffset',
'LocalTimezone',
'USTimeZone',
'Eastern',
'Central',
'Mountain',
'Pacific',
'UTC'
]
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
import time as _time
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
# A class capturing the platform's idea of local time.
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)
# A complete implementation of current DST rules for major US time zones.
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April & the last in October.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
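# Editor's note: a sketch (not part of the module) exercising the
# classes above. Remember that these reference rules are only correct
# for the years 1987 to 2006.
def _example_reference_usage():
    ist = FixedOffset(330, 'IST')               # UTC+5:30, no DST
    print(ist.utcoffset(None))                  # 5:30:00
    summer = datetime(2002, 7, 1, 12, 0, tzinfo=Eastern)
    winter = datetime(2002, 1, 1, 12, 0, tzinfo=Eastern)
    print(Eastern.tzname(summer))               # 'EDT'
    print(Eastern.tzname(winter))               # 'EST'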
| 3,778 | Python | .py | 107 | 28.878505 | 76 | 0.639637 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,617 | edit.py | rembo10_headphones/lib/beetsplug/edit.py |
# This file is part of beets.
# Copyright 2016
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it.
"""
from beets import plugins
from beets import util
from beets import ui
from beets.dbcore import types
from beets.importer import action
from beets.ui.commands import _do_query, PromptChoice
import codecs
import subprocess
import yaml
from tempfile import NamedTemporaryFile
import os
import shlex
# These "safe" types can avoid the format/parse cycle that most fields go
# through: they are safe to edit with native YAML types.
SAFE_TYPES = (types.Float, types.Integer, types.Boolean)
class ParseError(Exception):
"""The modified file is unreadable. The user should be offered a chance to
fix the error.
"""
def edit(filename, log):
"""Open `filename` in a text editor.
"""
cmd = shlex.split(util.editor_command())
cmd.append(filename)
log.debug('invoking editor command: {!r}', cmd)
try:
subprocess.call(cmd)
except OSError as exc:
raise ui.UserError('could not run editor command {!r}: {}'.format(
cmd[0], exc
))
def dump(arg):
"""Dump a sequence of dictionaries as YAML for editing.
"""
return yaml.safe_dump_all(
arg,
allow_unicode=True,
default_flow_style=False,
)
def load(s):
"""Read a sequence of YAML documents back to a list of dictionaries
with string keys.
Can raise a `ParseError`.
"""
try:
out = []
for d in yaml.safe_load_all(s):
if not isinstance(d, dict):
raise ParseError(
'each entry must be a dictionary; found {}'.format(
type(d).__name__
)
)
# Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up.
out.append({str(k): v for k, v in d.items()})
except yaml.YAMLError as e:
raise ParseError(f'invalid YAML: {e}')
return out
def _safe_value(obj, key, value):
"""Check whether the `value` is safe to represent in YAML and trust as
returned from parsed YAML.
This ensures that values do not change their type when the user edits their
YAML representation.
"""
typ = obj._type(key)
return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type)
def flatten(obj, fields):
"""Represent `obj`, a `dbcore.Model` object, as a dictionary for
serialization. Only include the given `fields` if provided;
otherwise, include everything.
The resulting dictionary's keys are strings and the values are
safely YAML-serializable types.
"""
# Format each value.
d = {}
for key in obj.keys():
value = obj[key]
if _safe_value(obj, key, value):
# A safe value that is faithfully representable in YAML.
d[key] = value
else:
# A value that should be edited as a string.
d[key] = obj.formatted()[key]
# Possibly filter field names.
if fields:
return {k: v for k, v in d.items() if k in fields}
else:
return d
def apply_(obj, data):
"""Set the fields of a `dbcore.Model` object according to a
dictionary.
This is the opposite of `flatten`. The `data` dictionary should have
strings as values.
"""
for key, value in data.items():
if _safe_value(obj, key, value):
# A safe value *stayed* represented as a safe type. Assign it
# directly.
obj[key] = value
else:
# Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, str(value))
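# Editor's note: a small sketch (not part of the plugin) of the
# dump()/load() round-trip the editing workflow below depends on.
# Multiple YAML documents go out and come back as dictionaries with
# string keys; anything that is not a dictionary raises ParseError.
def _example_yaml_roundtrip():
    docs = [{'id': 1, 'title': 'Song A'}, {'id': 2, 'title': 'Song B'}]
    text = dump(docs)
    assert load(text) == docs
    try:
        load('- not\n- a\n- dictionary\n')
    except ParseError as exc:
        print('rejected:', exc)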
class EditPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
# The default fields to edit.
'albumfields': 'album albumartist',
'itemfields': 'track title artist album',
# Silently ignore any changes to these fields.
'ignore_fields': 'id path',
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self):
edit_command = ui.Subcommand(
'edit',
help='interactively edit metadata'
)
edit_command.parser.add_option(
'-f', '--field',
metavar='FIELD',
action='append',
help='edit this field also',
)
edit_command.parser.add_option(
'--all',
action='store_true', dest='all',
help='edit all fields',
)
edit_command.parser.add_album_option()
edit_command.func = self._edit_command
return [edit_command]
def _edit_command(self, lib, opts, args):
"""The CLI command function for the `beet edit` command.
"""
# Get the objects to edit.
query = ui.decargs(args)
items, albums = _do_query(lib, query, opts.album, False)
objs = albums if opts.album else items
if not objs:
ui.print_('Nothing to edit.')
return
# Get the fields to edit.
if opts.all:
fields = None
else:
fields = self._get_fields(opts.album, opts.field)
self.edit(opts.album, objs, fields)
def _get_fields(self, album, extra):
"""Get the set of fields to edit.
"""
# Start with the configured base fields.
if album:
fields = self.config['albumfields'].as_str_seq()
else:
fields = self.config['itemfields'].as_str_seq()
# Add the requested extra fields.
if extra:
fields += extra
# Ensure we always have the `id` field for identification.
fields.append('id')
return set(fields)
def edit(self, album, objs, fields):
"""The core editor function.
- `album`: A flag indicating whether we're editing Items or Albums.
- `objs`: The `Item`s or `Album`s to edit.
- `fields`: The set of field names to edit (or None to edit
everything).
"""
# Present the YAML to the user and let her change it.
success = self.edit_objects(objs, fields)
# Save the new data.
if success:
self.save_changes(objs)
def edit_objects(self, objs, fields):
"""Dump a set of Model objects to a file as text, ask the user
to edit it, and apply any changes to the objects.
Return a boolean indicating whether the edit succeeded.
"""
# Get the content to edit as raw data structures.
old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(mode='w', suffix='.yaml', delete=False,
encoding='utf-8')
old_str = dump(old_data)
new.write(old_str)
new.close()
# Loop until we have parseable data and the user confirms.
try:
while True:
# Ask the user to edit the data.
edit(new.name, self._log)
# Read the data back after editing and check whether anything
# changed.
with codecs.open(new.name, encoding='utf-8') as f:
new_str = f.read()
if new_str == old_str:
ui.print_("No changes; aborting.")
return False
# Parse the updated data.
try:
new_data = load(new_str)
except ParseError as e:
ui.print_(f"Could not read data: {e}")
if ui.input_yn("Edit again to fix? (Y/n)", True):
continue
else:
return False
# Show the changes.
                # If the objects are not in the DB yet, we need a copy of their
# original state for show_model_changes.
objs_old = [obj.copy() if obj.id < 0 else None
for obj in objs]
self.apply_data(objs, old_data, new_data)
changed = False
for obj, obj_old in zip(objs, objs_old):
changed |= ui.show_model_changes(obj, obj_old)
if not changed:
ui.print_('No changes to apply.')
return False
# Confirm the changes.
choice = ui.input_options(
('continue Editing', 'apply', 'cancel')
)
if choice == 'a': # Apply.
return True
elif choice == 'c': # Cancel.
return False
elif choice == 'e': # Keep editing.
                    # Reset the temporary changes to the objects. If we have a
# copy from above, use that, else reload from the database.
objs = [(old_obj or obj)
for old_obj, obj in zip(objs_old, objs)]
for obj in objs:
if not obj.id < 0:
obj.load()
continue
# Remove the temporary file before returning.
finally:
os.remove(new.name)
def apply_data(self, objs, old_data, new_data):
"""Take potentially-updated data and apply it to a set of Model
objects.
The objects are not written back to the database, so the changes
are temporary.
"""
if len(old_data) != len(new_data):
self._log.warning('number of objects changed from {} to {}',
len(old_data), len(new_data))
obj_by_id = {o.id: o for o in objs}
ignore_fields = self.config['ignore_fields'].as_str_seq()
for old_dict, new_dict in zip(old_data, new_data):
# Prohibit any changes to forbidden fields to avoid
# clobbering `id` and such by mistake.
forbidden = False
for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key):
self._log.warning('ignoring object whose {} changed', key)
forbidden = True
break
if forbidden:
continue
id_ = int(old_dict['id'])
apply_(obj_by_id[id_], new_dict)
def save_changes(self, objs):
"""Save a list of updated Model objects to the database.
"""
# Save to the database and possibly write tags.
for ob in objs:
if ob._dirty:
self._log.debug('saving changes to {}', ob)
ob.try_sync(ui.should_write(), ui.should_move())
# Methods for interactive importer execution.
def before_choose_candidate_listener(self, session, task):
"""Append an "Edit" choice and an "edit Candidates" choice (if
there are candidates) to the interactive importer prompt.
"""
choices = [PromptChoice('d', 'eDit', self.importer_edit)]
if task.candidates:
choices.append(PromptChoice('c', 'edit Candidates',
self.importer_edit_candidate))
return choices
def importer_edit(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on the *original* item tags.
"""
# Assign negative temporary ids to Items that are not in the database
# yet. By using negative values, no clash with items in the database
# can occur.
for i, obj in enumerate(task.items, start=1):
# The importer may set the id to None when re-importing albums.
if not obj._db or obj.id is None:
obj.id = -i
# Present the YAML to the user and let her change it.
fields = self._get_fields(album=False, extra=[])
success = self.edit_objects(task.items, fields)
# Remove temporary ids.
for obj in task.items:
if obj.id < 0:
obj.id = None
# Save the new data.
if success:
# Return action.RETAG, which makes the importer write the tags
# to the files if needed without re-applying metadata.
return action.RETAG
else:
# Edit cancelled / no edits made. Revert changes.
for obj in task.items:
obj.read()
def importer_edit_candidate(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on a *candidate*. The candidate's metadata is
applied to the original items.
"""
# Prompt the user for a candidate.
sel = ui.input_options([], numrange=(1, len(task.candidates)))
# Force applying the candidate on the items.
task.match = task.candidates[sel - 1]
task.apply_metadata()
return self.importer_edit(session, task)
| 13,892 | Python | .py | 338 | 30.573964 | 79 | 0.579911 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,618 | smartplaylist.py | rembo10_headphones/lib/beetsplug/smartplaylist.py |
# This file is part of beets.
# Copyright 2016, Dang Mai <contact@dangmai.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries.
"""
from beets.plugins import BeetsPlugin
from beets import ui
from beets.util import (mkdirall, normpath, sanitize_path, syspath,
bytestring_path, path_as_posix)
from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery
from beets.dbcore.query import MultipleSort, ParsingError
import os
try:
from urllib.request import pathname2url
except ImportError:
# python2 is a bit different
from urllib import pathname2url
class SmartPlaylistPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'relative_to': None,
'playlist_dir': '.',
'auto': True,
'playlists': [],
'forward_slash': False,
'prefix': '',
'urlencode': False,
})
self.config['prefix'].redact = True # May contain username/password.
self._matched_playlists = None
self._unmatched_playlists = None
if self.config['auto']:
self.register_listener('database_change', self.db_change)
def commands(self):
spl_update = ui.Subcommand(
'splupdate',
help='update the smart playlists. Playlist names may be '
'passed as arguments.'
)
spl_update.func = self.update_cmd
return [spl_update]
def update_cmd(self, lib, opts, args):
self.build_queries()
if args:
args = set(ui.decargs(args))
for a in list(args):
if not a.endswith(".m3u"):
args.add(f"{a}.m3u")
playlists = {(name, q, a_q)
for name, q, a_q in self._unmatched_playlists
if name in args}
if not playlists:
raise ui.UserError(
'No playlist matching any of {} found'.format(
[name for name, _, _ in self._unmatched_playlists])
)
self._matched_playlists = playlists
self._unmatched_playlists -= playlists
else:
self._matched_playlists = self._unmatched_playlists
self.update_playlists(lib)
def build_queries(self):
"""
Instantiate queries for the playlists.
        Each playlist has 2 queries: one for items, one for albums, each
        with a sort. We must also remember its name. _unmatched_playlists
        is a set of tuples (name, (q, q_sort), (album_q, album_q_sort)).
        sort may be any sort, or NullSort, or None. None and NullSort are
        equivalent and both eval to False.
        More precisely:
        - it will be NullSort when a playlist query ('query' or
          'album_query') is a single item or a list with 1 element
        - it will be None when there are multiple items in a query
"""
self._unmatched_playlists = set()
self._matched_playlists = set()
for playlist in self.config['playlists'].get(list):
if 'name' not in playlist:
self._log.warning("playlist configuration is missing name")
continue
playlist_data = (playlist['name'],)
try:
for key, model_cls in (('query', Item),
('album_query', Album)):
qs = playlist.get(key)
if qs is None:
query_and_sort = None, None
elif isinstance(qs, str):
query_and_sort = parse_query_string(qs, model_cls)
elif len(qs) == 1:
query_and_sort = parse_query_string(qs[0], model_cls)
else:
# multiple queries and sorts
queries, sorts = zip(*(parse_query_string(q, model_cls)
for q in qs))
query = OrQuery(queries)
final_sorts = []
for s in sorts:
if s:
if isinstance(s, MultipleSort):
final_sorts += s.sorts
else:
final_sorts.append(s)
if not final_sorts:
sort = None
elif len(final_sorts) == 1:
sort, = final_sorts
else:
sort = MultipleSort(final_sorts)
query_and_sort = query, sort
playlist_data += (query_and_sort,)
except ParsingError as exc:
self._log.warning("invalid query in playlist {}: {}",
playlist['name'], exc)
continue
self._unmatched_playlists.add(playlist_data)
def matches(self, model, query, album_query):
if album_query and isinstance(model, Album):
return album_query.match(model)
if query and isinstance(model, Item):
return query.match(model)
return False
def db_change(self, lib, model):
if self._unmatched_playlists is None:
self.build_queries()
for playlist in self._unmatched_playlists:
n, (q, _), (a_q, _) = playlist
if self.matches(model, q, a_q):
self._log.debug(
"{0} will be updated because of {1}", n, model)
self._matched_playlists.add(playlist)
self.register_listener('cli_exit', self.update_playlists)
self._unmatched_playlists -= self._matched_playlists
def update_playlists(self, lib):
self._log.info("Updating {0} smart playlists...",
len(self._matched_playlists))
playlist_dir = self.config['playlist_dir'].as_filename()
playlist_dir = bytestring_path(playlist_dir)
relative_to = self.config['relative_to'].get()
if relative_to:
relative_to = normpath(relative_to)
# Maps playlist filenames to lists of track filenames.
m3us = {}
for playlist in self._matched_playlists:
name, (query, q_sort), (album_query, a_q_sort) = playlist
self._log.debug("Creating playlist {0}", name)
items = []
if query:
items.extend(lib.items(query, q_sort))
if album_query:
for album in lib.albums(album_query, a_q_sort):
items.extend(album.items())
# As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names.
for item in items:
m3u_name = item.evaluate_template(name, True)
m3u_name = sanitize_path(m3u_name, lib.replacements)
if m3u_name not in m3us:
m3us[m3u_name] = []
item_path = item.path
if relative_to:
item_path = os.path.relpath(item.path, relative_to)
if item_path not in m3us[m3u_name]:
m3us[m3u_name].append(item_path)
prefix = bytestring_path(self.config['prefix'].as_str())
# Write all of the accumulated track lists to files.
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir,
bytestring_path(m3u)))
mkdirall(m3u_path)
with open(syspath(m3u_path), 'wb') as f:
for path in m3us[m3u]:
if self.config['forward_slash'].get():
path = path_as_posix(path)
if self.config['urlencode']:
path = bytestring_path(pathname2url(path))
f.write(prefix + path + b'\n')
self._log.info("{0} playlists updated", len(self._matched_playlists))
| 8,735 | Python | .py | 190 | 32.147368 | 79 | 0.546704 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,619 | unimported.py | rembo10_headphones/lib/beetsplug/unimported.py |
# This file is part of beets.
# Copyright 2019, Joris Jensen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
List all files in the library folder which are not listed in the
beets library database, including art files
"""
import os
from beets import util
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
__author__ = 'https://github.com/MrNuggelz'
class Unimported(BeetsPlugin):
def __init__(self):
super().__init__()
        self.config.add(
            {
                'ignore_extensions': [],
                'ignore_subdirectories': [],
            }
        )
def commands(self):
def print_unimported(lib, opts, args):
ignore_exts = [
('.' + x).encode()
for x in self.config["ignore_extensions"].as_str_seq()
]
ignore_dirs = [
os.path.join(lib.directory, x.encode())
for x in self.config["ignore_subdirectories"].as_str_seq()
]
in_folder = {
os.path.join(r, file)
for r, d, f in os.walk(lib.directory)
for file in f
if not any(
[file.endswith(ext) for ext in ignore_exts]
+ [r in ignore_dirs]
)
}
in_library = {x.path for x in lib.items()}
art_files = {x.artpath for x in lib.albums()}
for f in in_folder - in_library - art_files:
print_(util.displayable_path(f))
unimported = Subcommand(
'unimported',
help='list all files in the library folder which are not listed'
' in the beets library database')
unimported.func = print_unimported
return [unimported]
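# Editor's note: a sketch (not part of the plugin) of the set
# difference at the heart of print_unimported() above, reduced to
# plain paths. 'root' and 'known_paths' are hypothetical values
# supplied by the caller.
def _example_unimported(root, known_paths):
    on_disk = {
        os.path.join(dirpath, name)
        for dirpath, _dirnames, filenames in os.walk(root)
        for name in filenames
    }
    return on_disk - set(known_paths)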
| 2,292 | Python | .py | 59 | 30 | 76 | 0.605665 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,620 | absubmit.py | rembo10_headphones/lib/beetsplug/absubmit.py |
# This file is part of beets.
# Copyright 2016, Pieter Mulder.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Calculate acoustic information and submit to AcousticBrainz.
"""
import errno
import hashlib
import json
import os
import subprocess
import tempfile
from distutils.spawn import find_executable
import requests
from beets import plugins
from beets import util
from beets import ui
# We use this field to check whether AcousticBrainz info is present.
PROBE_FIELD = 'mood_acoustic'
class ABSubmitError(Exception):
"""Raised when failing to analyse file with extractor."""
def call(args):
"""Execute the command and return its output.
    Raise an ABSubmitError on failure.
"""
try:
return util.command_output(args).stdout
except subprocess.CalledProcessError as e:
raise ABSubmitError(
'{} exited with status {}'.format(args[0], e.returncode)
)
class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'extractor': '',
'force': False,
'pretend': False
})
self.extractor = self.config['extractor'].as_str()
if self.extractor:
self.extractor = util.normpath(self.extractor)
            # Explicit path to extractor
if not os.path.isfile(self.extractor):
raise ui.UserError(
'Extractor command does not exist: {0}.'.
format(self.extractor)
)
else:
# Implicit path to extractor, search for it in path
self.extractor = 'streaming_extractor_music'
try:
call([self.extractor])
except OSError:
raise ui.UserError(
'No extractor command found: please install the extractor'
' binary from https://acousticbrainz.org/download'
)
except ABSubmitError:
                # Extractor found; it will exit with an error if not
                # called with the correct number of arguments.
pass
# Get the executable location on the system, which we need
# to calculate the SHA-1 hash.
self.extractor = find_executable(self.extractor)
# Calculate extractor hash.
self.extractor_sha = hashlib.sha1()
with open(self.extractor, 'rb') as extractor:
self.extractor_sha.update(extractor.read())
self.extractor_sha = self.extractor_sha.hexdigest()
base_url = 'https://acousticbrainz.org/api/v1/{mbid}/low-level'
def commands(self):
cmd = ui.Subcommand(
'absubmit',
help='calculate and submit AcousticBrainz analysis'
)
cmd.parser.add_option(
'-f', '--force', dest='force_refetch',
action='store_true', default=False,
help='re-download data when already present'
)
cmd.parser.add_option(
'-p', '--pretend', dest='pretend_fetch',
action='store_true', default=False,
help='pretend to perform action, but show \
only files which would be processed'
)
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
# Get items from arguments
items = lib.items(ui.decargs(args))
self.opts = opts
util.par_map(self.analyze_submit, items)
def analyze_submit(self, item):
analysis = self._get_analysis(item)
if analysis:
self._submit_data(item, analysis)
def _get_analysis(self, item):
mbid = item['mb_trackid']
# Avoid re-analyzing files that already have AB data.
if not self.opts.force_refetch and not self.config['force']:
if item.get(PROBE_FIELD):
return None
# If file has no MBID, skip it.
if not mbid:
self._log.info('Not analysing {}, missing '
'musicbrainz track id.', item)
return None
if self.opts.pretend_fetch or self.config['pretend']:
self._log.info('pretend action - extract item: {}', item)
return None
# Temporary file to save extractor output to, extractor only works
# if an output file is given. Here we use a temporary file to copy
# the data into a python object and then remove the file from the
# system.
tmp_file, filename = tempfile.mkstemp(suffix='.json')
try:
# Close the file, so the extractor can overwrite it.
os.close(tmp_file)
try:
call([self.extractor, util.syspath(item.path), filename])
except ABSubmitError as e:
self._log.warning(
'Failed to analyse {item} for AcousticBrainz: {error}',
item=item, error=e
)
return None
with open(filename) as tmp_file:
analysis = json.load(tmp_file)
# Add the hash to the output.
analysis['metadata']['version']['essentia_build_sha'] = \
self.extractor_sha
return analysis
finally:
try:
os.remove(filename)
except OSError as e:
# ENOENT means file does not exist, just ignore this error.
if e.errno != errno.ENOENT:
raise
def _submit_data(self, item, data):
mbid = item['mb_trackid']
headers = {'Content-Type': 'application/json'}
response = requests.post(self.base_url.format(mbid=mbid),
json=data, headers=headers)
# Test that request was successful and raise an error on failure.
if response.status_code != 200:
try:
message = response.json()['message']
except (ValueError, KeyError) as e:
message = f'unable to get error message: {e}'
self._log.error(
'Failed to submit AcousticBrainz analysis of {item}: '
                '{message}.', item=item, message=message
)
else:
self._log.debug('Successfully submitted AcousticBrainz analysis '
'for {}.', item)
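# Editor's note: a sketch (not part of the plugin) of the temporary-
# file pattern used by _get_analysis() above, for tools that can only
# write their results to an output file. 'command' is a hypothetical
# argv list whose tool writes JSON to its last argument.
def _example_capture_json(command, input_path):
    fd, out_path = tempfile.mkstemp(suffix='.json')
    os.close(fd)  # release our handle so the tool can rewrite the file
    try:
        subprocess.check_call(command + [input_path, out_path])
        with open(out_path) as handle:
            return json.load(handle)
    finally:
        os.remove(out_path)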
| 6,915 | Python | .py | 168 | 30.827381 | 78 | 0.600536 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,621 | parentwork.py | rembo10_headphones/lib/beetsplug/parentwork.py |
# This file is part of beets.
# Copyright 2017, Dorian Soergel.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Gets parent work, its disambiguation and id, composer, composer sort name
and work composition date
"""
from beets import ui
from beets.plugins import BeetsPlugin
import musicbrainzngs
def direct_parent_id(mb_workid, work_date=None):
"""Given a Musicbrainz work id, find the id one of the works the work is
part of and the first composition date it encounters.
"""
work_info = musicbrainzngs.get_work_by_id(mb_workid,
includes=["work-rels",
"artist-rels"])
if 'artist-relation-list' in work_info['work'] and work_date is None:
for artist in work_info['work']['artist-relation-list']:
if artist['type'] == 'composer':
if 'end' in artist.keys():
work_date = artist['end']
if 'work-relation-list' in work_info['work']:
for direct_parent in work_info['work']['work-relation-list']:
if direct_parent['type'] == 'parts' \
and direct_parent.get('direction') == 'backward':
direct_id = direct_parent['work']['id']
return direct_id, work_date
return None, work_date
def work_parent_id(mb_workid):
"""Find the parent work id and composition date of a work given its id.
"""
work_date = None
while True:
new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)
if not new_mb_workid:
return mb_workid, work_date
mb_workid = new_mb_workid
def find_parentwork_info(mb_workid):
"""Get the MusicBrainz information dict about a parent work, including
the artist relations, and the composition date for a work's parent work.
"""
parent_id, work_date = work_parent_id(mb_workid)
work_info = musicbrainzngs.get_work_by_id(parent_id,
includes=["artist-rels"])
return work_info, work_date
class ParentWorkPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'auto': False,
'force': False,
})
if self.config['auto']:
self.import_stages = [self.imported]
def commands(self):
def func(lib, opts, args):
self.config.set_args(opts)
force_parent = self.config['force'].get(bool)
write = ui.should_write()
for item in lib.items(ui.decargs(args)):
changed = self.find_work(item, force_parent)
if changed:
item.store()
if write:
item.try_write()
command = ui.Subcommand(
'parentwork',
help='fetch parent works, composers and dates')
command.parser.add_option(
'-f', '--force', dest='force',
action='store_true', default=None,
help='re-fetch when parent work is already present')
command.func = func
return [command]
def imported(self, session, task):
"""Import hook for fetching parent works automatically.
"""
force_parent = self.config['force'].get(bool)
for item in task.imported_items():
self.find_work(item, force_parent)
item.store()
def get_info(self, item, work_info):
"""Given the parent work info dict, fetch parent_composer,
parent_composer_sort, parentwork, parentwork_disambig, mb_workid and
composer_ids.
"""
parent_composer = []
parent_composer_sort = []
parentwork_info = {}
composer_exists = False
if 'artist-relation-list' in work_info['work']:
for artist in work_info['work']['artist-relation-list']:
if artist['type'] == 'composer':
composer_exists = True
parent_composer.append(artist['artist']['name'])
parent_composer_sort.append(artist['artist']['sort-name'])
if 'end' in artist.keys():
parentwork_info["parentwork_date"] = artist['end']
parentwork_info['parent_composer'] = ', '.join(parent_composer)
parentwork_info['parent_composer_sort'] = ', '.join(
parent_composer_sort)
if not composer_exists:
self._log.debug(
'no composer for {}; add one at '
'https://musicbrainz.org/work/{}',
item, work_info['work']['id'],
)
parentwork_info['parentwork'] = work_info['work']['title']
parentwork_info['mb_parentworkid'] = work_info['work']['id']
if 'disambiguation' in work_info['work']:
parentwork_info['parentwork_disambig'] = work_info[
'work']['disambiguation']
else:
parentwork_info['parentwork_disambig'] = None
return parentwork_info
def find_work(self, item, force):
"""Finds the parent work of a recording and populates the tags
accordingly.
The parent work is found recursively, by finding the direct parent
repeatedly until there are no more links in the chain. We return the
final, topmost work in the chain.
Namely, the tags parentwork, parentwork_disambig, mb_parentworkid,
parent_composer, parent_composer_sort and work_date are populated.
"""
if not item.mb_workid:
            self._log.info(
                'No work for {}, add one at '
                'https://musicbrainz.org/recording/{}',
                item, item.mb_trackid)
return
hasparent = hasattr(item, 'parentwork')
work_changed = True
if hasattr(item, 'parentwork_workid_current'):
work_changed = item.parentwork_workid_current != item.mb_workid
if force or not hasparent or work_changed:
try:
work_info, work_date = find_parentwork_info(item.mb_workid)
except musicbrainzngs.musicbrainz.WebServiceError as e:
self._log.debug("error fetching work: {}", e)
return
parent_info = self.get_info(item, work_info)
parent_info['parentwork_workid_current'] = item.mb_workid
if 'parent_composer' in parent_info:
self._log.debug("Work fetched: {} - {}",
parent_info['parentwork'],
parent_info['parent_composer'])
else:
self._log.debug("Work fetched: {} - no parent composer",
parent_info['parentwork'])
elif hasparent:
self._log.debug("{}: Work present, skipping", item)
return
# apply all non-null values to the item
for key, value in parent_info.items():
if value:
item[key] = value
if work_date:
item['work_date'] = work_date
return ui.show_model_changes(
item, fields=['parentwork', 'parentwork_disambig',
'mb_parentworkid', 'parent_composer',
'parent_composer_sort', 'work_date',
'parentwork_workid_current', 'parentwork_date'])
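# Editor's note: a sketch (not part of the plugin) of the same
# follow-the-parent loop as work_parent_id() above, using a plain
# mapping instead of MusicBrainz lookups. 'parent_of' is a
# hypothetical dict of child id -> parent id.
def _example_walk_to_root(node, parent_of):
    while True:
        parent = parent_of.get(node)
        if parent is None:
            return node
        node = parent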
| 7,948 | Python | .py | 172 | 34.715116 | 78 | 0.586403 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,622 | play.py | rembo10_headphones/lib/beetsplug/play.py |
# This file is part of beets.
# Copyright 2016, David Hamp-Gonsalves
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Send the results of a query to the configured music player as a playlist.
"""
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.ui.commands import PromptChoice
from beets import config
from beets import ui
from beets import util
from os.path import relpath
from tempfile import NamedTemporaryFile
import subprocess
import shlex
# Indicate where arguments should be inserted into the command string.
# If this is missing, they're placed at the end.
ARGS_MARKER = '$args'
def play(command_str, selection, paths, open_args, log, item_type='track',
keep_open=False):
"""Play items in paths with command_str and optional arguments. If
keep_open, return to beets, otherwise exit once command runs.
"""
# Print number of tracks or albums to be played, log command to be run.
item_type += 's' if len(selection) > 1 else ''
ui.print_('Playing {} {}.'.format(len(selection), item_type))
log.debug('executing command: {} {!r}', command_str, open_args)
try:
if keep_open:
command = shlex.split(command_str)
command = command + open_args
subprocess.call(command)
else:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
f"Could not play the query: {exc}")
class PlayPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config['play'].add({
'command': None,
'use_folders': False,
'relative_to': None,
'raw': False,
'warning_threshold': 100,
'bom': False,
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self):
play_command = Subcommand(
'play',
help='send music to a player as a playlist'
)
play_command.parser.add_album_option()
play_command.parser.add_option(
'-A', '--args',
action='store',
help='add additional arguments to the command',
)
play_command.parser.add_option(
'-y', '--yes',
action="store_true",
help='skip the warning threshold',
)
play_command.func = self._play_command
return [play_command]
def _play_command(self, lib, opts, args):
"""The CLI command function for `beet play`. Create a list of paths
from query, determine if tracks or albums are to be played.
"""
use_folders = config['play']['use_folders'].get(bool)
relative_to = config['play']['relative_to'].get()
if relative_to:
relative_to = util.normpath(relative_to)
# Perform search by album and add folders rather than tracks to
# playlist.
if opts.album:
selection = lib.albums(ui.decargs(args))
paths = []
sort = lib.get_default_album_sort()
for album in selection:
if use_folders:
paths.append(album.item_dir())
else:
paths.extend(item.path
for item in sort.sort(album.items()))
item_type = 'album'
# Perform item query and add tracks to playlist.
else:
selection = lib.items(ui.decargs(args))
paths = [item.path for item in selection]
item_type = 'track'
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
if not selection:
ui.print_(ui.colorize('text_warning',
f'No {item_type} to play.'))
return
open_args = self._playlist_or_paths(paths)
command_str = self._command_str(opts.args)
# Check if the selection exceeds configured threshold. If True,
# cancel, otherwise proceed with play command.
if opts.yes or not self._exceeds_threshold(
selection, command_str, open_args, item_type):
play(command_str, selection, paths, open_args, self._log,
item_type)
def _command_str(self, args=None):
"""Create a command string from the config command and optional args.
"""
command_str = config['play']['command'].get()
if not command_str:
return util.open_anything()
# Add optional arguments to the player command.
if args:
if ARGS_MARKER in command_str:
return command_str.replace(ARGS_MARKER, args)
else:
return f"{command_str} {args}"
else:
# Don't include the marker in the command.
return command_str.replace(" " + ARGS_MARKER, "")
def _playlist_or_paths(self, paths):
"""Return either the raw paths of items or a playlist of the items.
"""
if config['play']['raw']:
return paths
else:
return [self._create_tmp_playlist(paths)]
def _exceeds_threshold(self, selection, command_str, open_args,
item_type='track'):
"""Prompt user whether to abort if playlist exceeds threshold. If
True, cancel playback. If False, execute play command.
"""
warning_threshold = config['play']['warning_threshold'].get(int)
# Warn user before playing any huge playlists.
if warning_threshold and len(selection) > warning_threshold:
if len(selection) > 1:
item_type += 's'
ui.print_(ui.colorize(
'text_warning',
'You are about to queue {} {}.'.format(
len(selection), item_type)))
if ui.input_options(('Continue', 'Abort')) == 'a':
return True
return False
def _create_tmp_playlist(self, paths_list):
"""Create a temporary .m3u file. Return the filename.
"""
utf8_bom = config['play']['bom'].get(bool)
m3u = NamedTemporaryFile('wb', suffix='.m3u', delete=False)
if utf8_bom:
m3u.write(b'\xEF\xBB\xBF')
for item in paths_list:
m3u.write(item + b'\n')
m3u.close()
return m3u.name
def before_choose_candidate_listener(self, session, task):
"""Append a "Play" choice to the interactive importer prompt.
"""
return [PromptChoice('y', 'plaY', self.importer_play)]
def importer_play(self, session, task):
"""Get items from current import task and send to play function.
"""
selection = task.items
paths = [item.path for item in selection]
open_args = self._playlist_or_paths(paths)
command_str = self._command_str()
if not self._exceeds_threshold(selection, command_str, open_args):
play(command_str, selection, paths, open_args, self._log,
keep_open=True)
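# Editor's note: a sketch (not part of the plugin) of the ARGS_MARKER
# handling implemented in _command_str() above: '$args' marks where
# extra arguments are spliced into the player command; without the
# marker they are simply appended. The command strings are examples.
def _example_args_marker(command_str, args):
    if ARGS_MARKER in command_str:
        return command_str.replace(ARGS_MARKER, args)
    return f'{command_str} {args}'
# e.g. _example_args_marker('mpv $args --no-video', '--shuffle')
# -> 'mpv --shuffle --no-video'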
| 7,677 | Python | .py | 182 | 32.516484 | 77 | 0.60386 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,623 | importfeeds.py | rembo10_headphones/lib/beetsplug/importfeeds.py |
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Write paths of imported files in various formats to ease later import in a
music player. Also allow printing the new file locations to stdout in case
one wants to manually add music to a player by its path.
"""
import datetime
import os
import re
from beets.plugins import BeetsPlugin
from beets.util import mkdirall, normpath, syspath, bytestring_path, link
from beets import config
M3U_DEFAULT_NAME = 'imported.m3u'
def _build_m3u_filename(basename):
"""Builds unique m3u filename by appending given basename to current
date."""
basename = re.sub(r"[\s,/\\'\"]", '_', basename)
date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M")
path = normpath(os.path.join(
config['importfeeds']['dir'].as_filename(),
date + '_' + basename + '.m3u'
))
return path
def _write_m3u(m3u_path, items_paths):
"""Append relative paths to items into m3u file.
"""
mkdirall(m3u_path)
with open(syspath(m3u_path), 'ab') as f:
for path in items_paths:
f.write(path + b'\n')
class ImportFeedsPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'formats': [],
'm3u_name': 'imported.m3u',
'dir': None,
'relative_to': None,
'absolute_path': False,
})
relative_to = self.config['relative_to'].get()
if relative_to:
self.config['relative_to'] = normpath(relative_to)
else:
self.config['relative_to'] = self.get_feeds_dir()
self.register_listener('album_imported', self.album_imported)
self.register_listener('item_imported', self.item_imported)
def get_feeds_dir(self):
feeds_dir = self.config['dir'].get()
if feeds_dir:
return os.path.expanduser(bytestring_path(feeds_dir))
return config['directory'].as_filename()
def _record_items(self, lib, basename, items):
"""Records relative paths to the given items for each feed format
"""
feedsdir = bytestring_path(self.get_feeds_dir())
formats = self.config['formats'].as_str_seq()
relative_to = self.config['relative_to'].get() \
or self.get_feeds_dir()
relative_to = bytestring_path(relative_to)
paths = []
for item in items:
if self.config['absolute_path']:
paths.append(item.path)
else:
try:
relpath = os.path.relpath(item.path, relative_to)
except ValueError:
# On Windows, it is sometimes not possible to construct a
# relative path (if the files are on different disks).
relpath = item.path
paths.append(relpath)
if 'm3u' in formats:
m3u_basename = bytestring_path(
self.config['m3u_name'].as_str())
m3u_path = os.path.join(feedsdir, m3u_basename)
_write_m3u(m3u_path, paths)
if 'm3u_multi' in formats:
m3u_path = _build_m3u_filename(basename)
_write_m3u(m3u_path, paths)
if 'link' in formats:
for path in paths:
dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)):
link(path, dest)
if 'echo' in formats:
self._log.info("Location of imported music:")
for path in paths:
self._log.info(" {0}", path)
def album_imported(self, lib, album):
self._record_items(lib, album.album, album.items())
def item_imported(self, lib, item):
self._record_items(lib, item.title, [item])
| 4,392 | Python | .py | 104 | 33.740385 | 77 | 0.622274 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,624 | inline.py | rembo10_headphones/lib/beetsplug/inline.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows inline path template customization code in the config file.
"""
import traceback
import itertools
from beets.plugins import BeetsPlugin
from beets import config
FUNC_NAME = '__INLINE_FUNC__'
class InlineError(Exception):
"""Raised when a runtime error occurs in an inline expression.
"""
def __init__(self, code, exc):
super().__init__(
("error in inline path field code:\n"
"%s\n%s: %s") % (code, type(exc).__name__, str(exc))
)
def _compile_func(body):
"""Given Python code for a function body, return a compiled
callable that invokes that code.
"""
body = 'def {}():\n {}'.format(
FUNC_NAME,
body.replace('\n', '\n ')
)
code = compile(body, 'inline', 'exec')
env = {}
eval(code, env)
return env[FUNC_NAME]
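# A minimal sketch of _compile_func in isolation (hypothetical body string):
# the body is re-indented under a generated `def`, executed in a fresh
# namespace, and the resulting function object is returned.
def _example_compile_func():
    func = _compile_func('value = 6 * 7\nreturn value')
    return func()  # -> 42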
class InlinePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config.add({
'pathfields': {}, # Legacy name.
'item_fields': {},
'album_fields': {},
})
# Item fields.
for key, view in itertools.chain(config['item_fields'].items(),
config['pathfields'].items()):
self._log.debug('adding item field {0}', key)
func = self.compile_inline(view.as_str(), False)
if func is not None:
self.template_fields[key] = func
# Album fields.
for key, view in config['album_fields'].items():
self._log.debug('adding album field {0}', key)
func = self.compile_inline(view.as_str(), True)
if func is not None:
self.album_template_fields[key] = func
def compile_inline(self, python_code, album):
"""Given a Python expression or function body, compile it as a path
field function. The returned function takes a single argument, an
Item, and returns a Unicode string. If the expression cannot be
compiled, then an error is logged and this function returns None.
"""
# First, try compiling as a single function.
try:
code = compile(f'({python_code})', 'inline', 'eval')
except SyntaxError:
# Fall back to a function body.
try:
func = _compile_func(python_code)
except SyntaxError:
self._log.error('syntax error in inline field definition:\n'
'{0}', traceback.format_exc())
return
else:
is_expr = False
else:
is_expr = True
def _dict_for(obj):
out = dict(obj)
if album:
out['items'] = list(obj.items())
return out
if is_expr:
# For expressions, just evaluate and return the result.
def _expr_func(obj):
values = _dict_for(obj)
try:
return eval(code, values)
except Exception as exc:
raise InlineError(python_code, exc)
return _expr_func
else:
# For function bodies, invoke the function with values as global
# variables.
def _func_func(obj):
old_globals = dict(func.__globals__)
func.__globals__.update(_dict_for(obj))
try:
return func()
except Exception as exc:
raise InlineError(python_code, exc)
finally:
func.__globals__.clear()
func.__globals__.update(old_globals)
return _func_func
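# Hypothetical config.yaml snippet (assumed field names, not from this file)
# showing the two accepted forms, a single expression and a function body:
#
#   item_fields:
#     initial: albumartist[0].upper()
#     multidisc: |
#       if disctotal > 1:
#           return 'disc %i' % disc
#       return ''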
| 4,359 | Python | .py | 110 | 29.409091 | 76 | 0.570754 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,625 | plexupdate.py | rembo10_headphones/lib/beetsplug/plexupdate.py |
"""Updates an Plex library whenever the beets library is changed.
Plex Home users enter the Plex Token to enable updating.
Put something like the following in your config.yaml to configure:
plex:
host: localhost
port: 32400
token: token
"""
import requests
from xml.etree import ElementTree
from urllib.parse import urljoin, urlencode
from beets import config
from beets.plugins import BeetsPlugin
def get_music_section(host, port, token, library_name, secure,
ignore_cert_errors):
"""Getting the section key for the music library in Plex.
"""
api_endpoint = append_token('library/sections', token)
url = urljoin('{}://{}:{}'.format(get_protocol(secure), host,
port), api_endpoint)
# Sends request.
r = requests.get(url, verify=not ignore_cert_errors)
# Parse xml tree and extract music section key.
tree = ElementTree.fromstring(r.content)
for child in tree.findall('Directory'):
if child.get('title') == library_name:
return child.get('key')
def update_plex(host, port, token, library_name, secure,
ignore_cert_errors):
"""Ignore certificate errors if configured to.
"""
if ignore_cert_errors:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
"""Sends request to the Plex api to start a library refresh.
"""
    # Get the section key and build the URL.
section_key = get_music_section(host, port, token, library_name,
secure, ignore_cert_errors)
api_endpoint = f'library/sections/{section_key}/refresh'
api_endpoint = append_token(api_endpoint, token)
url = urljoin('{}://{}:{}'.format(get_protocol(secure), host,
port), api_endpoint)
# Sends request and returns requests object.
r = requests.get(url, verify=not ignore_cert_errors)
return r
def append_token(url, token):
"""Appends the Plex Home token to the api call if required.
"""
if token:
url += '?' + urlencode({'X-Plex-Token': token})
return url
def get_protocol(secure):
if secure:
return 'https'
else:
return 'http'
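# Minimal sketch of the URL helpers above (hypothetical token and section):
def _example_refresh_url():
    endpoint = append_token('library/sections/5/refresh', 'abc123')
    # -> 'library/sections/5/refresh?X-Plex-Token=abc123'
    return urljoin('{}://{}:{}'.format(get_protocol(True), 'localhost',
                                       32400), endpoint)
    # -> 'https://localhost:32400/library/sections/5/refresh?X-Plex-Token=abc123'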
class PlexUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
# Adding defaults.
config['plex'].add({
'host': 'localhost',
'port': 32400,
'token': '',
'library_name': 'Music',
'secure': False,
'ignore_cert_errors': False})
config['plex']['token'].redact = True
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update for the end"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Plex server.
"""
self._log.info('Updating Plex library...')
# Try to send update request.
try:
update_plex(
config['plex']['host'].get(),
config['plex']['port'].get(),
config['plex']['token'].get(),
config['plex']['library_name'].get(),
config['plex']['secure'].get(bool),
config['plex']['ignore_cert_errors'].get(bool))
self._log.info('... started.')
except requests.exceptions.RequestException:
self._log.warning('Update failed.')
| 3,569 | Python | .py | 89 | 31.786517 | 77 | 0.616652 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,626 | mpdupdate.py | rembo10_headphones/lib/beetsplug/mpdupdate.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates an MPD index whenever the library is changed.
Put something like the following in your config.yaml to configure:
mpd:
host: localhost
port: 6600
password: seekrit
"""
from beets.plugins import BeetsPlugin
import os
import socket
from beets import config
# No need to introduce a dependency on an MPD library for such a
# simple use case. Here's a simple socket abstraction to make things
# easier.
class BufferedSocket:
"""Socket abstraction that allows reading by line."""
def __init__(self, host, port, sep=b'\n'):
if host[0] in ['/', '~']:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(os.path.expanduser(host))
else:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.buf = b''
self.sep = sep
def readline(self):
while self.sep not in self.buf:
data = self.sock.recv(1024)
if not data:
break
self.buf += data
if self.sep in self.buf:
res, self.buf = self.buf.split(self.sep, 1)
return res + self.sep
else:
return b''
def send(self, data):
self.sock.send(data)
def close(self):
self.sock.close()
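# Illustrative sketch of the protocol exchange that update_mpd() performs
# below (hypothetical server replies): read the banner, send a
# newline-terminated command, then read the one-line response.
def _example_mpd_exchange(host='localhost', port=6600):
    s = BufferedSocket(host, port)
    banner = s.readline()   # e.g. b'OK MPD 0.23.5\n'
    s.send(b'update\n')
    reply = s.readline()    # e.g. b'updating_db: 1\n'
    s.send(b'close\n')
    s.close()
    return banner, reply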
class MPDUpdatePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config['mpd'].add({
'host': os.environ.get('MPD_HOST', 'localhost'),
'port': int(os.environ.get('MPD_PORT', 6600)),
'password': '',
})
config['mpd']['password'].redact = True
# For backwards compatibility, use any values from the
# plugin-specific "mpdupdate" section.
for key in config['mpd'].keys():
if self.config[key].exists():
config['mpd'][key] = self.config[key].get()
self.register_listener('database_change', self.db_change)
def db_change(self, lib, model):
self.register_listener('cli_exit', self.update)
def update(self, lib):
self.update_mpd(
config['mpd']['host'].as_str(),
config['mpd']['port'].get(int),
config['mpd']['password'].as_str(),
)
def update_mpd(self, host='localhost', port=6600, password=None):
"""Sends the "update" command to the MPD server indicated,
possibly authenticating with a password first.
"""
self._log.info('Updating MPD database...')
try:
s = BufferedSocket(host, port)
except OSError as e:
self._log.warning('MPD connection failed: {0}',
str(e.strerror))
return
resp = s.readline()
if b'OK MPD' not in resp:
self._log.warning('MPD connection failed: {0!r}', resp)
return
if password:
s.send(b'password "%s"\n' % password.encode('utf8'))
resp = s.readline()
if b'OK' not in resp:
self._log.warning('Authentication failed: {0!r}', resp)
s.send(b'close\n')
s.close()
return
s.send(b'update\n')
resp = s.readline()
if b'updating_db' not in resp:
self._log.warning('Update failed: {0!r}', resp)
s.send(b'close\n')
s.close()
self._log.info('Database updated.')
| 4,121 | Python | .py | 106 | 30.311321 | 73 | 0.599499 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,627 | rewrite.py | rembo10_headphones/lib/beetsplug/rewrite.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses user-specified rewriting rules to canonicalize names for path
formats.
"""
import re
from collections import defaultdict
from beets.plugins import BeetsPlugin
from beets import ui
from beets import library
def rewriter(field, rules):
"""Create a template field function that rewrites the given field
with the given rewriting rules. ``rules`` must be a list of
(pattern, replacement) pairs.
"""
def fieldfunc(item):
value = item._values_fixed[field]
for pattern, replacement in rules:
if pattern.match(value.lower()):
# Rewrite activated.
return replacement
# Not activated; return original value.
return value
return fieldfunc
class RewritePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({})
# Gather all the rewrite rules for each field.
rules = defaultdict(list)
for key, view in self.config.items():
value = view.as_str()
try:
fieldname, pattern = key.split(None, 1)
except ValueError:
raise ui.UserError("invalid rewrite specification")
if fieldname not in library.Item._fields:
raise ui.UserError("invalid field name (%s) in rewriter" %
fieldname)
self._log.debug('adding template field {0}', key)
pattern = re.compile(pattern.lower())
rules[fieldname].append((pattern, value))
if fieldname == 'artist':
# Special case for the artist field: apply the same
# rewrite for "albumartist" as well.
rules['albumartist'].append((pattern, value))
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.items():
getter = rewriter(fieldname, fieldrules)
self.template_fields[fieldname] = getter
if fieldname in library.Album._fields:
self.album_template_fields[fieldname] = getter
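# Hypothetical config.yaml snippet (assumed rule, not from this file): each
# key is '<field> <regex>' and the value is the canonical replacement, so
# any artist value matching the pattern is rewritten in path templates:
#
#   rewrite:
#     artist The Beatles$: Beatles, The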
| 2,749 | Python | .py | 63 | 35.507937 | 74 | 0.660688 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,628 | fetchart.py | rembo10_headphones/lib/beetsplug/fetchart.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
from collections import OrderedDict
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from mediafile import image_mime_type
from beets.util.artresizer import ArtResizer
from beets.util import sorted_walk
from beets.util import syspath, bytestring_path, py3_path
import confuse
CONTENT_TYPES = {
'image/jpeg': [b'jpg', b'jpeg'],
'image/png': [b'png']
}
IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts]
class Candidate:
"""Holds information about a matching artwork, deals with validation of
dimension restrictions and resizing.
"""
CANDIDATE_BAD = 0
CANDIDATE_EXACT = 1
CANDIDATE_DOWNSCALE = 2
CANDIDATE_DOWNSIZE = 3
CANDIDATE_DEINTERLACE = 4
CANDIDATE_REFORMAT = 5
MATCH_EXACT = 0
MATCH_FALLBACK = 1
def __init__(self, log, path=None, url=None, source='',
match=None, size=None):
self._log = log
self.path = path
self.url = url
self.source = source
self.check = None
self.match = match
self.size = size
def _validate(self, plugin):
"""Determine whether the candidate artwork is valid based on
its dimensions (width and ratio).
Return `CANDIDATE_BAD` if the file is unusable.
Return `CANDIDATE_EXACT` if the file is usable as-is.
Return `CANDIDATE_DOWNSCALE` if the file must be rescaled.
Return `CANDIDATE_DOWNSIZE` if the file must be resized, and possibly
also rescaled.
Return `CANDIDATE_DEINTERLACE` if the file must be deinterlaced.
Return `CANDIDATE_REFORMAT` if the file has to be converted.
"""
if not self.path:
return self.CANDIDATE_BAD
if (not (plugin.enforce_ratio or plugin.minwidth or plugin.maxwidth
or plugin.max_filesize or plugin.deinterlace
or plugin.cover_format)):
return self.CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
if not self.size:
self.size = ArtResizer.shared.get_size(self.path)
self._log.debug('image size: {}', self.size)
if not self.size:
self._log.warning('Could not get size of image (please see '
'documentation for dependencies). '
'The configuration options `minwidth`, '
'`enforce_ratio` and `max_filesize` '
'may be violated.')
return self.CANDIDATE_EXACT
short_edge = min(self.size)
long_edge = max(self.size)
# Check minimum dimension.
if plugin.minwidth and self.size[0] < plugin.minwidth:
self._log.debug('image too small ({} < {})',
self.size[0], plugin.minwidth)
return self.CANDIDATE_BAD
# Check aspect ratio.
edge_diff = long_edge - short_edge
if plugin.enforce_ratio:
if plugin.margin_px:
if edge_diff > plugin.margin_px:
self._log.debug('image is not close enough to being '
'square, ({} - {} > {})',
long_edge, short_edge, plugin.margin_px)
return self.CANDIDATE_BAD
elif plugin.margin_percent:
margin_px = plugin.margin_percent * long_edge
if edge_diff > margin_px:
self._log.debug('image is not close enough to being '
'square, ({} - {} > {})',
long_edge, short_edge, margin_px)
return self.CANDIDATE_BAD
elif edge_diff:
# also reached for margin_px == 0 and margin_percent == 0.0
self._log.debug('image is not square ({} != {})',
self.size[0], self.size[1])
return self.CANDIDATE_BAD
# Check maximum dimension.
downscale = False
if plugin.maxwidth and self.size[0] > plugin.maxwidth:
self._log.debug('image needs rescaling ({} > {})',
self.size[0], plugin.maxwidth)
downscale = True
# Check filesize.
downsize = False
if plugin.max_filesize:
filesize = os.stat(syspath(self.path)).st_size
if filesize > plugin.max_filesize:
self._log.debug('image needs resizing ({}B > {}B)',
filesize, plugin.max_filesize)
downsize = True
# Check image format
reformat = False
if plugin.cover_format:
fmt = ArtResizer.shared.get_format(self.path)
reformat = fmt != plugin.cover_format
if reformat:
self._log.debug('image needs reformatting: {} -> {}',
fmt, plugin.cover_format)
if downscale:
return self.CANDIDATE_DOWNSCALE
elif downsize:
return self.CANDIDATE_DOWNSIZE
elif plugin.deinterlace:
return self.CANDIDATE_DEINTERLACE
elif reformat:
return self.CANDIDATE_REFORMAT
else:
return self.CANDIDATE_EXACT
def validate(self, plugin):
self.check = self._validate(plugin)
return self.check
def resize(self, plugin):
if self.check == self.CANDIDATE_DOWNSCALE:
self.path = \
ArtResizer.shared.resize(plugin.maxwidth, self.path,
quality=plugin.quality,
max_filesize=plugin.max_filesize)
elif self.check == self.CANDIDATE_DOWNSIZE:
# dimensions are correct, so maxwidth is set to maximum dimension
self.path = \
ArtResizer.shared.resize(max(self.size), self.path,
quality=plugin.quality,
max_filesize=plugin.max_filesize)
elif self.check == self.CANDIDATE_DEINTERLACE:
self.path = ArtResizer.shared.deinterlace(self.path)
elif self.check == self.CANDIDATE_REFORMAT:
self.path = ArtResizer.shared.reformat(
self.path,
plugin.cover_format,
deinterlaced=plugin.deinterlace,
)
def _logged_get(log, *args, **kwargs):
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
# Our special logging message parameter.
if 'message' in kwargs:
message = kwargs.pop('message')
else:
message = 'getting URL'
req = requests.Request('GET', *args, **req_kwargs)
with requests.Session() as s:
s.headers = {'User-Agent': 'beets'}
prepped = s.prepare_request(req)
settings = s.merge_environment_settings(
prepped.url, {}, None, None, None
)
send_kwargs.update(settings)
log.debug('{}: {}', message, prepped.url)
return s.send(prepped, **send_kwargs)
class RequestMixin:
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
def request(self, *args, **kwargs):
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
VALID_MATCHING_CRITERIA = ['default']
def __init__(self, log, config, match_by=None):
self._log = log
self._config = config
self.match_by = match_by or self.VALID_MATCHING_CRITERIA
def get(self, album, plugin, paths):
raise NotImplementedError()
def _candidate(self, **kwargs):
return Candidate(source=self, log=self._log, **kwargs)
def fetch_image(self, candidate, plugin):
raise NotImplementedError()
def cleanup(self, candidate):
pass
class LocalArtSource(ArtSource):
IS_LOCAL = True
LOC_STR = 'local'
def fetch_image(self, candidate, plugin):
pass
class RemoteArtSource(ArtSource):
IS_LOCAL = False
LOC_STR = 'remote'
def fetch_image(self, candidate, plugin):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
if plugin.maxwidth:
candidate.url = ArtResizer.shared.proxy_url(plugin.maxwidth,
candidate.url)
try:
with closing(self.request(candidate.url, stream=True,
message='downloading image')) as resp:
ct = resp.headers.get('Content-Type', None)
# Download the image to a temporary file. As some servers
# (notably fanart.tv) have proven to return wrong Content-Types
# when images were uploaded with a bad file extension, do not
# rely on it. Instead validate the type using the file magic
# and only then determine the extension.
data = resp.iter_content(chunk_size=1024)
header = b''
for chunk in data:
header += chunk
if len(header) >= 32:
# The imghdr module will only read 32 bytes, and our
# own additions in mediafile even less.
break
else:
# server didn't return enough data, i.e. corrupt image
return
real_ct = image_mime_type(header)
if real_ct is None:
# detection by file magic failed, fall back to the
# server-supplied Content-Type
# Is our type detection failsafe enough to drop this?
real_ct = ct
if real_ct not in CONTENT_TYPES:
self._log.debug('not a supported image: {}',
real_ct or 'unknown content type')
return
ext = b'.' + CONTENT_TYPES[real_ct][0]
if real_ct != ct:
self._log.warning('Server specified {}, but returned a '
'{} image. Correcting the extension '
'to {}',
ct, real_ct, ext)
suffix = py3_path(ext)
with NamedTemporaryFile(suffix=suffix, delete=False) as fh:
# write the first already loaded part of the image
fh.write(header)
# download the remaining part of the image
for chunk in data:
fh.write(chunk)
self._log.debug('downloaded art to: {0}',
util.displayable_path(fh.name))
candidate.path = util.bytestring_path(fh.name)
return
except (OSError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug('error fetching art: {}', exc)
return
def cleanup(self, candidate):
if candidate.path:
try:
util.remove(path=candidate.path)
except util.FilesystemError as exc:
self._log.debug('error cleaning up tmp art: {}', exc)
class CoverArtArchive(RemoteArtSource):
NAME = "Cover Art Archive"
VALID_MATCHING_CRITERIA = ['release', 'releasegroup']
VALID_THUMBNAIL_SIZES = [250, 500, 1200]
URL = 'https://coverartarchive.org/release/{mbid}'
GROUP_URL = 'https://coverartarchive.org/release-group/{mbid}'
def get(self, album, plugin, paths):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
def get_image_urls(url, size_suffix=None):
try:
response = self.request(url)
except requests.RequestException:
self._log.debug('{}: error receiving response'
.format(self.NAME))
return
try:
data = response.json()
except ValueError:
self._log.debug('{}: error loading response: {}'
.format(self.NAME, response.text))
return
for item in data.get('images', []):
try:
if 'Front' not in item['types']:
continue
if size_suffix:
yield item['thumbnails'][size_suffix]
else:
yield item['image']
except KeyError:
pass
release_url = self.URL.format(mbid=album.mb_albumid)
release_group_url = self.GROUP_URL.format(mbid=album.mb_releasegroupid)
        # The Cover Art Archive API offers pre-resized thumbnails at several
        # sizes. If the maxwidth config matches one of the available sizes,
        # fetch it directly instead of fetching the full-size image and
        # resizing it.
size_suffix = None
if plugin.maxwidth in self.VALID_THUMBNAIL_SIZES:
size_suffix = "-" + str(plugin.maxwidth)
if 'release' in self.match_by and album.mb_albumid:
for url in get_image_urls(release_url, size_suffix):
yield self._candidate(url=url, match=Candidate.MATCH_EXACT)
if 'releasegroup' in self.match_by and album.mb_releasegroupid:
for url in get_image_urls(release_group_url):
yield self._candidate(url=url, match=Candidate.MATCH_FALLBACK)
class Amazon(RemoteArtSource):
NAME = "Amazon"
URL = 'https://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album, plugin, paths):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
for index in self.INDICES:
yield self._candidate(url=self.URL % (album.asin, index),
match=Candidate.MATCH_EXACT)
class AlbumArtOrg(RemoteArtSource):
NAME = "AlbumArt.org scraper"
URL = 'https://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album, plugin, paths):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={'asin': album.asin})
self._log.debug('scraped art URL: {0}', resp.url)
except requests.RequestException:
self._log.debug('error scraping art page')
return
# Search the page for the image URL.
m = re.search(self.PAT, resp.text)
if m:
image_url = m.group(1)
yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT)
else:
self._log.debug('no image found on page')
class GoogleImages(RemoteArtSource):
NAME = "Google Images"
URL = 'https://www.googleapis.com/customsearch/v1'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.key = self._config['google_key'].get()
        self.cx = self._config['google_engine'].get()
def get(self, album, plugin, paths):
"""Return art URL from google custom search engine
given an album title and interpreter.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
try:
response = self.request(self.URL, params={
'key': self.key,
'cx': self.cx,
'q': search_string,
'searchType': 'image'
})
except requests.RequestException:
self._log.debug('google: error receiving response')
return
# Get results using JSON.
try:
data = response.json()
except ValueError:
self._log.debug('google: error loading response: {}'
.format(response.text))
return
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug('google fetchart error: {0}', reason)
return
if 'items' in data.keys():
for item in data['items']:
yield self._candidate(url=item['link'],
match=Candidate.MATCH_EXACT)
class FanartTV(RemoteArtSource):
"""Art from fanart.tv requested using their API"""
NAME = "fanart.tv"
API_URL = 'https://webservice.fanart.tv/v3/'
API_ALBUMS = API_URL + 'music/albums/'
PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client_key = self._config['fanarttv_key'].get()
def get(self, album, plugin, paths):
if not album.mb_releasegroupid:
return
try:
response = self.request(
self.API_ALBUMS + album.mb_releasegroupid,
headers={'api-key': self.PROJECT_KEY,
'client-key': self.client_key})
except requests.RequestException:
self._log.debug('fanart.tv: error receiving response')
return
try:
data = response.json()
except ValueError:
self._log.debug('fanart.tv: error loading response: {}',
response.text)
return
if 'status' in data and data['status'] == 'error':
if 'not found' in data['error message'].lower():
self._log.debug('fanart.tv: no image found')
elif 'api key' in data['error message'].lower():
self._log.warning('fanart.tv: Invalid API key given, please '
'enter a valid one in your config file.')
else:
self._log.debug('fanart.tv: error on request: {}',
data['error message'])
return
matches = []
# can there be more than one releasegroupid per response?
for mbid, art in data.get('albums', {}).items():
# there might be more art referenced, e.g. cdart, and an albumcover
# might not be present, even if the request was successful
if album.mb_releasegroupid == mbid and 'albumcover' in art:
matches.extend(art['albumcover'])
# can this actually occur?
else:
self._log.debug('fanart.tv: unexpected mb_releasegroupid in '
'response!')
matches.sort(key=lambda x: x['likes'], reverse=True)
for item in matches:
# fanart.tv has a strict size requirement for album art to be
# uploaded
yield self._candidate(url=item['url'],
match=Candidate.MATCH_EXACT,
size=(1000, 1000))
class ITunesStore(RemoteArtSource):
NAME = "iTunes Store"
API_URL = 'https://itunes.apple.com/search'
def get(self, album, plugin, paths):
"""Return art URL from iTunes Store given an album title.
"""
if not (album.albumartist and album.album):
return
payload = {
'term': album.albumartist + ' ' + album.album,
'entity': 'album',
'media': 'music',
'limit': 200
}
try:
r = self.request(self.API_URL, params=payload)
r.raise_for_status()
except requests.RequestException as e:
self._log.debug('iTunes search failed: {0}', e)
return
try:
candidates = r.json()['results']
except ValueError as e:
self._log.debug('Could not decode json response: {0}', e)
return
except KeyError as e:
self._log.debug('{} not found in json. Fields are {} ',
e,
list(r.json().keys()))
return
if not candidates:
self._log.debug('iTunes search for {!r} got no results',
payload['term'])
return
if self._config['high_resolution']:
image_suffix = '100000x100000-999'
else:
image_suffix = '1200x1200bb'
for c in candidates:
try:
if (c['artistName'] == album.albumartist
and c['collectionName'] == album.album):
art_url = c['artworkUrl100']
art_url = art_url.replace('100x100bb',
image_suffix)
yield self._candidate(url=art_url,
match=Candidate.MATCH_EXACT)
except KeyError as e:
self._log.debug('Malformed itunes candidate: {} not found in {}', # NOQA E501
e,
list(c.keys()))
try:
fallback_art_url = candidates[0]['artworkUrl100']
fallback_art_url = fallback_art_url.replace('100x100bb',
image_suffix)
yield self._candidate(url=fallback_art_url,
match=Candidate.MATCH_FALLBACK)
except KeyError as e:
self._log.debug('Malformed itunes candidate: {} not found in {}',
e,
list(c.keys()))
class Wikipedia(RemoteArtSource):
NAME = "Wikipedia (queried through DBpedia)"
DBPEDIA_URL = 'https://dbpedia.org/sparql'
WIKIPEDIA_URL = 'https://en.wikipedia.org/w/api.php'
SPARQL_QUERY = '''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?pageId ?coverFilename WHERE {{
?subject owl:wikiPageID ?pageId .
?subject dbpprop:name ?name .
?subject rdfs:label ?label .
{{ ?subject dbpprop:artist ?artist }}
UNION
{{ ?subject owl:artist ?artist }}
{{ ?artist foaf:name "{artist}"@en }}
UNION
{{ ?artist dbpprop:name "{artist}"@en }}
?subject rdf:type <http://dbpedia.org/ontology/Album> .
?subject dbpprop:cover ?coverFilename .
FILTER ( regex(?name, "{album}", "i") )
}}
Limit 1'''
def get(self, album, plugin, paths):
if not (album.albumartist and album.album):
return
# Find the name of the cover art filename on DBpedia
cover_filename, page_id = None, None
try:
dbpedia_response = self.request(
self.DBPEDIA_URL,
params={
'format': 'application/sparql-results+json',
'timeout': 2500,
'query': self.SPARQL_QUERY.format(
artist=album.albumartist.title(), album=album.album)
},
headers={'content-type': 'application/json'},
)
except requests.RequestException:
self._log.debug('dbpedia: error receiving response')
return
try:
data = dbpedia_response.json()
results = data['results']['bindings']
if results:
cover_filename = 'File:' + results[0]['coverFilename']['value']
page_id = results[0]['pageId']['value']
else:
self._log.debug('wikipedia: album not found on dbpedia')
except (ValueError, KeyError, IndexError):
self._log.debug('wikipedia: error scraping dbpedia response: {}',
dbpedia_response.text)
# Ensure we have a filename before attempting to query wikipedia
if not (cover_filename and page_id):
return
# DBPedia sometimes provides an incomplete cover_filename, indicated
# by the filename having a space before the extension, e.g., 'foo .bar'
# An additional Wikipedia call can help to find the real filename.
# This may be removed once the DBPedia issue is resolved, see:
# https://github.com/dbpedia/extraction-framework/issues/396
if ' .' in cover_filename and \
'.' not in cover_filename.split(' .')[-1]:
self._log.debug(
'wikipedia: dbpedia provided incomplete cover_filename'
)
lpart, rpart = cover_filename.rsplit(' .', 1)
# Query all the images in the page
try:
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'images',
'pageids': page_id,
},
headers={'content-type': 'application/json'},
)
except requests.RequestException:
self._log.debug('wikipedia: error receiving response')
return
# Try to see if one of the images on the pages matches our
# incomplete cover_filename
try:
data = wikipedia_response.json()
results = data['query']['pages'][page_id]['images']
for result in results:
if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart),
result['title']):
cover_filename = result['title']
break
except (ValueError, KeyError):
self._log.debug(
'wikipedia: failed to retrieve a cover_filename'
)
return
# Find the absolute url of the cover art on Wikipedia
try:
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'imageinfo',
'iiprop': 'url',
'titles': cover_filename.encode('utf-8'),
},
headers={'content-type': 'application/json'},
)
except requests.RequestException:
self._log.debug('wikipedia: error receiving response')
return
try:
data = wikipedia_response.json()
results = data['query']['pages']
for _, result in results.items():
image_url = result['imageinfo'][0]['url']
yield self._candidate(url=image_url,
match=Candidate.MATCH_EXACT)
except (ValueError, KeyError, IndexError):
self._log.debug('wikipedia: error scraping imageinfo')
return
class FileSystem(LocalArtSource):
NAME = "Filesystem"
@staticmethod
def filename_priority(filename, cover_names):
"""Sort order for image names.
Return indexes of cover names found in the image filename. This
means that images with lower-numbered and more keywords will have
higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, album, plugin, paths):
"""Look for album art files in the specified directories.
"""
if not paths:
return
cover_names = list(map(util.bytestring_path, plugin.cover_names))
cover_names_str = b'|'.join(cover_names)
cover_pat = br''.join([br"(\b|_)(", cover_names_str, br")(\b|_)"])
for path in paths:
if not os.path.isdir(syspath(path)):
continue
# Find all files that look like images in the directory.
images = []
ignore = config['ignore'].as_str_seq()
ignore_hidden = config['ignore_hidden'].get(bool)
for _, _, files in sorted_walk(path, ignore=ignore,
ignore_hidden=ignore_hidden):
for fn in files:
fn = bytestring_path(fn)
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext) and \
os.path.isfile(syspath(os.path.join(path, fn))):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images,
key=lambda x:
self.filename_priority(x, cover_names))
remaining = []
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
self._log.debug('using well-named art file {0}',
util.displayable_path(fn))
yield self._candidate(path=os.path.join(path, fn),
match=Candidate.MATCH_EXACT)
else:
remaining.append(fn)
# Fall back to any image in the folder.
if remaining and not plugin.cautious:
self._log.debug('using fallback art file {0}',
util.displayable_path(remaining[0]))
yield self._candidate(path=os.path.join(path, remaining[0]),
match=Candidate.MATCH_FALLBACK)
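# Sketch of FileSystem.filename_priority (hypothetical filenames): names
# containing more of the configured keywords yield longer index lists,
# which the sort above uses as keys.
def _example_filename_priority():
    names = [b'cover', b'front']
    return (FileSystem.filename_priority(b'front_cover.jpg', names),  # [0, 1]
            FileSystem.filename_priority(b'scan.jpg', names))         # []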
class LastFM(RemoteArtSource):
NAME = "Last.fm"
# Sizes in priority order.
SIZES = OrderedDict([
('mega', (300, 300)),
('extralarge', (300, 300)),
('large', (174, 174)),
('medium', (64, 64)),
('small', (34, 34)),
])
API_URL = 'https://ws.audioscrobbler.com/2.0'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.key = self._config['lastfm_key'].get()
def get(self, album, plugin, paths):
if not album.mb_albumid:
return
try:
response = self.request(self.API_URL, params={
'method': 'album.getinfo',
'api_key': self.key,
'mbid': album.mb_albumid,
'format': 'json',
})
except requests.RequestException:
self._log.debug('lastfm: error receiving response')
return
try:
data = response.json()
if 'error' in data:
if data['error'] == 6:
self._log.debug('lastfm: no results for {}',
album.mb_albumid)
else:
self._log.error(
'lastfm: failed to get album info: {} ({})',
data['message'], data['error'])
else:
images = {image['size']: image['#text']
for image in data['album']['image']}
# Provide candidates in order of size.
for size in self.SIZES.keys():
if size in images:
yield self._candidate(url=images[size],
size=self.SIZES[size])
except ValueError:
self._log.debug('lastfm: error loading response: {}'
.format(response.text))
return
# Try each source in turn.
SOURCES_ALL = ['filesystem',
'coverart', 'itunes', 'amazon', 'albumart',
'wikipedia', 'google', 'fanarttv', 'lastfm']
ART_SOURCES = {
'filesystem': FileSystem,
'coverart': CoverArtArchive,
'itunes': ITunesStore,
'albumart': AlbumArtOrg,
'amazon': Amazon,
'wikipedia': Wikipedia,
'google': GoogleImages,
'fanarttv': FanartTV,
'lastfm': LastFM,
}
SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
PAT_PX = r"(0|[1-9][0-9]*)px"
PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%"
def __init__(self):
super().__init__()
# Holds candidates corresponding to downloaded images between
# fetching them and placing them in the filesystem.
self.art_candidates = {}
self.config.add({
'auto': True,
'minwidth': 0,
'maxwidth': 0,
'quality': 0,
'max_filesize': 0,
'enforce_ratio': False,
'cautious': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
'sources': ['filesystem',
'coverart', 'itunes', 'amazon', 'albumart'],
'google_key': None,
'google_engine': '001442825323518660753:hrh5ch1gjzm',
'fanarttv_key': None,
'lastfm_key': None,
'store_source': False,
'high_resolution': False,
'deinterlace': False,
'cover_format': None,
})
self.config['google_key'].redact = True
self.config['fanarttv_key'].redact = True
self.config['lastfm_key'].redact = True
self.minwidth = self.config['minwidth'].get(int)
self.maxwidth = self.config['maxwidth'].get(int)
self.max_filesize = self.config['max_filesize'].get(int)
self.quality = self.config['quality'].get(int)
# allow both pixel and percentage-based margin specifications
self.enforce_ratio = self.config['enforce_ratio'].get(
confuse.OneOf([bool,
confuse.String(pattern=self.PAT_PX),
confuse.String(pattern=self.PAT_PERCENT)]))
self.margin_px = None
self.margin_percent = None
self.deinterlace = self.config['deinterlace'].get(bool)
if type(self.enforce_ratio) is str:
if self.enforce_ratio[-1] == '%':
self.margin_percent = float(self.enforce_ratio[:-1]) / 100
elif self.enforce_ratio[-2:] == 'px':
self.margin_px = int(self.enforce_ratio[:-2])
else:
# shouldn't happen
raise confuse.ConfigValueError()
self.enforce_ratio = True
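        # e.g. (hypothetical values): enforce_ratio: '10px' yields
        # margin_px = 10, while enforce_ratio: '0.5%' yields
        # margin_percent = 0.005.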
cover_names = self.config['cover_names'].as_str_seq()
self.cover_names = list(map(util.bytestring_path, cover_names))
self.cautious = self.config['cautious'].get(bool)
self.store_source = self.config['store_source'].get(bool)
self.src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
self.cover_format = self.config['cover_format'].get(
confuse.Optional(str)
)
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
available_sources = list(SOURCES_ALL)
if not self.config['google_key'].get() and \
'google' in available_sources:
available_sources.remove('google')
if not self.config['lastfm_key'].get() and \
'lastfm' in available_sources:
available_sources.remove('lastfm')
available_sources = [(s, c)
for s in available_sources
for c in ART_SOURCES[s].VALID_MATCHING_CRITERIA]
sources = plugins.sanitize_pairs(
self.config['sources'].as_pairs(default_value='*'),
available_sources)
if 'remote_priority' in self.config:
self._log.warning(
'The `fetch_art.remote_priority` configuration option has '
'been deprecated. Instead, place `filesystem` at the end of '
'your `sources` list.')
if self.config['remote_priority'].get(bool):
fs = []
others = []
for s, c in sources:
if s == 'filesystem':
fs.append((s, c))
else:
others.append((s, c))
sources = others + fs
self.sources = [ART_SOURCES[s](self._log, self.config, match_by=[c])
for s, c in sources]
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.album.artpath and os.path.isfile(task.album.artpath):
# Album already has art (probably a re-import); skip it.
return
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag in (importer.action.APPLY,
importer.action.RETAG):
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
candidate = self.art_for_album(task.album, task.paths, local)
if candidate:
self.art_candidates[task] = candidate
def _set_art(self, album, candidate, delete=False):
album.set_art(candidate.path, delete)
if self.store_source:
# store the source of the chosen artwork in a flexible field
self._log.debug(
"Storing art_source for {0.albumartist} - {0.album}",
album)
album.art_source = SOURCE_NAMES[type(candidate.source)]
album.store()
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_candidates:
candidate = self.art_candidates.pop(task)
self._set_art(task.album, candidate, not self.src_removed)
if self.src_removed:
task.prune(candidate.path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option(
'-f', '--force', dest='force',
action='store_true', default=False,
help='re-download art when already present'
)
cmd.parser.add_option(
'-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='quiet mode: do not output albums that already have artwork'
)
def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force,
opts.quiet)
cmd.func = func
return [cmd]
    # Utilities converted from module functions to methods during the
    # logging overhaul.
def art_for_album(self, album, paths, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `quality` then resized images
are saved at the specified quality level. If `local_only`, then only
local image files from the filesystem are returned; no network
requests are made.
"""
out = None
for source in self.sources:
if source.IS_LOCAL or not local_only:
self._log.debug(
'trying source {0} for album {1.albumartist} - {1.album}',
SOURCE_NAMES[type(source)],
album,
)
# URLs might be invalid at this point, or the image may not
# fulfill the requirements
for candidate in source.get(album, self, paths):
source.fetch_image(candidate, self)
if candidate.validate(self):
out = candidate
self._log.debug(
'using {0.LOC_STR} image {1}'.format(
source, util.displayable_path(out.path)))
break
# Remove temporary files for invalid candidates.
source.cleanup(candidate)
if out:
break
if out:
out.resize(self)
return out
def batch_fetch_art(self, lib, albums, force, quiet):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath):
if not quiet:
message = ui.colorize('text_highlight_minor',
'has album art')
self._log.info('{0}: {1}', album, message)
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
candidate = self.art_for_album(album, local_paths)
if candidate:
self._set_art(album, candidate)
message = ui.colorize('text_success', 'found album art')
else:
message = ui.colorize('text_error', 'no art found')
self._log.info('{0}: {1}', album, message)
| 44,418 | Python | .py | 982 | 31.578411 | 94 | 0.539416 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,629 | bucket.py | rembo10_headphones/lib/beetsplug/bucket.py |
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the %bucket{} function for path formatting.
"""
from datetime import datetime
import re
import string
from itertools import tee
from beets import plugins, ui
ASCII_DIGITS = string.digits + string.ascii_lowercase
class BucketError(Exception):
pass
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def span_from_str(span_str):
"""Build a span dict from the span string representation.
"""
def normalize_year(d, yearfrom):
"""Convert string to a 4 digits year
"""
if yearfrom < 100:
raise BucketError("%d must be expressed on 4 digits" % yearfrom)
# if two digits only, pick closest year that ends by these two
# digits starting from yearfrom
if d < 100:
if (d % 100) < (yearfrom % 100):
d = (yearfrom - yearfrom % 100) + 100 + d
else:
d = (yearfrom - yearfrom % 100) + d
return d
years = [int(x) for x in re.findall(r'\d+', span_str)]
if not years:
raise ui.UserError("invalid range defined for year bucket '%s': no "
"year found" % span_str)
try:
years = [normalize_year(x, years[0]) for x in years]
except BucketError as exc:
raise ui.UserError("invalid range defined for year bucket '%s': %s" %
(span_str, exc))
res = {'from': years[0], 'str': span_str}
if len(years) > 1:
res['to'] = years[-1]
return res
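# Sketch of span parsing (hypothetical span string): two-digit years are
# expanded relative to the first year found in the string.
def _example_span_from_str():
    return span_from_str('1990-99')
    # -> {'from': 1990, 'str': '1990-99', 'to': 1999}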
def complete_year_spans(spans):
"""Set the `to` value of spans if empty and sort them chronologically.
"""
spans.sort(key=lambda x: x['from'])
for (x, y) in pairwise(spans):
if 'to' not in x:
x['to'] = y['from'] - 1
if spans and 'to' not in spans[-1]:
spans[-1]['to'] = datetime.now().year
def extend_year_spans(spans, spanlen, start=1900, end=2014):
"""Add new spans to given spans list so that every year of [start,end]
belongs to a span.
"""
extended_spans = spans[:]
for (x, y) in pairwise(spans):
        # If there is a gap between two spans, fill it with as many spans
        # of length spanlen as necessary.
for span_from in range(x['to'] + 1, y['from'], spanlen):
extended_spans.append({'from': span_from})
# Create spans prior to declared ones
for span_from in range(spans[0]['from'] - spanlen, start, -spanlen):
extended_spans.append({'from': span_from})
# Create spans after the declared ones
for span_from in range(spans[-1]['to'] + 1, end, spanlen):
extended_spans.append({'from': span_from})
complete_year_spans(extended_spans)
return extended_spans
def build_year_spans(year_spans_str):
"""Build a chronologically ordered list of spans dict from unordered spans
stringlist.
"""
spans = []
for elem in year_spans_str:
spans.append(span_from_str(elem))
complete_year_spans(spans)
return spans
def str2fmt(s):
"""Deduces formatting syntax from a span string.
"""
regex = re.compile(r"(?P<bef>\D*)(?P<fromyear>\d+)(?P<sep>\D*)"
r"(?P<toyear>\d*)(?P<after>\D*)")
m = re.match(regex, s)
res = {'fromnchars': len(m.group('fromyear')),
'tonchars': len(m.group('toyear'))}
res['fmt'] = "{}%s{}{}{}".format(m.group('bef'),
m.group('sep'),
'%s' if res['tonchars'] else '',
m.group('after'))
return res
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation.
"""
args = (str(yearfrom)[-fromnchars:])
if tonchars:
args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
return fmt % args
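# Sketch of the round trip between str2fmt and format_span (hypothetical
# span string): the deduced format re-renders a span in the same style.
def _example_span_format():
    fmt = str2fmt('1990-99')
    # -> {'fromnchars': 4, 'tonchars': 2, 'fmt': '%s-%s'}
    return format_span(fmt['fmt'], 1990, 1999,
                       fmt['fromnchars'], fmt['tonchars'])  # -> '1990-99'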
def extract_modes(spans):
"""Extract the most common spans lengths and representation formats
"""
rangelen = sorted([x['to'] - x['from'] + 1 for x in spans])
deflen = sorted(rangelen, key=rangelen.count)[-1]
reprs = [str2fmt(x['str']) for x in spans]
deffmt = sorted(reprs, key=reprs.count)[-1]
return deflen, deffmt
def build_alpha_spans(alpha_spans_str, alpha_regexs):
"""Extract alphanumerics from string and return sorted list of chars
[from...to]
"""
spans = []
for elem in alpha_spans_str:
if elem in alpha_regexs:
spans.append(re.compile(alpha_regexs[elem]))
else:
bucket = sorted([x for x in elem.lower() if x.isalnum()])
if bucket:
begin_index = ASCII_DIGITS.index(bucket[0])
end_index = ASCII_DIGITS.index(bucket[-1])
else:
raise ui.UserError("invalid range defined for alpha bucket "
"'%s': no alphanumeric character found" %
elem)
spans.append(
re.compile(
"^[" + ASCII_DIGITS[begin_index:end_index + 1] +
ASCII_DIGITS[begin_index:end_index + 1].upper() + "]"
)
)
return spans
class BucketPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.template_funcs['bucket'] = self._tmpl_bucket
self.config.add({
'bucket_year': [],
'bucket_alpha': [],
'bucket_alpha_regex': {},
'extrapolate': False
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
self.year_spans = build_year_spans(self.config['bucket_year'].get())
if self.year_spans and self.config['extrapolate']:
[self.ys_len_mode,
self.ys_repr_mode] = extract_modes(self.year_spans)
self.year_spans = extend_year_spans(self.year_spans,
self.ys_len_mode)
self.alpha_spans = build_alpha_spans(
self.config['bucket_alpha'].get(),
self.config['bucket_alpha_regex'].get()
)
def find_bucket_year(self, year):
"""Return bucket that matches given year or return the year
if no matching bucket.
"""
for ys in self.year_spans:
if ys['from'] <= int(year) <= ys['to']:
if 'str' in ys:
return ys['str']
else:
return format_span(self.ys_repr_mode['fmt'],
ys['from'], ys['to'],
self.ys_repr_mode['fromnchars'],
self.ys_repr_mode['tonchars'])
return year
def find_bucket_alpha(self, s):
"""Return alpha-range bucket that matches given string or return the
string initial if no matching bucket.
"""
for (i, span) in enumerate(self.alpha_spans):
if span.match(s):
return self.config['bucket_alpha'].get()[i]
return s[0].upper()
def _tmpl_bucket(self, text, field=None):
if not field and len(text) == 4 and text.isdigit():
field = 'year'
if field == 'year':
func = self.find_bucket_year
else:
func = self.find_bucket_alpha
return func(text)
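# Hypothetical usage in a path format (assumed config values): with
#   bucket_year: ['1990-99', '2000-09']
# the template call %bucket{1994} resolves via find_bucket_year and
# renders as '1990-99'.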
| 8,052 | Python | .py | 199 | 31.075377 | 78 | 0.5758 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,630 | mpdstats.py | rembo10_headphones/lib/beetsplug/mpdstats.py |
# This file is part of beets.
# Copyright 2016, Peter Schnebel and Johann Klähn.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import mpd
import time
import os
from beets import ui
from beets import config
from beets import plugins
from beets import library
from beets.util import displayable_path
from beets.dbcore import types
# If we lose the connection, how many times do we want to retry and how
# much time should we wait between retries?
RETRIES = 10
RETRY_INTERVAL = 5
mpd_config = config['mpd']
def is_url(path):
"""Try to determine if the path is an URL.
"""
if isinstance(path, bytes): # if it's bytes, then it's a path
return False
return path.split('://', 1)[0] in ['http', 'https']
class MPDClientWrapper:
def __init__(self, log):
self._log = log
self.music_directory = mpd_config['music_directory'].as_str()
self.strip_path = mpd_config['strip_path'].as_str()
        # Ensure strip_path ends with '/'.
if not self.strip_path.endswith('/'):
self.strip_path += '/'
self._log.debug('music_directory: {0}', self.music_directory)
self._log.debug('strip_path: {0}', self.strip_path)
self.client = mpd.MPDClient()
def connect(self):
"""Connect to the MPD.
"""
host = mpd_config['host'].as_str()
port = mpd_config['port'].get(int)
if host[0] in ['/', '~']:
host = os.path.expanduser(host)
self._log.info('connecting to {0}:{1}', host, port)
try:
self.client.connect(host, port)
except OSError as e:
raise ui.UserError(f'could not connect to MPD: {e}')
password = mpd_config['password'].as_str()
if password:
try:
self.client.password(password)
except mpd.CommandError as e:
raise ui.UserError(
f'could not authenticate to MPD: {e}'
)
def disconnect(self):
"""Disconnect from the MPD.
"""
self.client.close()
self.client.disconnect()
def get(self, command, retries=RETRIES):
"""Wrapper for requests to the MPD server. Tries to re-connect if the
connection was lost (f.ex. during MPD's library refresh).
"""
try:
return getattr(self.client, command)()
except (OSError, mpd.ConnectionError) as err:
self._log.error('{0}', err)
if retries <= 0:
                # Retries exhausted: we could not reconnect in time.
raise ui.UserError('communication with MPD server failed')
time.sleep(RETRY_INTERVAL)
try:
self.disconnect()
except mpd.ConnectionError:
pass
self.connect()
return self.get(command, retries=retries - 1)
def currentsong(self):
"""Return the path to the currently playing song, along with its
songid. Prefixes paths with the music_directory, to get the absolute
path.
        In some cases the path reported by the MPD server contains a local
        prefix that must be removed; any leading `strip_path` (which
        defaults to '') is stripped first.
"""
result = None
entry = self.get('currentsong')
if 'file' in entry:
if not is_url(entry['file']):
file = entry['file']
if file.startswith(self.strip_path):
file = file[len(self.strip_path):]
result = os.path.join(self.music_directory, file)
else:
result = entry['file']
self._log.debug('returning: {0}', result)
return result, entry.get('id')
def status(self):
"""Return the current status of the MPD.
"""
return self.get('status')
def events(self):
"""Return list of events. This may block a long time while waiting for
an answer from MPD.
"""
return self.get('idle')
class MPDStats:
def __init__(self, lib, log):
self.lib = lib
self._log = log
self.do_rating = mpd_config['rating'].get(bool)
self.rating_mix = mpd_config['rating_mix'].get(float)
self.time_threshold = 10.0 # TODO: maybe add config option?
self.now_playing = None
self.mpd = MPDClientWrapper(log)
def rating(self, play_count, skip_count, rating, skipped):
"""Calculate a new rating for a song based on play count, skip count,
old rating and the fact if it was skipped or not.
"""
if skipped:
rolling = (rating - rating / 2.0)
else:
rolling = (rating + (1.0 - rating) / 2.0)
stable = (play_count + 1.0) / (play_count + skip_count + 2.0)
return (self.rating_mix * stable +
(1.0 - self.rating_mix) * rolling)
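# A worked numeric example of the formula above, with hypothetical counts:
# play_count=4, skip_count=1, old rating=0.6, not skipped, rating_mix=0.75.
rolling = 0.6 + (1.0 - 0.6) / 2.0      # exponential-style estimate: 0.8
stable = (4 + 1.0) / (4 + 1 + 2.0)     # Laplace-smoothed ratio: ~0.714
print(0.75 * stable + (1.0 - 0.75) * rolling)  # mixed rating: ~0.736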
def get_item(self, path):
"""Return the beets item related to path.
"""
query = library.PathQuery('path', path)
item = self.lib.items(query).get()
if item:
return item
else:
self._log.info('item not found: {0}', displayable_path(path))
def update_item(self, item, attribute, value=None, increment=None):
"""Update the beets item. Set attribute to value or increment the value
of attribute. If the increment argument is used the value is cast to
the corresponding type.
"""
if item is None:
return
if increment is not None:
item.load()
value = type(increment)(item.get(attribute, 0)) + increment
if value is not None:
item[attribute] = value
item.store()
self._log.debug('updated: {0} = {1} [{2}]',
attribute,
item[attribute],
displayable_path(item.path))
def update_rating(self, item, skipped):
"""Update the rating for a beets item. The `item` can either be a
beets `Item` or None. If the item is None, nothing changes.
"""
if item is None:
return
item.load()
rating = self.rating(
int(item.get('play_count', 0)),
int(item.get('skip_count', 0)),
float(item.get('rating', 0.5)),
skipped)
self.update_item(item, 'rating', rating)
def handle_song_change(self, song):
"""Determine if a song was skipped or not and update its attributes.
To this end the difference between the song's supposed end time
and the current time is calculated. If it's greater than a threshold,
the song is considered skipped.
        Returns whether the song was skipped.
"""
diff = abs(song['remaining'] - (time.time() - song['started']))
skipped = diff >= self.time_threshold
if skipped:
self.handle_skipped(song)
else:
self.handle_played(song)
if self.do_rating:
self.update_rating(song['beets_item'], skipped)
return skipped
def handle_played(self, song):
"""Updates the play count of a song.
"""
self.update_item(song['beets_item'], 'play_count', increment=1)
self._log.info('played {0}', displayable_path(song['path']))
def handle_skipped(self, song):
"""Updates the skip count of a song.
"""
self.update_item(song['beets_item'], 'skip_count', increment=1)
self._log.info('skipped {0}', displayable_path(song['path']))
def on_stop(self, status):
self._log.info('stop')
# if the current song stays the same it means that we stopped on the
# current track and should not record a skip.
if self.now_playing and self.now_playing['id'] != status.get('songid'):
self.handle_song_change(self.now_playing)
self.now_playing = None
def on_pause(self, status):
self._log.info('pause')
self.now_playing = None
def on_play(self, status):
path, songid = self.mpd.currentsong()
if not path:
return
played, duration = map(int, status['time'].split(':', 1))
remaining = duration - played
if self.now_playing:
if self.now_playing['path'] != path:
self.handle_song_change(self.now_playing)
else:
                # MPD sometimes sends several play events for the same
                # song; a small time difference means this is just a
                # redundant event shortly after the natural song start.
diff = abs(time.time() - self.now_playing['started'])
if diff <= self.time_threshold:
return
if self.now_playing['path'] == path and played == 0:
self.handle_song_change(self.now_playing)
if is_url(path):
self._log.info('playing stream {0}', displayable_path(path))
self.now_playing = None
return
self._log.info('playing {0}', displayable_path(path))
self.now_playing = {
'started': time.time(),
'remaining': remaining,
'path': path,
'id': songid,
'beets_item': self.get_item(path),
}
self.update_item(self.now_playing['beets_item'],
'last_played', value=int(time.time()))
def run(self):
self.mpd.connect()
events = ['player']
while True:
if 'player' in events:
status = self.mpd.status()
handler = getattr(self, 'on_' + status['state'], None)
if handler:
handler(status)
else:
self._log.debug('unhandled status "{0}"', status)
events = self.mpd.events()
class MPDStatsPlugin(plugins.BeetsPlugin):
item_types = {
'play_count': types.INTEGER,
'skip_count': types.INTEGER,
'last_played': library.DateType(),
'rating': types.FLOAT,
}
def __init__(self):
super().__init__()
mpd_config.add({
'music_directory': config['directory'].as_filename(),
'strip_path': '',
'rating': True,
'rating_mix': 0.75,
'host': os.environ.get('MPD_HOST', 'localhost'),
'port': int(os.environ.get('MPD_PORT', 6600)),
'password': '',
})
mpd_config['password'].redact = True
def commands(self):
cmd = ui.Subcommand(
'mpdstats',
help='run a MPD client to gather play statistics')
cmd.parser.add_option(
'--host', dest='host', type='string',
help='set the hostname of the server to connect to')
cmd.parser.add_option(
'--port', dest='port', type='int',
help='set the port of the MPD server to connect to')
cmd.parser.add_option(
'--password', dest='password', type='string',
help='set the password of the MPD server to connect to')
def func(lib, opts, args):
mpd_config.set_args(opts)
# Overrides for MPD settings.
if opts.host:
                mpd_config['host'] = opts.host
if opts.port:
                mpd_config['port'] = int(opts.port)
if opts.password:
                mpd_config['password'] = opts.password
try:
MPDStats(lib, self._log).run()
except KeyboardInterrupt:
pass
cmd.func = func
return [cmd]
| 12,278 | Python | .py | 303 | 30.392739 | 79 | 0.573962 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,631 | hook.py | rembo10_headphones/lib/beetsplug/hook.py |
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows custom commands to be run when an event is emitted by beets"""
import string
import subprocess
import shlex
from beets.plugins import BeetsPlugin
from beets.util import arg_encoding
class CodingFormatter(string.Formatter):
"""A variant of `string.Formatter` that converts everything to `unicode`
strings.
This is necessary on Python 2, where formatting otherwise occurs on
bytestrings. It intercepts two points in the formatting process to decode
the format string and all fields using the specified encoding. If decoding
fails, the values are used as-is.
"""
def __init__(self, coding):
"""Creates a new coding formatter with the provided coding."""
self._coding = coding
def format(self, format_string, *args, **kwargs):
"""Formats the provided string using the provided arguments and keyword
arguments.
This method decodes the format string using the formatter's coding.
See str.format and string.Formatter.format.
"""
if isinstance(format_string, bytes):
format_string = format_string.decode(self._coding)
return super().format(format_string, *args,
**kwargs)
def convert_field(self, value, conversion):
"""Converts the provided value given a conversion type.
This method decodes the converted value using the formatter's coding.
See string.Formatter.convert_field.
"""
converted = super().convert_field(value,
conversion)
if isinstance(converted, bytes):
return converted.decode(self._coding)
return converted
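# A short usage sketch for CodingFormatter: a bytes format string is decoded
# with the configured coding before the normal str.format logic runs. (The
# event name below is made up for illustration.)
fmt = CodingFormatter('utf-8')
print(fmt.format(b'event={event}', event='album_imported'))  # event=album_imported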
class HookPlugin(BeetsPlugin):
"""Allows custom commands to be run when an event is emitted by beets"""
def __init__(self):
super().__init__()
self.config.add({
'hooks': []
})
hooks = self.config['hooks'].get(list)
for hook_index in range(len(hooks)):
hook = self.config['hooks'][hook_index]
hook_event = hook['event'].as_str()
hook_command = hook['command'].as_str()
self.create_and_register_hook(hook_event, hook_command)
def create_and_register_hook(self, event, command):
def hook_function(**kwargs):
if command is None or len(command) == 0:
self._log.error('invalid command "{0}"', command)
return
# Use a string formatter that works on Unicode strings.
formatter = CodingFormatter(arg_encoding())
command_pieces = shlex.split(command)
for i, piece in enumerate(command_pieces):
command_pieces[i] = formatter.format(piece, event=event,
**kwargs)
self._log.debug('running command "{0}" for event {1}',
' '.join(command_pieces), event)
try:
subprocess.check_call(command_pieces)
except subprocess.CalledProcessError as exc:
self._log.error('hook for {0} exited with status {1}',
event, exc.returncode)
except OSError as exc:
self._log.error('hook for {0} failed: {1}', event, exc)
self.register_listener(event, hook_function)
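# A standalone sketch of the substitution hook_function performs, using a
# hypothetical configured command: each shlex token is formatted separately,
# so substituted values containing spaces stay inside a single argument.
command = 'notify-send "beets: {event}"'
pieces = [piece.format(event='album_imported')
          for piece in shlex.split(command)]
print(pieces)  # ['notify-send', 'beets: album_imported']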
| 4,040 | Python | .py | 84 | 38.285714 | 79 | 0.640764 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,632 | acousticbrainz.py | rembo10_headphones/lib/beetsplug/acousticbrainz.py |
# This file is part of beets.
# Copyright 2015-2016, Ohm Patel.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch various AcousticBrainz metadata using MBID.
"""
from collections import defaultdict
import requests
from beets import plugins, ui
from beets.dbcore import types
ACOUSTIC_BASE = "https://acousticbrainz.org/"
LEVELS = ["/low-level", "/high-level"]
ABSCHEME = {
'highlevel': {
'danceability': {
'all': {
'danceable': 'danceable'
}
},
'gender': {
'value': 'gender'
},
'genre_rosamerica': {
'value': 'genre_rosamerica'
},
'mood_acoustic': {
'all': {
'acoustic': 'mood_acoustic'
}
},
'mood_aggressive': {
'all': {
'aggressive': 'mood_aggressive'
}
},
'mood_electronic': {
'all': {
'electronic': 'mood_electronic'
}
},
'mood_happy': {
'all': {
'happy': 'mood_happy'
}
},
'mood_party': {
'all': {
'party': 'mood_party'
}
},
'mood_relaxed': {
'all': {
'relaxed': 'mood_relaxed'
}
},
'mood_sad': {
'all': {
'sad': 'mood_sad'
}
},
'moods_mirex': {
'value': 'moods_mirex'
},
'ismir04_rhythm': {
'value': 'rhythm'
},
'tonal_atonal': {
'all': {
'tonal': 'tonal'
}
},
'timbre': {
'value': 'timbre'
},
'voice_instrumental': {
'value': 'voice_instrumental'
},
},
'lowlevel': {
'average_loudness': 'average_loudness'
},
'rhythm': {
'bpm': 'bpm'
},
'tonal': {
'chords_changes_rate': 'chords_changes_rate',
'chords_key': 'chords_key',
'chords_number_rate': 'chords_number_rate',
'chords_scale': 'chords_scale',
'key_key': ('initial_key', 0),
'key_scale': ('initial_key', 1),
'key_strength': 'key_strength'
}
}
class AcousticPlugin(plugins.BeetsPlugin):
item_types = {
'average_loudness': types.Float(6),
'chords_changes_rate': types.Float(6),
'chords_key': types.STRING,
'chords_number_rate': types.Float(6),
'chords_scale': types.STRING,
'danceable': types.Float(6),
'gender': types.STRING,
'genre_rosamerica': types.STRING,
'initial_key': types.STRING,
'key_strength': types.Float(6),
'mood_acoustic': types.Float(6),
'mood_aggressive': types.Float(6),
'mood_electronic': types.Float(6),
'mood_happy': types.Float(6),
'mood_party': types.Float(6),
'mood_relaxed': types.Float(6),
'mood_sad': types.Float(6),
'moods_mirex': types.STRING,
'rhythm': types.Float(6),
'timbre': types.STRING,
'tonal': types.Float(6),
'voice_instrumental': types.STRING,
}
def __init__(self):
super().__init__()
self.config.add({
'auto': True,
'force': False,
'tags': []
})
if self.config['auto']:
self.register_listener('import_task_files',
self.import_task_files)
def commands(self):
cmd = ui.Subcommand('acousticbrainz',
help="fetch metadata from AcousticBrainz")
cmd.parser.add_option(
'-f', '--force', dest='force_refetch',
action='store_true', default=False,
help='re-download data when already present'
)
def func(lib, opts, args):
items = lib.items(ui.decargs(args))
self._fetch_info(items, ui.should_write(),
opts.force_refetch or self.config['force'])
cmd.func = func
return [cmd]
def import_task_files(self, session, task):
"""Function is called upon beet import.
"""
self._fetch_info(task.imported_items(), False, True)
def _get_data(self, mbid):
data = {}
for url in _generate_urls(mbid):
self._log.debug('fetching URL: {}', url)
try:
res = requests.get(url)
except requests.RequestException as exc:
self._log.info('request error: {}', exc)
return {}
if res.status_code == 404:
self._log.info('recording ID {} not found', mbid)
return {}
try:
data.update(res.json())
except ValueError:
self._log.debug('Invalid Response: {}', res.text)
return {}
return data
def _fetch_info(self, items, write, force):
"""Fetch additional information from AcousticBrainz for the `item`s.
"""
tags = self.config['tags'].as_str_seq()
for item in items:
# If we're not forcing re-downloading for all tracks, check
# whether the data is already present. We use one
# representative field name to check for previously fetched
# data.
if not force:
mood_str = item.get('mood_acoustic', '')
if mood_str:
self._log.info('data already present for: {}', item)
continue
# We can only fetch data for tracks with MBIDs.
if not item.mb_trackid:
continue
self._log.info('getting data for: {}', item)
data = self._get_data(item.mb_trackid)
if data:
for attr, val in self._map_data_to_scheme(data, ABSCHEME):
if not tags or attr in tags:
self._log.debug('attribute {} of {} set to {}',
attr,
item,
val)
setattr(item, attr, val)
else:
self._log.debug('skipping attribute {} of {}'
' (value {}) due to config',
attr,
item,
val)
item.store()
if write:
item.try_write()
def _map_data_to_scheme(self, data, scheme):
"""Given `data` as a structure of nested dictionaries, and `scheme` as a
        structure of nested dictionaries, `yield` tuples `(attr, val)` where
`attr` and `val` are corresponding leaf nodes in `scheme` and `data`.
As its name indicates, `scheme` defines how the data is structured,
so this function tries to find leaf nodes in `data` that correspond
        to the leaf nodes of `scheme`, and not the other way around.
Leaf nodes of `data` that do not exist in the `scheme` do not matter.
If a leaf node of `scheme` is not present in `data`,
no value is yielded for that attribute and a simple warning is issued.
Finally, to account for attributes of which the value is split between
several leaf nodes in `data`, leaf nodes of `scheme` can be tuples
`(attr, order)` where `attr` is the attribute to which the leaf node
belongs, and `order` is the place at which it should appear in the
value. The different `value`s belonging to the same `attr` are simply
joined with `' '`. This is hardcoded and not very flexible, but it gets
the job done.
For example:
>>> scheme = {
'key1': 'attribute',
'key group': {
'subkey1': 'subattribute',
'subkey2': ('composite attribute', 0)
},
'key2': ('composite attribute', 1)
}
>>> data = {
'key1': 'value',
'key group': {
'subkey1': 'subvalue',
'subkey2': 'part 1 of composite attr'
},
'key2': 'part 2'
}
>>> print(list(_map_data_to_scheme(data, scheme)))
[('subattribute', 'subvalue'),
('attribute', 'value'),
('composite attribute', 'part 1 of composite attr part 2')]
"""
# First, we traverse `scheme` and `data`, `yield`ing all the non
        # composite attributes straight away and populating the dictionary
# `composites` with the composite attributes.
# When we are finished traversing `scheme`, `composites` should
# map each composite attribute to an ordered list of the values
# belonging to the attribute, for example:
# `composites = {'initial_key': ['B', 'minor']}`.
# The recursive traversal.
composites = defaultdict(list)
yield from self._data_to_scheme_child(data,
scheme,
composites)
# When composites has been populated, yield the composite attributes
# by joining their parts.
for composite_attr, value_parts in composites.items():
yield composite_attr, ' '.join(value_parts)
def _data_to_scheme_child(self, subdata, subscheme, composites):
"""The recursive business logic of :meth:`_map_data_to_scheme`:
Traverse two structures of nested dictionaries in parallel and `yield`
tuples of corresponding leaf nodes.
If a leaf node belongs to a composite attribute (is a `tuple`),
populate `composites` rather than yielding straight away.
All the child functions for a single traversal share the same
`composites` instance, which is passed along.
"""
for k, v in subscheme.items():
if k in subdata:
if type(v) == dict:
yield from self._data_to_scheme_child(subdata[k],
v,
composites)
elif type(v) == tuple:
composite_attribute, part_number = v
attribute_parts = composites[composite_attribute]
# Parts are not guaranteed to be inserted in order
while len(attribute_parts) <= part_number:
attribute_parts.append('')
attribute_parts[part_number] = subdata[k]
else:
yield v, subdata[k]
else:
                self._log.warning('Acousticbrainz did not provide info '
                                  'about {}', k)
self._log.debug('Data {} could not be mapped to scheme {} '
'because key {} was not found', subdata, v, k)
def _generate_urls(mbid):
"""Generates AcousticBrainz end point urls for given `mbid`.
"""
for level in LEVELS:
yield ACOUSTIC_BASE + mbid + level
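# A quick sketch of the endpoints produced by _generate_urls for a made-up
# recording MBID:
for url in _generate_urls("11111111-2222-3333-4444-555555555555"):
    print(url)
# https://acousticbrainz.org/11111111-2222-3333-4444-555555555555/low-level
# https://acousticbrainz.org/11111111-2222-3333-4444-555555555555/high-level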
| 11,826 | Python | .py | 299 | 27.277592 | 80 | 0.520884 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,633 | ipfs.py | rembo10_headphones/lib/beetsplug/ipfs.py |
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds support for ipfs. Requires go-ipfs and a running ipfs daemon
"""
from beets import ui, util, library, config
from beets.plugins import BeetsPlugin
import subprocess
import shutil
import os
import tempfile
class IPFSPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'auto': True,
'nocopy': False,
})
if self.config['auto']:
self.import_stages = [self.auto_add]
def commands(self):
cmd = ui.Subcommand('ipfs',
help='interact with ipfs')
cmd.parser.add_option('-a', '--add', dest='add',
action='store_true',
help='Add to ipfs')
cmd.parser.add_option('-g', '--get', dest='get',
action='store_true',
help='Get from ipfs')
cmd.parser.add_option('-p', '--publish', dest='publish',
action='store_true',
help='Publish local library to ipfs')
cmd.parser.add_option('-i', '--import', dest='_import',
action='store_true',
help='Import remote library from ipfs')
cmd.parser.add_option('-l', '--list', dest='_list',
action='store_true',
help='Query imported libraries')
cmd.parser.add_option('-m', '--play', dest='play',
action='store_true',
help='Play music from remote libraries')
def func(lib, opts, args):
if opts.add:
for album in lib.albums(ui.decargs(args)):
                    if len(album.items()) == 0:
                        self._log.info('{0} does not contain items, '
                                       'skipping', album)
                        continue
self.ipfs_add(album)
album.store()
if opts.get:
self.ipfs_get(lib, ui.decargs(args))
if opts.publish:
self.ipfs_publish(lib)
if opts._import:
self.ipfs_import(lib, ui.decargs(args))
if opts._list:
self.ipfs_list(lib, ui.decargs(args))
if opts.play:
self.ipfs_play(lib, opts, ui.decargs(args))
cmd.func = func
return [cmd]
def auto_add(self, session, task):
if task.is_album:
if self.ipfs_add(task.album):
task.album.store()
def ipfs_play(self, lib, opts, args):
from beetsplug.play import PlayPlugin
jlib = self.get_remote_lib(lib)
player = PlayPlugin()
config['play']['relative_to'] = None
player.album = True
player.play_music(jlib, player, args)
def ipfs_add(self, album):
try:
album_dir = album.item_dir()
except AttributeError:
return False
try:
if album.ipfs:
self._log.debug('{0} already added', album_dir)
# Already added to ipfs
return False
except AttributeError:
pass
self._log.info('Adding {0} to ipfs', album_dir)
if self.config['nocopy']:
cmd = "ipfs add --nocopy -q -r".split()
else:
cmd = "ipfs add -q -r".split()
cmd.append(album_dir)
try:
output = util.command_output(cmd).stdout.split()
except (OSError, subprocess.CalledProcessError) as exc:
self._log.error('Failed to add {0}, error: {1}', album_dir, exc)
return False
length = len(output)
for linenr, line in enumerate(output):
line = line.strip()
if linenr == length - 1:
# last printed line is the album hash
self._log.info("album: {0}", line)
album.ipfs = line
else:
try:
item = album.items()[linenr]
self._log.info("item: {0}", line)
item.ipfs = line
item.store()
except IndexError:
                    # Non-music files in the to-add folder are
                    # ignored here.
pass
return True
def ipfs_get(self, lib, query):
query = query[0]
# Check if query is a hash
# TODO: generalize to other hashes; probably use a multihash
# implementation
if query.startswith("Qm") and len(query) == 46:
self.ipfs_get_from_hash(lib, query)
else:
albums = self.query(lib, query)
for album in albums:
self.ipfs_get_from_hash(lib, album.ipfs)
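# A standalone sketch of the hash check above: version-0 IPFS content IDs are
# base58-encoded multihashes, always 46 characters long and starting with
# "Qm" (the CID below is just an example string of that shape).
def looks_like_cidv0(query):
    return query.startswith("Qm") and len(query) == 46
print(looks_like_cidv0("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"))  # True
print(looks_like_cidv0("some album query"))  # False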
def ipfs_get_from_hash(self, lib, _hash):
try:
cmd = "ipfs get".split()
cmd.append(_hash)
util.command_output(cmd)
except (OSError, subprocess.CalledProcessError) as err:
self._log.error('Failed to get {0} from ipfs.\n{1}',
_hash, err.output)
return False
self._log.info('Getting {0} from ipfs', _hash)
imp = ui.commands.TerminalImportSession(lib, loghandler=None,
query=None, paths=[_hash])
imp.run()
shutil.rmtree(_hash)
def ipfs_publish(self, lib):
with tempfile.NamedTemporaryFile() as tmp:
self.ipfs_added_albums(lib, tmp.name)
try:
if self.config['nocopy']:
cmd = "ipfs add --nocopy -q ".split()
else:
cmd = "ipfs add -q ".split()
cmd.append(tmp.name)
output = util.command_output(cmd).stdout
except (OSError, subprocess.CalledProcessError) as err:
msg = f"Failed to publish library. Error: {err}"
self._log.error(msg)
return False
self._log.info("hash of library: {0}", output)
def ipfs_import(self, lib, args):
_hash = args[0]
if len(args) > 1:
lib_name = args[1]
else:
lib_name = _hash
lib_root = os.path.dirname(lib.path)
remote_libs = os.path.join(lib_root, b"remotes")
if not os.path.exists(remote_libs):
try:
os.makedirs(remote_libs)
except OSError as e:
msg = f"Could not create {remote_libs}. Error: {e}"
self._log.error(msg)
return False
path = os.path.join(remote_libs, lib_name.encode() + b".db")
if not os.path.exists(path):
cmd = f"ipfs get {_hash} -o".split()
cmd.append(path)
try:
util.command_output(cmd)
except (OSError, subprocess.CalledProcessError):
self._log.error(f"Could not import {_hash}")
return False
# add all albums from remotes into a combined library
jpath = os.path.join(remote_libs, b"joined.db")
jlib = library.Library(jpath)
nlib = library.Library(path)
for album in nlib.albums():
if not self.already_added(album, jlib):
new_album = []
for item in album.items():
item.id = None
new_album.append(item)
added_album = jlib.add_album(new_album)
added_album.ipfs = album.ipfs
added_album.store()
def already_added(self, check, jlib):
for jalbum in jlib.albums():
if jalbum.mb_albumid == check.mb_albumid:
return True
return False
def ipfs_list(self, lib, args):
fmt = config['format_album'].get()
try:
albums = self.query(lib, args)
except OSError:
ui.print_("No imported libraries yet.")
return
for album in albums:
ui.print_(format(album, fmt), " : ", album.ipfs.decode())
def query(self, lib, args):
rlib = self.get_remote_lib(lib)
albums = rlib.albums(args)
return albums
def get_remote_lib(self, lib):
lib_root = os.path.dirname(lib.path)
remote_libs = os.path.join(lib_root, b"remotes")
path = os.path.join(remote_libs, b"joined.db")
if not os.path.isfile(path):
raise OSError
return library.Library(path)
def ipfs_added_albums(self, rlib, tmpname):
""" Returns a new library with only albums/items added to ipfs
"""
tmplib = library.Library(tmpname)
for album in rlib.albums():
try:
if album.ipfs:
self.create_new_album(album, tmplib)
except AttributeError:
pass
return tmplib
def create_new_album(self, album, tmplib):
items = []
for item in album.items():
try:
if not item.ipfs:
break
except AttributeError:
pass
item_path = os.path.basename(item.path).decode(
util._fsencoding(), 'ignore'
)
# Clear current path from item
item.path = f'/ipfs/{album.ipfs}/{item_path}'
item.id = None
items.append(item)
if len(items) < 1:
return False
self._log.info("Adding '{0}' to temporary library", album)
new_album = tmplib.add_album(items)
new_album.ipfs = album.ipfs
new_album.store()
| 10,386 | Python | .py | 256 | 27.53125 | 78 | 0.523338 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,634 | thumbnails.py | rembo10_headphones/lib/beetsplug/thumbnails.py |
# This file is part of beets.
# Copyright 2016, Bruno Cauet
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Create freedesktop.org-compliant thumbnails for album folders
This plugin is POSIX-only.
Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html
"""
from hashlib import md5
import os
import shutil
from itertools import chain
from pathlib import PurePosixPath
import ctypes
import ctypes.util
from xdg import BaseDirectory
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs
from beets import util
from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version
BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
NORMAL_DIR = util.bytestring_path(os.path.join(BASE_DIR, "normal"))
LARGE_DIR = util.bytestring_path(os.path.join(BASE_DIR, "large"))
class ThumbnailsPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'auto': True,
'force': False,
'dolphin': False,
})
self.write_metadata = None
if self.config['auto'] and self._check_local_ok():
self.register_listener('art_set', self.process_album)
def commands(self):
thumbnails_command = Subcommand("thumbnails",
help="Create album thumbnails")
thumbnails_command.parser.add_option(
'-f', '--force',
dest='force', action='store_true', default=False,
help='force regeneration of thumbnails deemed fine (existing & '
'recent enough)')
thumbnails_command.parser.add_option(
'--dolphin', dest='dolphin', action='store_true', default=False,
help="create Dolphin-compatible thumbnail information (for KDE)")
thumbnails_command.func = self.process_query
return [thumbnails_command]
def process_query(self, lib, opts, args):
self.config.set_args(opts)
if self._check_local_ok():
for album in lib.albums(decargs(args)):
self.process_album(album)
def _check_local_ok(self):
"""Check that's everythings ready:
- local capability to resize images
- thumbnail dirs exist (create them if needed)
- detect whether we'll use PIL or IM
- detect whether we'll use GIO or Python to get URIs
"""
if not ArtResizer.shared.local:
self._log.warning("No local image resizing capabilities, "
"cannot generate thumbnails")
return False
for dir in (NORMAL_DIR, LARGE_DIR):
if not os.path.exists(dir):
os.makedirs(dir)
if get_im_version():
self.write_metadata = write_metadata_im
tool = "IM"
else:
assert get_pil_version() # since we're local
self.write_metadata = write_metadata_pil
tool = "PIL"
self._log.debug("using {0} to write metadata", tool)
uri_getter = GioURI()
if not uri_getter.available:
uri_getter = PathlibURI()
self._log.debug("using {0.name} to compute URIs", uri_getter)
self.get_uri = uri_getter.uri
return True
def process_album(self, album):
"""Produce thumbnails for the album folder.
"""
self._log.debug('generating thumbnail for {0}', album)
if not album.artpath:
self._log.info('album {0} has no art', album)
return
if self.config['dolphin']:
self.make_dolphin_cover_thumbnail(album)
size = ArtResizer.shared.get_size(album.artpath)
if not size:
self._log.warning('problem getting the picture size for {0}',
album.artpath)
return
wrote = True
if max(size) >= 256:
wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR)
wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR)
if wrote:
self._log.info('wrote thumbnail for {0}', album)
else:
self._log.info('nothing to do for {0}', album)
def make_cover_thumbnail(self, album, size, target_dir):
"""Make a thumbnail of given size for `album` and put it in
`target_dir`.
"""
target = os.path.join(target_dir, self.thumbnail_file_name(album.path))
if os.path.exists(target) and \
os.stat(target).st_mtime > os.stat(album.artpath).st_mtime:
if self.config['force']:
self._log.debug("found a suitable {1}x{1} thumbnail for {0}, "
"forcing regeneration", album, size)
else:
self._log.debug("{1}x{1} thumbnail for {0} exists and is "
"recent enough", album, size)
return False
resized = ArtResizer.shared.resize(size, album.artpath,
util.syspath(target))
self.add_tags(album, util.syspath(resized))
shutil.move(resized, target)
return True
def thumbnail_file_name(self, path):
"""Compute the thumbnail file name
See https://standards.freedesktop.org/thumbnail-spec/latest/x227.html
"""
uri = self.get_uri(path)
hash = md5(uri.encode('utf-8')).hexdigest()
return util.bytestring_path(f"{hash}.png")
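# A standalone sketch of the freedesktop naming rule implemented above: the
# thumbnail file name is the md5 hex digest of the file's URI plus ".png".
# (The URI below is hypothetical.)
from hashlib import md5
uri = "file:///home/user/music/album/cover.jpg"
print(md5(uri.encode('utf-8')).hexdigest() + ".png")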
def add_tags(self, album, image_path):
"""Write required metadata to the thumbnail
See https://standards.freedesktop.org/thumbnail-spec/latest/x142.html
"""
mtime = os.stat(album.artpath).st_mtime
metadata = {"Thumb::URI": self.get_uri(album.artpath),
"Thumb::MTime": str(mtime)}
try:
self.write_metadata(image_path, metadata)
except Exception:
self._log.exception("could not write metadata to {0}",
util.displayable_path(image_path))
def make_dolphin_cover_thumbnail(self, album):
outfilename = os.path.join(album.path, b".directory")
if os.path.exists(outfilename):
return
artfile = os.path.split(album.artpath)[1]
with open(outfilename, 'w') as f:
f.write('[Desktop Entry]\n')
f.write('Icon=./{}'.format(artfile.decode('utf-8')))
self._log.debug("Wrote file {0}", util.displayable_path(outfilename))
def write_metadata_im(file, metadata):
"""Enrich the file metadata with `metadata` dict thanks to IM."""
command = ['convert', file] + \
list(chain.from_iterable(('-set', k, v)
for k, v in metadata.items())) + [file]
util.command_output(command)
return True
def write_metadata_pil(file, metadata):
"""Enrich the file metadata with `metadata` dict thanks to PIL."""
from PIL import Image, PngImagePlugin
im = Image.open(file)
meta = PngImagePlugin.PngInfo()
for k, v in metadata.items():
meta.add_text(k, v, 0)
im.save(file, "PNG", pnginfo=meta)
return True
class URIGetter:
available = False
name = "Abstract base"
def uri(self, path):
raise NotImplementedError()
class PathlibURI(URIGetter):
available = True
name = "Python Pathlib"
def uri(self, path):
return PurePosixPath(util.py3_path(path)).as_uri()
def copy_c_string(c_string):
"""Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python
string and return it. The old memory is then safe to free.
"""
# This is a pretty dumb way to get a string copy, but it seems to
# work. A more surefire way would be to allocate a ctypes buffer and copy
# the data with `memcpy` or somesuch.
s = ctypes.cast(c_string, ctypes.c_char_p).value
return b'' + s
class GioURI(URIGetter):
"""Use gio URI function g_file_get_uri. Paths must be utf-8 encoded.
"""
name = "GIO"
def __init__(self):
self.libgio = self.get_library()
self.available = bool(self.libgio)
if self.available:
self.libgio.g_type_init() # for glib < 2.36
            self.libgio.g_file_new_for_path.argtypes = [ctypes.c_char_p]
self.libgio.g_file_new_for_path.restype = ctypes.c_void_p
self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p]
self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char)
self.libgio.g_object_unref.argtypes = [ctypes.c_void_p]
def get_library(self):
lib_name = ctypes.util.find_library("gio-2")
try:
if not lib_name:
return False
return ctypes.cdll.LoadLibrary(lib_name)
except OSError:
return False
def uri(self, path):
g_file_ptr = self.libgio.g_file_new_for_path(path)
if not g_file_ptr:
raise RuntimeError("No gfile pointer received for {}".format(
util.displayable_path(path)))
try:
uri_ptr = self.libgio.g_file_get_uri(g_file_ptr)
finally:
self.libgio.g_object_unref(g_file_ptr)
if not uri_ptr:
self.libgio.g_free(uri_ptr)
raise RuntimeError("No URI received from the gfile pointer for "
"{}".format(util.displayable_path(path)))
try:
uri = copy_c_string(uri_ptr)
finally:
self.libgio.g_free(uri_ptr)
try:
return uri.decode(util._fsencoding())
except UnicodeDecodeError:
raise RuntimeError(
f"Could not decode filename from GIO: {uri!r}"
)
| 10,269 | Python | .py | 239 | 33.41841 | 79 | 0.61385 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,635 | replaygain.py | rembo10_headphones/lib/beetsplug/replaygain.py |
# This file is part of beets.
# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import collections
import enum
import math
import os
import signal
import subprocess
import sys
import warnings
from multiprocessing.pool import ThreadPool, RUN
from six.moves import queue
from threading import Thread, Event
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import (syspath, command_output, displayable_path,
py3_path, cpu_count)
# Utilities.
class ReplayGainError(Exception):
"""Raised when a local (to a track or an album) error occurs in one
of the backends.
"""
class FatalReplayGainError(Exception):
"""Raised when a fatal error occurs in one of the backends.
"""
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
"""Raised when a fatal error occurs in the GStreamerBackend when
loading the required plugins."""
def call(args, **kwargs):
"""Execute the command and return its output or raise a
ReplayGainError on failure.
"""
try:
return command_output(args, **kwargs)
except subprocess.CalledProcessError as e:
raise ReplayGainError(
"{} exited with status {}".format(args[0], e.returncode)
)
except UnicodeEncodeError:
# Due to a bug in Python 2's subprocess on Windows, Unicode
# filenames can fail to encode on that platform. See:
# https://github.com/google-code-export/beets/issues/499
raise ReplayGainError("argument encoding failed")
def after_version(version_a, version_b):
return tuple(int(s) for s in version_a.split('.')) \
>= tuple(int(s) for s in version_b.split('.'))
def db_to_lufs(db):
"""Convert db to LUFS.
According to https://wiki.hydrogenaud.io/index.php?title=
ReplayGain_2.0_specification#Reference_level
"""
return db - 107
def lufs_to_db(db):
"""Convert LUFS to db.
According to https://wiki.hydrogenaud.io/index.php?title=
ReplayGain_2.0_specification#Reference_level
"""
return db + 107
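# Quick sanity checks for the conversions above: the ReplayGain 2.0 reference
# level of 89 dB corresponds to -18 LUFS, and the EBU R128 target of -23 LUFS
# corresponds to 84 dB (the plugin's default r128_targetlevel below).
assert db_to_lufs(89) == -18
assert lufs_to_db(-23) == 84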
# Backend base and plumbing classes.
# gain: in LU to reference level
# peak: part of full scale (FS is 1.0)
Gain = collections.namedtuple("Gain", "gain peak")
# album_gain: Gain object
# track_gains: list of Gain objects
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Peak(enum.Enum):
none = 0
true = 1
sample = 2
class Backend:
"""An abstract class representing engine for calculating RG values.
"""
do_parallel = False
def __init__(self, config, log):
"""Initialize the backend with the configuration view for the
plugin.
"""
self._log = log
def compute_track_gain(self, items, target_level, peak):
"""Computes the track gain of the given tracks, returns a list
of Gain objects.
"""
raise NotImplementedError()
def compute_album_gain(self, items, target_level, peak):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
raise NotImplementedError()
# ffmpeg backend
class FfmpegBackend(Backend):
"""A replaygain backend using ffmpeg's ebur128 filter.
"""
do_parallel = True
def __init__(self, config, log):
super().__init__(config, log)
self._ffmpeg_path = "ffmpeg"
# check that ffmpeg is installed
try:
ffmpeg_version_out = call([self._ffmpeg_path, "-version"])
except OSError:
raise FatalReplayGainError(
f"could not find ffmpeg at {self._ffmpeg_path}"
)
incompatible_ffmpeg = True
for line in ffmpeg_version_out.stdout.splitlines():
if line.startswith(b"configuration:"):
if b"--enable-libebur128" in line:
incompatible_ffmpeg = False
if line.startswith(b"libavfilter"):
version = line.split(b" ", 1)[1].split(b"/", 1)[0].split(b".")
version = tuple(map(int, version))
if version >= (6, 67, 100):
incompatible_ffmpeg = False
if incompatible_ffmpeg:
raise FatalReplayGainError(
"Installed FFmpeg version does not support ReplayGain."
"calculation. Either libavfilter version 6.67.100 or above or"
"the --enable-libebur128 configuration option is required."
)
def compute_track_gain(self, items, target_level, peak):
"""Computes the track gain of the given tracks, returns a list
of Gain objects (the track gains).
"""
gains = []
for item in items:
gains.append(
self._analyse_item(
item,
target_level,
peak,
count_blocks=False,
)[0] # take only the gain, discarding number of gating blocks
)
return gains
def compute_album_gain(self, items, target_level, peak):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
target_level_lufs = db_to_lufs(target_level)
# analyse tracks
# list of track Gain objects
track_gains = []
# maximum peak
album_peak = 0
# sum of BS.1770 gating block powers
sum_powers = 0
# total number of BS.1770 gating blocks
n_blocks = 0
for item in items:
track_gain, track_n_blocks = self._analyse_item(
item, target_level, peak
)
track_gains.append(track_gain)
# album peak is maximum track peak
album_peak = max(album_peak, track_gain.peak)
# prepare album_gain calculation
# total number of blocks is sum of track blocks
n_blocks += track_n_blocks
# convert `LU to target_level` -> LUFS
track_loudness = target_level_lufs - track_gain.gain
# This reverses ITU-R BS.1770-4 p. 6 equation (5) to convert
# from loudness to power. The result is the average gating
# block power.
track_power = 10**((track_loudness + 0.691) / 10)
# Weight that average power by the number of gating blocks to
# get the sum of all their powers. Add that to the sum of all
# block powers in this album.
sum_powers += track_power * track_n_blocks
# calculate album gain
if n_blocks > 0:
# compare ITU-R BS.1770-4 p. 6 equation (5)
# Album gain is the replaygain of the concatenation of all tracks.
album_gain = -0.691 + 10 * math.log10(sum_powers / n_blocks)
else:
album_gain = -70
# convert LUFS -> `LU to target_level`
album_gain = target_level_lufs - album_gain
self._log.debug(
"{}: gain {} LU, peak {}"
.format(items, album_gain, album_peak)
)
return AlbumGain(Gain(album_gain, album_peak), track_gains)
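# A standalone worked example of the block-power averaging above, with
# hypothetical numbers: two tracks at -20 LUFS (100 gating blocks) and
# -10 LUFS (50 blocks), measured against an 89 dB target (-18 LUFS).
import math
target_lufs = -18
tracks = [(-20.0, 100), (-10.0, 50)]  # (integrated loudness LUFS, blocks)
sum_powers = sum(10 ** ((lufs + 0.691) / 10) * n for lufs, n in tracks)
n_blocks = sum(n for _, n in tracks)
album_lufs = -0.691 + 10 * math.log10(sum_powers / n_blocks)
print(target_lufs - album_lufs)  # ~ -4.02 LU: the louder track dominates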
def _construct_cmd(self, item, peak_method):
"""Construct the shell command to analyse items."""
return [
self._ffmpeg_path,
"-nostats",
"-hide_banner",
"-i",
item.path,
"-map",
"a:0",
"-filter",
f"ebur128=peak={peak_method}",
"-f",
"null",
"-",
]
def _analyse_item(self, item, target_level, peak, count_blocks=True):
"""Analyse item. Return a pair of a Gain object and the number
of gating blocks above the threshold.
If `count_blocks` is False, the number of gating blocks returned
will be 0.
"""
target_level_lufs = db_to_lufs(target_level)
peak_method = peak.name
# call ffmpeg
self._log.debug(f"analyzing {item}")
cmd = self._construct_cmd(item, peak_method)
self._log.debug(
'executing {0}', ' '.join(map(displayable_path, cmd))
)
output = call(cmd).stderr.splitlines()
# parse output
if peak == Peak.none:
peak = 0
else:
line_peak = self._find_line(
output,
f" {peak_method.capitalize()} peak:".encode(),
start_line=len(output) - 1, step_size=-1,
)
peak = self._parse_float(
output[self._find_line(
output, b" Peak:",
line_peak,
)]
)
# convert TPFS -> part of FS
peak = 10**(peak / 20)
line_integrated_loudness = self._find_line(
output, b" Integrated loudness:",
start_line=len(output) - 1, step_size=-1,
)
gain = self._parse_float(
output[self._find_line(
output, b" I:",
line_integrated_loudness,
)]
)
# convert LUFS -> LU from target level
gain = target_level_lufs - gain
# count BS.1770 gating blocks
n_blocks = 0
if count_blocks:
gating_threshold = self._parse_float(
output[self._find_line(
output, b" Threshold:",
start_line=line_integrated_loudness,
)]
)
for line in output:
if not line.startswith(b"[Parsed_ebur128"):
continue
if line.endswith(b"Summary:"):
continue
line = line.split(b"M:", 1)
if len(line) < 2:
continue
if self._parse_float(b"M: " + line[1]) >= gating_threshold:
n_blocks += 1
self._log.debug(
"{}: {} blocks over {} LUFS"
.format(item, n_blocks, gating_threshold)
)
self._log.debug(
"{}: gain {} LU, peak {}"
.format(item, gain, peak)
)
return Gain(gain, peak), n_blocks
def _find_line(self, output, search, start_line=0, step_size=1):
"""Return index of line beginning with `search`.
Begins searching at index `start_line` in `output`.
"""
end_index = len(output) if step_size > 0 else -1
for i in range(start_line, end_index, step_size):
if output[i].startswith(search):
return i
raise ReplayGainError(
"ffmpeg output: missing {} after line {}"
.format(repr(search), start_line)
)
def _parse_float(self, line):
"""Extract a float from a key value pair in `line`.
This format is expected: /[^:]:[[:space:]]*value.*/, where `value` is
the float.
"""
# extract value
value = line.split(b":", 1)
if len(value) < 2:
raise ReplayGainError(
"ffmpeg output: expected key value pair, found {}"
.format(line)
)
value = value[1].lstrip()
# strip unit
value = value.split(b" ", 1)[0]
# cast value to float
try:
return float(value)
except ValueError:
raise ReplayGainError(
"ffmpeg output: expected float value, found {}"
.format(value)
)
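# A standalone sketch of the parsing done by _parse_float: take the text
# after the first ":", strip leading whitespace, and drop the trailing unit.
# (The line below imitates an ffmpeg ebur128 summary line.)
line = b"    I:          -23.5 LUFS"
value = line.split(b":", 1)[1].lstrip().split(b" ", 1)[0]
print(float(value))  # -23.5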
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
do_parallel = True
def __init__(self, config, log):
super().__init__(config, log)
config.add({
'command': "",
'noclip': True,
})
self.command = config["command"].as_str()
if self.command:
# Explicit executable path.
if not os.path.isfile(self.command):
raise FatalReplayGainError(
'replaygain command does not exist: {}'.format(
self.command)
)
else:
# Check whether the program is in $PATH.
for cmd in ('mp3gain', 'aacgain'):
try:
call([cmd, '-v'])
self.command = cmd
except OSError:
pass
if not self.command:
raise FatalReplayGainError(
'no replaygain command found: install mp3gain or aacgain'
)
self.noclip = config['noclip'].get(bool)
def compute_track_gain(self, items, target_level, peak):
"""Computes the track gain of the given tracks, returns a list
of TrackGain objects.
"""
supported_items = list(filter(self.format_supported, items))
output = self.compute_gain(supported_items, target_level, False)
return output
def compute_album_gain(self, items, target_level, peak):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = list(filter(self.format_supported, items))
if len(supported_items) != len(items):
self._log.debug('tracks are of unsupported format')
return AlbumGain(None, [])
output = self.compute_gain(supported_items, target_level, True)
return AlbumGain(output[-1], output[:-1])
def format_supported(self, item):
"""Checks whether the given item is supported by the selected tool.
"""
if 'mp3gain' in self.command and item.format != 'MP3':
return False
elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
return False
return True
def compute_gain(self, items, target_level, is_album):
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
When computing album gain, the last TrackGain object returned is
the album gain
"""
if len(items) == 0:
self._log.debug('no supported tracks to analyze')
return []
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command. The "-o" option makes the output
# easily parseable (tab-delimited). "-s s" forces gain
# recalculation even if tags are already present and disables
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd = [self.command, '-o', '-s', 's']
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + ['-k']
else:
# Disable clipping warning.
cmd = cmd + ['-c']
cmd = cmd + ['-d', str(int(target_level - 89))]
cmd = cmd + [syspath(i.path) for i in items]
self._log.debug('analyzing {0} files', len(items))
self._log.debug("executing {0}", " ".join(map(displayable_path, cmd)))
output = call(cmd).stdout
self._log.debug('analysis finished')
return self.parse_tool_output(output,
len(items) + (1 if is_album else 0))
def parse_tool_output(self, text, num_lines):
"""Given the tab-delimited output from an invocation of mp3gain
or aacgain, parse the text and return a list of dictionaries
containing information about each analyzed file.
"""
out = []
for line in text.split(b'\n')[1:num_lines + 1]:
parts = line.split(b'\t')
if len(parts) != 6 or parts[0] == b'File':
self._log.debug('bad tool output: {0}', text)
raise ReplayGainError('mp3gain failed')
d = {
'file': parts[0],
'mp3gain': int(parts[1]),
'gain': float(parts[2]),
'peak': float(parts[3]) / (1 << 15),
'maxgain': int(parts[4]),
'mingain': int(parts[5]),
}
out.append(Gain(d['gain'], d['peak']))
return out
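# A sketch of one hypothetical tab-delimited tool output line and the field
# mapping used above (the dict keys: file, mp3gain, gain, peak, maxgain,
# mingain); the raw amplitude is normalized by 2**15, i.e. 16-bit full scale.
sample = b"/music/track.mp3\t2\t3.5\t12345.0\t210\t120"
parts = sample.split(b"\t")
print(float(parts[2]), float(parts[3]) / (1 << 15))  # gain in dB, peak 0..1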
# GStreamer-based backend.
class GStreamerBackend(Backend):
def __init__(self, config, log):
super().__init__(config, log)
self._import_gst()
        # Initialize a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert is
        # handled dynamically after decodebin figures out the type of
        # the input file.
self._src = self.Gst.ElementFactory.make("filesrc", "src")
self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
self._res = self.Gst.ElementFactory.make("audioresample", "res")
self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")
if self._src is None or self._decbin is None or self._conv is None \
or self._res is None or self._rg is None:
raise FatalGstreamerPluginReplayGainError(
"Failed to load required GStreamer plugins"
)
# We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
# already exists.
self._rg.set_property("forced", True)
self._sink = self.Gst.ElementFactory.make("fakesink", "sink")
self._pipe = self.Gst.Pipeline()
self._pipe.add(self._src)
self._pipe.add(self._decbin)
self._pipe.add(self._conv)
self._pipe.add(self._res)
self._pipe.add(self._rg)
self._pipe.add(self._sink)
self._src.link(self._decbin)
self._conv.link(self._res)
self._res.link(self._rg)
self._rg.link(self._sink)
self._bus = self._pipe.get_bus()
self._bus.add_signal_watch()
self._bus.connect("message::eos", self._on_eos)
self._bus.connect("message::error", self._on_error)
self._bus.connect("message::tag", self._on_tag)
# Needed for handling the dynamic connection between decodebin
# and audioconvert
self._decbin.connect("pad-added", self._on_pad_added)
self._decbin.connect("pad-removed", self._on_pad_removed)
self._main_loop = self.GLib.MainLoop()
self._files = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
and `GObject` fields on this object.
"""
try:
import gi
except ImportError:
raise FatalReplayGainError(
"Failed to load GStreamer: python-gi not found"
)
try:
gi.require_version('Gst', '1.0')
except ValueError as e:
raise FatalReplayGainError(
f"Failed to load GStreamer 1.0: {e}"
)
from gi.repository import GObject, Gst, GLib
# Calling GObject.threads_init() is not needed for
# PyGObject 3.10.2+
with warnings.catch_warnings():
warnings.simplefilter("ignore")
GObject.threads_init()
Gst.init([sys.argv[0]])
self.GObject = GObject
self.GLib = GLib
self.Gst = Gst
def compute(self, files, target_level, album):
self._error = None
self._files = list(files)
if len(self._files) == 0:
return
self._file_tags = collections.defaultdict(dict)
self._rg.set_property("reference-level", target_level)
if album:
self._rg.set_property("num-tracks", len(self._files))
if self._set_first_file():
self._main_loop.run()
if self._error is not None:
raise self._error
def compute_track_gain(self, items, target_level, peak):
self.compute(items, target_level, False)
if len(self._file_tags) != len(items):
raise ReplayGainError("Some tracks did not receive tags")
ret = []
for item in items:
ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
self._file_tags[item]["TRACK_PEAK"]))
return ret
def compute_album_gain(self, items, target_level, peak):
items = list(items)
self.compute(items, target_level, True)
if len(self._file_tags) != len(items):
raise ReplayGainError("Some items in album did not receive tags")
# Collect track gains.
track_gains = []
for item in items:
try:
gain = self._file_tags[item]["TRACK_GAIN"]
peak = self._file_tags[item]["TRACK_PEAK"]
except KeyError:
raise ReplayGainError("results missing for track")
track_gains.append(Gain(gain, peak))
# Get album gain information from the last track.
last_tags = self._file_tags[items[-1]]
try:
gain = last_tags["ALBUM_GAIN"]
peak = last_tags["ALBUM_PEAK"]
except KeyError:
raise ReplayGainError("results missing for album")
return AlbumGain(Gain(gain, peak), track_gains)
def close(self):
self._bus.remove_signal_watch()
def _on_eos(self, bus, message):
# A file finished playing in all elements of the pipeline. The
# RG tags have already been propagated. If we don't have a next
# file, we stop processing.
if not self._set_next_file():
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
def _on_error(self, bus, message):
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
err, debug = message.parse_error()
f = self._src.get_property("location")
# A GStreamer error, either an unsupported format or a bug.
self._error = ReplayGainError(
f"Error {err!r} - {debug!r} on file {f!r}"
)
def _on_tag(self, bus, message):
tags = message.parse_tag()
def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the newly computed tags. To ensure we store the
            # computed tags, we overwrite the RG values when they are
            # received a second time.
if tag == self.Gst.TAG_TRACK_GAIN:
self._file_tags[self._file]["TRACK_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_TRACK_PEAK:
self._file_tags[self._file]["TRACK_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_GAIN:
self._file_tags[self._file]["ALBUM_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_PEAK:
self._file_tags[self._file]["ALBUM_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_REFERENCE_LEVEL:
self._file_tags[self._file]["REFERENCE_LEVEL"] = \
taglist.get_double(tag)[1]
tags.foreach(handle_tag, None)
def _set_first_file(self):
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", py3_path(syspath(self._file.path)))
self._pipe.set_state(self.Gst.State.PLAYING)
return True
def _set_file(self):
"""Initialize the filesrc element with the next file to be analyzed.
"""
# No more files, we're done
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
# Ensure the filesrc element received the paused state of the
# pipeline in a blocking manner
self._src.sync_state_with_parent()
self._src.get_state(self.Gst.CLOCK_TIME_NONE)
# Ensure the decodebin element receives the paused state of the
# pipeline in a blocking manner
self._decbin.sync_state_with_parent()
self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
# Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", py3_path(syspath(self._file.path)))
self._decbin.link(self._conv)
self._pipe.set_state(self.Gst.State.READY)
return True
def _set_next_file(self):
"""Set the next file to be analyzed while keeping the pipeline
in the PAUSED state so that the rganalysis element can correctly
handle album gain.
"""
# A blocking pause
self._pipe.set_state(self.Gst.State.PAUSED)
self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)
# Try setting the next file
ret = self._set_file()
if ret:
# Seek to the beginning in order to clear the EOS state of the
# various elements of the pipeline
self._pipe.seek_simple(self.Gst.Format.TIME,
self.Gst.SeekFlags.FLUSH,
0)
self._pipe.set_state(self.Gst.State.PLAYING)
return ret
def _on_pad_added(self, decbin, pad):
sink_pad = self._conv.get_compatible_pad(pad, None)
assert(sink_pad is not None)
pad.link(sink_pad)
def _on_pad_removed(self, decbin, pad):
# Called when the decodebin element is disconnected from the
# rest of the pipeline while switching input files
peer = pad.get_peer()
assert(peer is None)
class AudioToolsBackend(Backend):
"""ReplayGain backend that uses `Python Audio Tools
<http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
"""
def __init__(self, config, log):
super().__init__(config, log)
self._import_audiotools()
def _import_audiotools(self):
"""Check whether it's possible to import the necessary modules.
There is no check on the file formats at runtime.
:raises :exc:`ReplayGainError`: if the modules cannot be imported
"""
try:
import audiotools
import audiotools.replaygain
except ImportError:
raise FatalReplayGainError(
"Failed to load audiotools: audiotools not found"
)
self._mod_audiotools = audiotools
self._mod_replaygain = audiotools.replaygain
def open_audio_file(self, item):
"""Open the file to read the PCM stream from the using
``item.path``.
:return: the audiofile instance
:rtype: :class:`audiotools.AudioFile`
:raises :exc:`ReplayGainError`: if the file is not found or the
file format is not supported
"""
try:
audiofile = self._mod_audiotools.open(py3_path(syspath(item.path)))
except OSError:
raise ReplayGainError(
f"File {item.path} was not found"
)
except self._mod_audiotools.UnsupportedFile:
raise ReplayGainError(
f"Unsupported file type {item.format}"
)
return audiofile
def init_replaygain(self, audiofile, item):
"""Return an initialized :class:`audiotools.replaygain.ReplayGain`
instance, which requires the sample rate of the song(s) on which
the ReplayGain values will be computed. The item is passed in case
the sample rate is invalid to log the stored item sample rate.
        :return: initialized replaygain object
:rtype: :class:`audiotools.replaygain.ReplayGain`
:raises: :exc:`ReplayGainError` if the sample rate is invalid
"""
try:
rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            raise ReplayGainError(
                f"Unsupported sample rate {item.samplerate}")
        return rg
def compute_track_gain(self, items, target_level, peak):
"""Compute ReplayGain values for the requested items.
:return list: list of :class:`Gain` objects
"""
return [self._compute_track_gain(item, target_level) for item in items]
def _with_target_level(self, gain, target_level):
"""Return `gain` relative to `target_level`.
        Assumes `gain` is relative to 89 dB.
"""
return gain + (target_level - 89)
def _title_gain(self, rg, audiofile, target_level):
"""Get the gain result pair from PyAudioTools using the `ReplayGain`
instance `rg` for the given `audiofile`.
Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
`ReplayGainError` when the library fails.
"""
try:
# The method needs an audiotools.PCMReader instance that can
# be obtained from an audiofile instance.
gain, peak = rg.title_gain(audiofile.to_pcm())
except ValueError as exc:
# `audiotools.replaygain` can raise a `ValueError` if the sample
# rate is incorrect.
self._log.debug('error in rg.title_gain() call: {}', exc)
raise ReplayGainError('audiotools audio data error')
return self._with_target_level(gain, target_level), peak
def _compute_track_gain(self, item, target_level):
"""Compute ReplayGain value for the requested item.
:rtype: :class:`Gain`
"""
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
        # Each call to title_gain on a ReplayGain object returns the gain
        # and peak of the track.
rg_track_gain, rg_track_peak = self._title_gain(
rg, audiofile, target_level
)
self._log.debug('ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
item.artist, item.title, rg_track_gain, rg_track_peak)
return Gain(gain=rg_track_gain, peak=rg_track_peak)
def compute_album_gain(self, items, target_level, peak):
"""Compute ReplayGain values for the requested album and its items.
:rtype: :class:`AlbumGain`
"""
# The first item is taken and opened to get the sample rate to
# initialize the replaygain object. The object is used for all the
# tracks in the album to get the album values.
item = list(items)[0]
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
track_gains = []
for item in items:
audiofile = self.open_audio_file(item)
rg_track_gain, rg_track_peak = self._title_gain(
rg, audiofile, target_level
)
track_gains.append(
Gain(gain=rg_track_gain, peak=rg_track_peak)
)
self._log.debug('ReplayGain for track {0}: {1:.2f}, {2:.2f}',
item, rg_track_gain, rg_track_peak)
# After getting the values for all tracks, it's possible to get the
# album values.
rg_album_gain, rg_album_peak = rg.album_gain()
rg_album_gain = self._with_target_level(rg_album_gain, target_level)
self._log.debug('ReplayGain for album {0}: {1:.2f}, {2:.2f}',
items[0].album, rg_album_gain, rg_album_peak)
return AlbumGain(
Gain(gain=rg_album_gain, peak=rg_album_peak),
track_gains=track_gains
)
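# A minimal sketch of driving a backend directly (hypothetical `config`,
# `log` and `items`; in normal use ReplayGainPlugin below does this):
#
#     backend = AudioToolsBackend(config, log)
#     track_gains = backend.compute_track_gain(items, target_level=89,
#                                              peak=Peak.true)
#     album_gain = backend.compute_album_gain(items, target_level=89,
#                                             peak=Peak.true)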
class ExceptionWatcher(Thread):
"""Monitors a queue for exceptions asynchronously.
Once an exception occurs, raise it and execute a callback.
"""
def __init__(self, queue, callback):
self._queue = queue
self._callback = callback
self._stopevent = Event()
Thread.__init__(self)
def run(self):
while not self._stopevent.is_set():
try:
exc = self._queue.get_nowait()
self._callback()
raise exc[1].with_traceback(exc[2])
except queue.Empty:
# No exceptions yet, loop back to check
# whether `_stopevent` is set
pass
def join(self, timeout=None):
self._stopevent.set()
Thread.join(self, timeout)
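# A minimal usage sketch for ExceptionWatcher (names are illustrative):
#
#     exc_queue = queue.Queue()
#     watcher = ExceptionWatcher(exc_queue, callback=cleanup)
#     watcher.start()
#     try:
#         do_work()                      # runs in some worker thread
#     except Exception:
#         exc_queue.put(sys.exc_info())  # watcher re-raises, calls cleanup
#     watcher.join()                     # sets the stop event and exits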
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis.
"""
backends = {
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend,
"ffmpeg": FfmpegBackend,
}
peak_methods = {
"true": Peak.true,
"sample": Peak.sample,
}
def __init__(self):
super().__init__()
# default backend is 'command' for backward-compatibility.
self.config.add({
'overwrite': False,
'auto': True,
'backend': 'command',
'threads': cpu_count(),
'parallel_on_import': False,
'per_disc': False,
'peak': 'true',
'targetlevel': 89,
'r128': ['Opus'],
'r128_targetlevel': lufs_to_db(-23),
})
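        # These defaults mirror a config.yaml section such as the following
        # (values shown are the defaults above, not recommendations):
        #
        #     replaygain:
        #         backend: command
        #         auto: yes
        #         overwrite: no
        #         targetlevel: 89
        #         r128: [Opus]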
self.overwrite = self.config['overwrite'].get(bool)
self.per_disc = self.config['per_disc'].get(bool)
# Remember which backend is used for CLI feedback
self.backend_name = self.config['backend'].as_str()
if self.backend_name not in self.backends:
raise ui.UserError(
"Selected ReplayGain backend {} is not supported. "
"Please select one of: {}".format(
self.backend_name,
', '.join(self.backends.keys())
)
)
peak_method = self.config["peak"].as_str()
if peak_method not in self.peak_methods:
raise ui.UserError(
"Selected ReplayGain peak method {} is not supported. "
"Please select one of: {}".format(
peak_method,
', '.join(self.peak_methods.keys())
)
)
self._peak_method = self.peak_methods[peak_method]
# On-import analysis.
if self.config['auto']:
self.register_listener('import_begin', self.import_begin)
self.register_listener('import', self.import_end)
self.import_stages = [self.imported]
# Formats to use R128.
self.r128_whitelist = self.config['r128'].as_str_seq()
try:
self.backend_instance = self.backends[self.backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
f'replaygain initialization failed: {e}')
def should_use_r128(self, item):
"""Checks the plugin setting to decide whether the calculation
should be done using the EBU R128 standard and use R128_ tags instead.
"""
return item.format in self.r128_whitelist
def track_requires_gain(self, item):
return self.overwrite or \
(self.should_use_r128(item) and not item.r128_track_gain) or \
(not self.should_use_r128(item) and
(not item.rg_track_gain or not item.rg_track_peak))
def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need
# recalculation. This way, if any file among an album's tracks
# needs recalculation, we still get an accurate album gain
# value.
return self.overwrite or \
any([self.should_use_r128(item) and
(not item.r128_track_gain or not item.r128_album_gain)
for item in album.items()]) or \
any([not self.should_use_r128(item) and
(not item.rg_album_gain or not item.rg_album_peak)
for item in album.items()])
def store_track_gain(self, item, track_gain):
item.rg_track_gain = track_gain.gain
item.rg_track_peak = track_gain.peak
item.store()
self._log.debug('applied track gain {0} LU, peak {1} of FS',
item.rg_track_gain, item.rg_track_peak)
def store_album_gain(self, item, album_gain):
item.rg_album_gain = album_gain.gain
item.rg_album_peak = album_gain.peak
item.store()
self._log.debug('applied album gain {0} LU, peak {1} of FS',
item.rg_album_gain, item.rg_album_peak)
def store_track_r128_gain(self, item, track_gain):
item.r128_track_gain = track_gain.gain
item.store()
self._log.debug('applied r128 track gain {0} LU',
item.r128_track_gain)
def store_album_r128_gain(self, item, album_gain):
item.r128_album_gain = album_gain.gain
item.store()
self._log.debug('applied r128 album gain {0} LU',
item.r128_album_gain)
def tag_specific_values(self, items):
"""Return some tag specific values.
Returns a tuple (store_track_gain, store_album_gain, target_level,
peak_method).
"""
if any([self.should_use_r128(item) for item in items]):
store_track_gain = self.store_track_r128_gain
store_album_gain = self.store_album_r128_gain
target_level = self.config['r128_targetlevel'].as_number()
peak = Peak.none # R128_* tags do not store the track/album peak
else:
store_track_gain = self.store_track_gain
store_album_gain = self.store_album_gain
target_level = self.config['targetlevel'].as_number()
peak = self._peak_method
return store_track_gain, store_album_gain, target_level, peak
def handle_album(self, album, write, force=False):
"""Compute album and track replay gain store it in all of the
album's items.
If ``write`` is truthy then ``item.write()`` is called for each
item. If replay gain information is already present in all
items, nothing is done.
"""
if not force and not self.album_requires_gain(album):
self._log.info('Skipping album {0}', album)
return
if (any([self.should_use_r128(item) for item in album.items()]) and not
all([self.should_use_r128(item) for item in album.items()])):
self._log.error(
"Cannot calculate gain for album {0} (incompatible formats)",
album)
return
self._log.info('analyzing {0}', album)
tag_vals = self.tag_specific_values(album.items())
store_track_gain, store_album_gain, target_level, peak = tag_vals
discs = {}
if self.per_disc:
for item in album.items():
if discs.get(item.disc) is None:
discs[item.disc] = []
discs[item.disc].append(item)
else:
discs[1] = album.items()
for discnumber, items in discs.items():
def _store_album(album_gain):
if not album_gain or not album_gain.album_gain \
or len(album_gain.track_gains) != len(items):
# In some cases, backends fail to produce a valid
# `album_gain` without throwing FatalReplayGainError
# => raise non-fatal exception & continue
raise ReplayGainError(
"ReplayGain backend `{}` failed "
"for some tracks in album {}"
.format(self.backend_name, album)
)
for item, track_gain in zip(items,
album_gain.track_gains):
store_track_gain(item, track_gain)
store_album_gain(item, album_gain.album_gain)
if write:
item.try_write()
self._log.debug('done analyzing {0}', item)
try:
self._apply(
self.backend_instance.compute_album_gain, args=(),
kwds={
"items": list(items),
"target_level": target_level,
"peak": peak
},
callback=_store_album
)
except ReplayGainError as e:
self._log.info("ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(
f"Fatal replay gain error: {e}")
def handle_track(self, item, write, force=False):
"""Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present
in the item, nothing is done.
"""
if not force and not self.track_requires_gain(item):
self._log.info('Skipping track {0}', item)
return
tag_vals = self.tag_specific_values([item])
store_track_gain, store_album_gain, target_level, peak = tag_vals
def _store_track(track_gains):
if not track_gains or len(track_gains) != 1:
# In some cases, backends fail to produce a valid
# `track_gains` without throwing FatalReplayGainError
# => raise non-fatal exception & continue
raise ReplayGainError(
"ReplayGain backend `{}` failed for track {}"
.format(self.backend_name, item)
)
store_track_gain(item, track_gains[0])
if write:
item.try_write()
self._log.debug('done analyzing {0}', item)
try:
self._apply(
self.backend_instance.compute_track_gain, args=(),
kwds={
"items": [item],
"target_level": target_level,
"peak": peak,
},
callback=_store_track
)
except ReplayGainError as e:
self._log.info("ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(f"Fatal replay gain error: {e}")
def _has_pool(self):
"""Check whether a `ThreadPool` is running instance in `self.pool`
"""
if hasattr(self, 'pool'):
if isinstance(self.pool, ThreadPool) and self.pool._state == RUN:
return True
return False
def open_pool(self, threads):
"""Open a `ThreadPool` instance in `self.pool`
"""
if not self._has_pool() and self.backend_instance.do_parallel:
self.pool = ThreadPool(threads)
self.exc_queue = queue.Queue()
signal.signal(signal.SIGINT, self._interrupt)
self.exc_watcher = ExceptionWatcher(
self.exc_queue, # threads push exceptions here
self.terminate_pool # abort once an exception occurs
)
self.exc_watcher.start()
def _apply(self, func, args, kwds, callback):
if self._has_pool():
def catch_exc(func, exc_queue, log):
"""Wrapper to catch raised exceptions in threads
"""
def wfunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except ReplayGainError as e:
log.info(e.args[0]) # log non-fatal exceptions
except Exception:
exc_queue.put(sys.exc_info())
return wfunc
# Wrap function and callback to catch exceptions
func = catch_exc(func, self.exc_queue, self._log)
callback = catch_exc(callback, self.exc_queue, self._log)
self.pool.apply_async(func, args, kwds, callback)
else:
callback(func(*args, **kwds))
def terminate_pool(self):
"""Terminate the `ThreadPool` instance in `self.pool`
(e.g. stop execution in case of exception)
"""
        # Don't call self._has_pool() here,
        # self.pool._state may not be == RUN
if hasattr(self, 'pool') and isinstance(self.pool, ThreadPool):
self.pool.terminate()
self.pool.join()
# self.exc_watcher.join()
def _interrupt(self, signal, frame):
try:
self._log.info('interrupted')
self.terminate_pool()
sys.exit(0)
except SystemExit:
# Silence raised SystemExit ~ exit(0)
pass
def close_pool(self):
"""Close the `ThreadPool` instance in `self.pool` (if there is one)
"""
if self._has_pool():
self.pool.close()
self.pool.join()
self.exc_watcher.join()
def import_begin(self, session):
"""Handle `import_begin` event -> open pool
"""
threads = self.config['threads'].get(int)
if self.config['parallel_on_import'] \
and self.config['auto'] \
and threads:
self.open_pool(threads)
def import_end(self, paths):
"""Handle `import` event -> close pool
"""
self.close_pool()
def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``.
"""
if self.config['auto']:
if task.is_album:
self.handle_album(task.album, False)
else:
self.handle_track(task.item, False)
def command_func(self, lib, opts, args):
try:
write = ui.should_write(opts.write)
force = opts.force
# Bypass self.open_pool() if called with `--threads 0`
if opts.threads != 0:
threads = opts.threads or self.config['threads'].get(int)
self.open_pool(threads)
if opts.album:
albums = lib.albums(ui.decargs(args))
self._log.info(
"Analyzing {} albums ~ {} backend..."
.format(len(albums), self.backend_name)
)
for album in albums:
self.handle_album(album, write, force)
else:
items = lib.items(ui.decargs(args))
self._log.info(
"Analyzing {} tracks ~ {} backend..."
.format(len(items), self.backend_name)
)
for item in items:
self.handle_track(item, write, force)
self.close_pool()
except (SystemExit, KeyboardInterrupt):
# Silence interrupt exceptions
pass
def commands(self):
"""Return the "replaygain" ui subcommand.
"""
cmd = ui.Subcommand('replaygain', help='analyze for ReplayGain')
cmd.parser.add_album_option()
cmd.parser.add_option(
"-t", "--threads", dest="threads", type=int,
            help='change the number of threads, '
                 'defaults to maximum available processors'
)
cmd.parser.add_option(
"-f", "--force", dest="force", action="store_true", default=False,
help="analyze all files, including those that "
"already have ReplayGain metadata")
cmd.parser.add_option(
"-w", "--write", default=None, action="store_true",
help="write new metadata to files' tags")
cmd.parser.add_option(
"-W", "--nowrite", dest="write", action="store_false",
help="don't write metadata (opposite of -w)")
cmd.func = self.command_func
return [cmd]
| 49,203
|
Python
|
.py
| 1,153
| 31.451865
| 79
| 0.572531
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,636
|
mbsync.py
|
rembo10_headphones/lib/beetsplug/mbsync.py
|
# This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Update library's tags using MusicBrainz.
"""
from beets.plugins import BeetsPlugin, apply_item_changes
from beets import autotag, library, ui, util
from beets.autotag import hooks
from collections import defaultdict
import re
MBID_REGEX = r"(\d|\w){8}-(\d|\w){4}-(\d|\w){4}-(\d|\w){4}-(\d|\w){12}"
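# For example, re.match(MBID_REGEX, "8f3471b5-7e6a-48da-86a9-c1c07a0f47ae")
# matches (an illustrative UUID-shaped id), while a truncated or otherwise
# malformed id does not.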
class MBSyncPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
def commands(self):
cmd = ui.Subcommand('mbsync',
help='update metadata from musicbrainz')
cmd.parser.add_option(
'-p', '--pretend', action='store_true',
help='show all changes but do nothing')
cmd.parser.add_option(
'-m', '--move', action='store_true', dest='move',
help="move files in the library directory")
cmd.parser.add_option(
'-M', '--nomove', action='store_false', dest='move',
help="don't move files in library")
cmd.parser.add_option(
'-W', '--nowrite', action='store_false',
default=None, dest='write',
help="don't write updated metadata to files")
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
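    # Illustrative CLI usage (query and options are hypothetical):
    #     beet mbsync -p artist:Beatles   # preview changes without applying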
def func(self, lib, opts, args):
"""Command handler for the mbsync function.
"""
move = ui.should_move(opts.move)
pretend = opts.pretend
write = ui.should_write(opts.write)
query = ui.decargs(args)
self.singletons(lib, query, move, pretend, write)
self.albums(lib, query, move, pretend, write)
def singletons(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ['singleton:true']):
item_formatted = format(item)
if not item.mb_trackid:
self._log.info('Skipping singleton with no mb_trackid: {0}',
item_formatted)
continue
# Do we have a valid MusicBrainz track ID?
if not re.match(MBID_REGEX, item.mb_trackid):
                self._log.info(
                    'Skipping singleton with invalid mb_trackid: {0}',
                    item_formatted)
continue
# Get the MusicBrainz recording info.
track_info = hooks.track_for_mbid(item.mb_trackid)
if not track_info:
                self._log.info('Recording ID not found: {0} for track {1}',
item.mb_trackid,
item_formatted)
continue
# Apply.
with lib.transaction():
autotag.apply_item_metadata(item, track_info)
apply_item_changes(lib, item, move, pretend, write)
def albums(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for albums matched by
query and their items.
"""
# Process matching albums.
for a in lib.albums(query):
album_formatted = format(a)
if not a.mb_albumid:
self._log.info('Skipping album with no mb_albumid: {0}',
album_formatted)
continue
items = list(a.items())
# Do we have a valid MusicBrainz album ID?
if not re.match(MBID_REGEX, a.mb_albumid):
self._log.info('Skipping album with invalid mb_albumid: {0}',
album_formatted)
continue
# Get the MusicBrainz album information.
album_info = hooks.album_for_mbid(a.mb_albumid)
if not album_info:
self._log.info('Release ID {0} not found for album {1}',
a.mb_albumid,
album_formatted)
continue
# Map release track and recording MBIDs to their information.
# Recordings can appear multiple times on a release, so each MBID
# maps to a list of TrackInfo objects.
releasetrack_index = {}
track_index = defaultdict(list)
for track_info in album_info.tracks:
releasetrack_index[track_info.release_track_id] = track_info
track_index[track_info.track_id].append(track_info)
# Construct a track mapping according to MBIDs (release track MBIDs
# first, if available, and recording MBIDs otherwise). This should
# work for albums that have missing or extra tracks.
mapping = {}
for item in items:
if item.mb_releasetrackid and \
item.mb_releasetrackid in releasetrack_index:
mapping[item] = releasetrack_index[item.mb_releasetrackid]
else:
candidates = track_index[item.mb_trackid]
if len(candidates) == 1:
mapping[item] = candidates[0]
else:
# If there are multiple copies of a recording, they are
# disambiguated using their disc and track number.
for c in candidates:
if (c.medium_index == item.track and
c.medium == item.disc):
mapping[item] = c
break
# Apply.
self._log.debug('applying changes to {}', album_formatted)
with lib.transaction():
autotag.apply_metadata(album_info, mapping)
changed = False
# Find any changed item to apply MusicBrainz changes to album.
any_changed_item = items[0]
for item in items:
item_changed = ui.show_model_changes(item)
changed |= item_changed
if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write)
if not changed:
# No change to any item.
continue
if not pretend:
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
a[key] = any_changed_item[key]
a.store()
# Move album art (and any inconsistent items).
if move and lib.directory in util.ancestry(items[0].path):
self._log.debug('moving album {0}', album_formatted)
a.move()
| 7,421
|
Python
|
.py
| 155
| 33.619355
| 79
| 0.551015
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,637
|
fish.py
|
rembo10_headphones/lib/beetsplug/fish.py
|
# This file is part of beets.
# Copyright 2015, winters jean-marie.
# Copyright 2020, Justin Mayer <https://justinmayer.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This plugin generates tab completions for Beets commands for the Fish shell
<https://fishshell.com/>, including completions for Beets commands, plugin
commands, and option flags. Also generated are completions for all the album
and track fields, suggesting for example `genre:` or `album:` when querying the
Beets database. Completions for the *values* of those fields are not generated
by default but can be added via the `-e` / `--extravalues` flag. For example:
`beet fish -e genre -e albumartist`
"""
from beets.plugins import BeetsPlugin
from beets import library, ui
from beets.ui import commands
from operator import attrgetter
import os
BL_NEED2 = """complete -c beet -n '__fish_beet_needs_command' {} {}\n"""
BL_USE3 = """complete -c beet -n '__fish_beet_using_command {}' {} {}\n"""
BL_SUBS = """complete -c beet -n '__fish_at_level {} ""' {} {}\n"""
BL_EXTRA3 = """complete -c beet -n '__fish_beet_use_extra {}' {} {}\n"""
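# For instance, BL_NEED2.format("-a import", "-f -d 'import new music'")
# renders (roughly; the '-d' text is illustrative) to the completion line:
#     complete -c beet -n '__fish_beet_needs_command' -a import -f -d 'import new music'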
HEAD = '''
function __fish_beet_needs_command
set cmd (commandline -opc)
if test (count $cmd) -eq 1
return 0
end
return 1
end
function __fish_beet_using_command
set cmd (commandline -opc)
set needle (count $cmd)
if test $needle -gt 1
if begin test $argv[1] = $cmd[2];
and not contains -- $cmd[$needle] $FIELDS; end
return 0
end
end
return 1
end
function __fish_beet_use_extra
set cmd (commandline -opc)
set needle (count $cmd)
if test $argv[2] = $cmd[$needle]
return 0
end
return 1
end
'''
class FishPlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand('fish', help='generate Fish shell tab completions')
cmd.func = self.run
cmd.parser.add_option('-f', '--noFields', action='store_true',
default=False,
help='omit album/track field completions')
cmd.parser.add_option(
'-e',
'--extravalues',
action='append',
type='choice',
choices=library.Item.all_keys() +
library.Album.all_keys(),
help='include specified field *values* in completions')
return [cmd]
def run(self, lib, opts, args):
# Gather the commands from Beets core and its plugins.
# Collect the album and track fields.
# If specified, also collect the values for these fields.
# Make a giant string of all the above, formatted in a way that
# allows Fish to do tab completion for the `beet` command.
home_dir = os.path.expanduser("~")
completion_dir = os.path.join(home_dir, '.config/fish/completions')
try:
os.makedirs(completion_dir)
except OSError:
if not os.path.isdir(completion_dir):
raise
completion_file_path = os.path.join(completion_dir, 'beet.fish')
nobasicfields = opts.noFields # Do not complete for album/track fields
        extravalues = opts.extravalues  # e.g., also complete artist names
beetcmds = sorted(
(commands.default_commands +
commands.plugins.commands()),
key=attrgetter('name'))
fields = sorted(set(
library.Album.all_keys() + library.Item.all_keys()))
# Collect commands, their aliases, and their help text
cmd_names_help = []
for cmd in beetcmds:
names = list(cmd.aliases)
names.append(cmd.name)
for name in names:
cmd_names_help.append((name, cmd.help))
# Concatenate the string
totstring = HEAD + "\n"
totstring += get_cmds_list([name[0] for name in cmd_names_help])
totstring += '' if nobasicfields else get_standard_fields(fields)
totstring += get_extravalues(lib, extravalues) if extravalues else ''
totstring += "\n" + "# ====== {} =====".format(
"setup basic beet completion") + "\n" * 2
totstring += get_basic_beet_options()
totstring += "\n" + "# ====== {} =====".format(
"setup field completion for subcommands") + "\n"
totstring += get_subcommands(
cmd_names_help, nobasicfields, extravalues)
# Set up completion for all the command options
totstring += get_all_commands(beetcmds)
with open(completion_file_path, 'w') as fish_file:
fish_file.write(totstring)
def _escape(name):
# Escape ? in fish
if name == "?":
name = "\\" + name
return name
def get_cmds_list(cmds_names):
# Make a list of all Beets core & plugin commands
substr = ''
substr += (
"set CMDS " + " ".join(cmds_names) + ("\n" * 2)
)
return substr
def get_standard_fields(fields):
# Make a list of album/track fields and append with ':'
fields = (field + ":" for field in fields)
substr = ''
substr += (
"set FIELDS " + " ".join(fields) + ("\n" * 2)
)
return substr
def get_extravalues(lib, extravalues):
# Make a list of all values from an album/track field.
# 'beet ls albumartist: <TAB>' yields completions for ABBA, Beatles, etc.
word = ''
values_set = get_set_of_values_for_field(lib, extravalues)
for fld in extravalues:
extraname = fld.upper() + 'S'
word += (
"set " + extraname + " " + " ".join(sorted(values_set[fld]))
+ ("\n" * 2)
)
return word
def get_set_of_values_for_field(lib, fields):
# Get unique values from a specified album/track field
fields_dict = {}
for each in fields:
fields_dict[each] = set()
for item in lib.items():
for field in fields:
fields_dict[field].add(wrap(item[field]))
return fields_dict
def get_basic_beet_options():
word = (
BL_NEED2.format("-l format-item",
"-f -d 'print with custom format'") +
BL_NEED2.format("-l format-album",
"-f -d 'print with custom format'") +
BL_NEED2.format("-s l -l library",
"-f -r -d 'library database file to use'") +
BL_NEED2.format("-s d -l directory",
"-f -r -d 'destination music directory'") +
BL_NEED2.format("-s v -l verbose",
"-f -d 'print debugging information'") +
BL_NEED2.format("-s c -l config",
"-f -r -d 'path to configuration file'") +
BL_NEED2.format("-s h -l help",
"-f -d 'print this help message and exit'"))
return word
def get_subcommands(cmd_name_and_help, nobasicfields, extravalues):
# Formatting for Fish to complete our fields/values
word = ""
for cmdname, cmdhelp in cmd_name_and_help:
cmdname = _escape(cmdname)
word += "\n" + "# ------ {} -------".format(
"fieldsetups for " + cmdname) + "\n"
word += (
BL_NEED2.format(
("-a " + cmdname),
("-f " + "-d " + wrap(clean_whitespace(cmdhelp)))))
if nobasicfields is False:
word += (
BL_USE3.format(
cmdname,
("-a " + wrap("$FIELDS")),
("-f " + "-d " + wrap("fieldname"))))
if extravalues:
for f in extravalues:
setvar = wrap("$" + f.upper() + "S")
word += " ".join(BL_EXTRA3.format(
(cmdname + " " + f + ":"),
('-f ' + '-A ' + '-a ' + setvar),
('-d ' + wrap(f))).split()) + "\n"
return word
def get_all_commands(beetcmds):
# Formatting for Fish to complete command options
word = ""
for cmd in beetcmds:
names = list(cmd.aliases)
names.append(cmd.name)
for name in names:
name = _escape(name)
word += "\n"
word += ("\n" * 2) + "# ====== {} =====".format(
"completions for " + name) + "\n"
for option in cmd.parser._get_all_options()[1:]:
cmd_l = (" -l " + option._long_opts[0].replace('--', '')
)if option._long_opts else ''
cmd_s = (" -s " + option._short_opts[0].replace('-', '')
) if option._short_opts else ''
cmd_need_arg = ' -r ' if option.nargs in [1] else ''
cmd_helpstr = (" -d " + wrap(' '.join(option.help.split()))
) if option.help else ''
cmd_arglist = (' -a ' + wrap(" ".join(option.choices))
) if option.choices else ''
word += " ".join(BL_USE3.format(
name,
(cmd_need_arg + cmd_s + cmd_l + " -f " + cmd_arglist),
cmd_helpstr).split()) + "\n"
word = (word + " ".join(BL_USE3.format(
name,
("-s " + "h " + "-l " + "help" + " -f "),
('-d ' + wrap("print help") + "\n")
).split()))
return word
def clean_whitespace(word):
# Remove excess whitespace and tabs in a string
return " ".join(word.split())
def wrap(word):
# Need " or ' around strings but watch out if they're in the string
    if '"' in word and "'" in word:
        # Escape the double quotes and wrap in double quotes;
        # str.replace returns a new string, so reassign it.
        word = word.replace('"', '\\"')
        return '"' + word + '"'
tok = '"' if "'" in word else "'"
return tok + word + tok
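# Illustrative behaviour of wrap() on hypothetical inputs:
#     wrap("beatles")   -> 'beatles'
#     wrap("it's")      -> "it's"
#     wrap('say "hi"')  -> 'say "hi"'
# With both quote types present, the double quotes are escaped and the
# whole string is wrapped in double quotes.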
| 10,224
|
Python
|
.py
| 245
| 32.595918
| 79
| 0.560419
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,638
|
aura.py
|
rembo10_headphones/lib/beetsplug/aura.py
|
# This file is part of beets.
# Copyright 2020, Callum Brown.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""An AURA server using Flask."""
from mimetypes import guess_type
import re
import os.path
from os.path import isfile, getsize
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, _open_library
from beets import config
from beets.util import py3_path
from beets.library import Item, Album
from beets.dbcore.query import (
MatchQuery,
NotQuery,
RegexpQuery,
AndQuery,
FixedFieldSort,
SlowFieldSort,
MultipleSort,
)
from flask import (
Blueprint,
Flask,
current_app,
send_file,
make_response,
request,
)
# Constants
# AURA server information
# TODO: Add version information
SERVER_INFO = {
"aura-version": "0",
"server": "beets-aura",
"server-version": "0.1",
"auth-required": False,
"features": ["albums", "artists", "images"],
}
# Maps AURA Track attribute to beets Item attribute
TRACK_ATTR_MAP = {
# Required
"title": "title",
"artist": "artist",
# Optional
"album": "album",
"track": "track", # Track number on album
"tracktotal": "tracktotal",
"disc": "disc",
"disctotal": "disctotal",
"year": "year",
"month": "month",
"day": "day",
"bpm": "bpm",
"genre": "genre",
"recording-mbid": "mb_trackid", # beets trackid is MB recording
"track-mbid": "mb_releasetrackid",
"composer": "composer",
"albumartist": "albumartist",
"comments": "comments",
# Optional for Audio Metadata
# TODO: Support the mimetype attribute, format != mime type
# "mimetype": track.format,
"duration": "length",
"framerate": "samplerate",
# I don't think beets has a framecount field
# "framecount": ???,
"channels": "channels",
"bitrate": "bitrate",
"bitdepth": "bitdepth",
"size": "filesize",
}
# Maps AURA Album attribute to beets Album attribute
ALBUM_ATTR_MAP = {
# Required
"title": "album",
"artist": "albumartist",
# Optional
"tracktotal": "albumtotal",
"disctotal": "disctotal",
"year": "year",
"month": "month",
"day": "day",
"genre": "genre",
"release-mbid": "mb_albumid",
"release-group-mbid": "mb_releasegroupid",
}
# Maps AURA Artist attribute to beets Item field
# Artists are not first-class in beets, so information is extracted from
# beets Items.
ARTIST_ATTR_MAP = {
# Required
"name": "artist",
# Optional
"artist-mbid": "mb_artistid",
}
class AURADocument:
"""Base class for building AURA documents."""
@staticmethod
def error(status, title, detail):
"""Make a response for an error following the JSON:API spec.
Args:
status: An HTTP status code string, e.g. "404 Not Found".
title: A short, human-readable summary of the problem.
detail: A human-readable explanation specific to this
occurrence of the problem.
"""
document = {
"errors": [{"status": status, "title": title, "detail": detail}]
}
return make_response(document, status)
def translate_filters(self):
"""Translate filters from request arguments to a beets Query."""
# The format of each filter key in the request parameter is:
# filter[<attribute>]. This regex extracts <attribute>.
pattern = re.compile(r"filter\[(?P<attribute>[a-zA-Z0-9_-]+)\]")
queries = []
for key, value in request.args.items():
match = pattern.match(key)
if match:
# Extract attribute name from key
aura_attr = match.group("attribute")
# Get the beets version of the attribute name
beets_attr = self.attribute_map.get(aura_attr, aura_attr)
converter = self.get_attribute_converter(beets_attr)
value = converter(value)
# Add exact match query to list
# Use a slow query so it works with all fields
queries.append(MatchQuery(beets_attr, value, fast=False))
# NOTE: AURA doesn't officially support multiple queries
return AndQuery(queries)
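    # For example, a request such as GET /aura/tracks?filter[genre]=jazz
    # (hypothetical) produces AndQuery([MatchQuery("genre", "jazz",
    # fast=False)]).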
def translate_sorts(self, sort_arg):
"""Translate an AURA sort parameter into a beets Sort.
Args:
sort_arg: The value of the 'sort' query parameter; a comma
separated list of fields to sort by, in order.
E.g. "-year,title".
"""
# Change HTTP query parameter to a list
aura_sorts = sort_arg.strip(",").split(",")
sorts = []
for aura_attr in aura_sorts:
if aura_attr[0] == "-":
ascending = False
# Remove leading "-"
aura_attr = aura_attr[1:]
else:
# JSON:API default
ascending = True
# Get the beets version of the attribute name
beets_attr = self.attribute_map.get(aura_attr, aura_attr)
# Use slow sort so it works with all fields (inc. computed)
sorts.append(SlowFieldSort(beets_attr, ascending=ascending))
return MultipleSort(sorts)
def paginate(self, collection):
"""Get a page of the collection and the URL to the next page.
Args:
collection: The raw data from which resource objects can be
built. Could be an sqlite3.Cursor object (tracks and
albums) or a list of strings (artists).
"""
# Pages start from zero
page = request.args.get("page", 0, int)
# Use page limit defined in config by default.
default_limit = config["aura"]["page_limit"].get(int)
limit = request.args.get("limit", default_limit, int)
# start = offset of first item to return
start = page * limit
# end = offset of last item + 1
end = start + limit
if end > len(collection):
end = len(collection)
next_url = None
else:
# Not the last page so work out links.next url
if not request.args:
# No existing arguments, so current page is 0
next_url = request.url + "?page=1"
elif not request.args.get("page", None):
# No existing page argument, so add one to the end
next_url = request.url + "&page=1"
else:
# Increment page token by 1
next_url = request.url.replace(
f"page={page}", "page={}".format(page + 1)
)
# Get only the items in the page range
data = [self.resource_object(collection[i]) for i in range(start, end)]
return data, next_url
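    # For example, a (hypothetical) request with ?page=1&limit=100 against
    # a 250-item collection returns items 100-199 plus a links.next URL
    # with page=2; page 2 then returns items 200-249 and no next URL.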
def get_included(self, data, include_str):
"""Build a list of resource objects for inclusion.
Args:
data: An array of dicts in the form of resource objects.
include_str: A comma separated list of resource types to
include. E.g. "tracks,images".
"""
# Change HTTP query parameter to a list
to_include = include_str.strip(",").split(",")
# Build a list of unique type and id combinations
        # For each resource object in the primary data, iterate over its
# relationships. If a relationship matches one of the types
# requested for inclusion (e.g. "albums") then add each type-id pair
# under the "data" key to unique_identifiers, checking first that
# it has not already been added. This ensures that no resources are
# included more than once.
unique_identifiers = []
for res_obj in data:
for rel_name, rel_obj in res_obj["relationships"].items():
if rel_name in to_include:
# NOTE: Assumes relationship is to-many
for identifier in rel_obj["data"]:
if identifier not in unique_identifiers:
unique_identifiers.append(identifier)
# TODO: I think this could be improved
included = []
for identifier in unique_identifiers:
res_type = identifier["type"]
if res_type == "track":
track_id = int(identifier["id"])
track = current_app.config["lib"].get_item(track_id)
included.append(TrackDocument.resource_object(track))
elif res_type == "album":
album_id = int(identifier["id"])
album = current_app.config["lib"].get_album(album_id)
included.append(AlbumDocument.resource_object(album))
elif res_type == "artist":
artist_id = identifier["id"]
included.append(ArtistDocument.resource_object(artist_id))
elif res_type == "image":
image_id = identifier["id"]
included.append(ImageDocument.resource_object(image_id))
else:
raise ValueError(f"Invalid resource type: {res_type}")
return included
def all_resources(self):
"""Build document for /tracks, /albums or /artists."""
query = self.translate_filters()
sort_arg = request.args.get("sort", None)
if sort_arg:
sort = self.translate_sorts(sort_arg)
# For each sort field add a query which ensures all results
# have a non-empty, non-zero value for that field.
for s in sort.sorts:
query.subqueries.append(
NotQuery(
# Match empty fields (^$) or zero fields, (^0$)
RegexpQuery(s.field, "(^$|^0$)", fast=False)
)
)
else:
sort = None
# Get information from the library
collection = self.get_collection(query=query, sort=sort)
# Convert info to AURA form and paginate it
data, next_url = self.paginate(collection)
document = {"data": data}
# If there are more pages then provide a way to access them
if next_url:
document["links"] = {"next": next_url}
# Include related resources for each element in "data"
include_str = request.args.get("include", None)
if include_str:
document["included"] = self.get_included(data, include_str)
return document
def single_resource_document(self, resource_object):
"""Build document for a specific requested resource.
Args:
resource_object: A dictionary in the form of a JSON:API
resource object.
"""
document = {"data": resource_object}
include_str = request.args.get("include", None)
if include_str:
# [document["data"]] is because arg needs to be list
document["included"] = self.get_included(
[document["data"]], include_str
)
return document
class TrackDocument(AURADocument):
"""Class for building documents for /tracks endpoints."""
attribute_map = TRACK_ATTR_MAP
def get_collection(self, query=None, sort=None):
"""Get Item objects from the library.
Args:
query: A beets Query object or a beets query string.
sort: A beets Sort object.
"""
return current_app.config["lib"].items(query, sort)
def get_attribute_converter(self, beets_attr):
"""Work out what data type an attribute should be for beets.
Args:
beets_attr: The name of the beets attribute, e.g. "title".
"""
# filesize is a special field (read from disk not db?)
if beets_attr == "filesize":
converter = int
else:
try:
# Look for field in list of Item fields
# and get python type of database type.
# See beets.library.Item and beets.dbcore.types
converter = Item._fields[beets_attr].model_type
except KeyError:
# Fall back to string (NOTE: probably not good)
converter = str
return converter
@staticmethod
def resource_object(track):
"""Construct a JSON:API resource object from a beets Item.
Args:
track: A beets Item object.
"""
attributes = {}
# Use aura => beets attribute map, e.g. size => filesize
for aura_attr, beets_attr in TRACK_ATTR_MAP.items():
a = getattr(track, beets_attr)
# Only set attribute if it's not None, 0, "", etc.
# NOTE: This could result in required attributes not being set
if a:
attributes[aura_attr] = a
# JSON:API one-to-many relationship to parent album
relationships = {
"artists": {"data": [{"type": "artist", "id": track.artist}]}
}
# Only add album relationship if not singleton
if not track.singleton:
relationships["albums"] = {
"data": [{"type": "album", "id": str(track.album_id)}]
}
return {
"type": "track",
"id": str(track.id),
"attributes": attributes,
"relationships": relationships,
}
def single_resource(self, track_id):
"""Get track from the library and build a document.
Args:
track_id: The beets id of the track (integer).
"""
track = current_app.config["lib"].get_item(track_id)
if not track:
return self.error(
"404 Not Found",
"No track with the requested id.",
"There is no track with an id of {} in the library.".format(
track_id
),
)
return self.single_resource_document(self.resource_object(track))
class AlbumDocument(AURADocument):
"""Class for building documents for /albums endpoints."""
attribute_map = ALBUM_ATTR_MAP
def get_collection(self, query=None, sort=None):
"""Get Album objects from the library.
Args:
query: A beets Query object or a beets query string.
sort: A beets Sort object.
"""
return current_app.config["lib"].albums(query, sort)
def get_attribute_converter(self, beets_attr):
"""Work out what data type an attribute should be for beets.
Args:
beets_attr: The name of the beets attribute, e.g. "title".
"""
try:
# Look for field in list of Album fields
# and get python type of database type.
# See beets.library.Album and beets.dbcore.types
converter = Album._fields[beets_attr].model_type
except KeyError:
# Fall back to string (NOTE: probably not good)
converter = str
return converter
@staticmethod
def resource_object(album):
"""Construct a JSON:API resource object from a beets Album.
Args:
album: A beets Album object.
"""
attributes = {}
# Use aura => beets attribute name map
for aura_attr, beets_attr in ALBUM_ATTR_MAP.items():
a = getattr(album, beets_attr)
# Only set attribute if it's not None, 0, "", etc.
# NOTE: This could mean required attributes are not set
if a:
attributes[aura_attr] = a
# Get beets Item objects for all tracks in the album sorted by
# track number. Sorting is not required but it's nice.
query = MatchQuery("album_id", album.id)
sort = FixedFieldSort("track", ascending=True)
tracks = current_app.config["lib"].items(query, sort)
# JSON:API one-to-many relationship to tracks on the album
relationships = {
"tracks": {
"data": [{"type": "track", "id": str(t.id)} for t in tracks]
}
}
# Add images relationship if album has associated images
if album.artpath:
path = py3_path(album.artpath)
filename = path.split("/")[-1]
image_id = f"album-{album.id}-{filename}"
relationships["images"] = {
"data": [{"type": "image", "id": image_id}]
}
# Add artist relationship if artist name is same on tracks
        # Tracks are used to define artists, so don't use albumartist.
# Check for all tracks in case some have featured artists
if album.albumartist in [t.artist for t in tracks]:
relationships["artists"] = {
"data": [{"type": "artist", "id": album.albumartist}]
}
return {
"type": "album",
"id": str(album.id),
"attributes": attributes,
"relationships": relationships,
}
def single_resource(self, album_id):
"""Get album from the library and build a document.
Args:
album_id: The beets id of the album (integer).
"""
album = current_app.config["lib"].get_album(album_id)
if not album:
return self.error(
"404 Not Found",
"No album with the requested id.",
"There is no album with an id of {} in the library.".format(
album_id
),
)
return self.single_resource_document(self.resource_object(album))
class ArtistDocument(AURADocument):
"""Class for building documents for /artists endpoints."""
attribute_map = ARTIST_ATTR_MAP
def get_collection(self, query=None, sort=None):
"""Get a list of artist names from the library.
Args:
query: A beets Query object or a beets query string.
sort: A beets Sort object.
"""
# Gets only tracks with matching artist information
tracks = current_app.config["lib"].items(query, sort)
collection = []
for track in tracks:
# Do not add duplicates
if track.artist not in collection:
collection.append(track.artist)
return collection
def get_attribute_converter(self, beets_attr):
"""Work out what data type an attribute should be for beets.
Args:
beets_attr: The name of the beets attribute, e.g. "artist".
"""
try:
# Look for field in list of Item fields
# and get python type of database type.
# See beets.library.Item and beets.dbcore.types
converter = Item._fields[beets_attr].model_type
except KeyError:
# Fall back to string (NOTE: probably not good)
converter = str
return converter
@staticmethod
def resource_object(artist_id):
"""Construct a JSON:API resource object for the given artist.
Args:
artist_id: A string which is the artist's name.
"""
# Get tracks where artist field exactly matches artist_id
query = MatchQuery("artist", artist_id)
tracks = current_app.config["lib"].items(query)
if not tracks:
return None
# Get artist information from the first track
# NOTE: It could be that the first track doesn't have a
# MusicBrainz id but later tracks do, which isn't ideal.
attributes = {}
# Use aura => beets attribute map, e.g. artist => name
for aura_attr, beets_attr in ARTIST_ATTR_MAP.items():
a = getattr(tracks[0], beets_attr)
# Only set attribute if it's not None, 0, "", etc.
# NOTE: This could mean required attributes are not set
if a:
attributes[aura_attr] = a
relationships = {
"tracks": {
"data": [{"type": "track", "id": str(t.id)} for t in tracks]
}
}
album_query = MatchQuery("albumartist", artist_id)
albums = current_app.config["lib"].albums(query=album_query)
if len(albums) != 0:
relationships["albums"] = {
"data": [{"type": "album", "id": str(a.id)} for a in albums]
}
return {
"type": "artist",
"id": artist_id,
"attributes": attributes,
"relationships": relationships,
}
def single_resource(self, artist_id):
"""Get info for the requested artist and build a document.
Args:
artist_id: A string which is the artist's name.
"""
artist_resource = self.resource_object(artist_id)
if not artist_resource:
return self.error(
"404 Not Found",
"No artist with the requested id.",
"There is no artist with an id of {} in the library.".format(
artist_id
),
)
return self.single_resource_document(artist_resource)
def safe_filename(fn):
"""Check whether a string is a simple (non-path) filename.
For example, `foo.txt` is safe because it is a "plain" filename. But
`foo/bar.txt` and `../foo.txt` and `.` are all non-safe because they
can traverse to other directories other than the current one.
"""
# Rule out any directories.
if os.path.basename(fn) != fn:
return False
# In single names, rule out Unix directory traversal names.
if fn in ('.', '..'):
return False
return True
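# For example:
#     safe_filename("cover.jpg")      -> True
#     safe_filename("art/cover.jpg")  -> False  (contains a directory)
#     safe_filename("..")             -> False  (directory traversal)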
class ImageDocument(AURADocument):
"""Class for building documents for /images/(id) endpoints."""
@staticmethod
def get_image_path(image_id):
"""Works out the full path to the image with the given id.
Returns None if there is no such image.
Args:
image_id: A string in the form
"<parent_type>-<parent_id>-<img_filename>".
"""
# Split image_id into its constituent parts
id_split = image_id.split("-")
if len(id_split) < 3:
# image_id is not in the required format
return None
parent_type = id_split[0]
parent_id = id_split[1]
img_filename = "-".join(id_split[2:])
if not safe_filename(img_filename):
return None
# Get the path to the directory parent's images are in
if parent_type == "album":
album = current_app.config["lib"].get_album(int(parent_id))
if not album or not album.artpath:
return None
# Cut the filename off of artpath
# This is in preparation for supporting images in the same
# directory that are not tracked by beets.
artpath = py3_path(album.artpath)
dir_path = "/".join(artpath.split("/")[:-1])
else:
# Images for other resource types are not supported
return None
img_path = os.path.join(dir_path, img_filename)
# Check the image actually exists
if isfile(img_path):
return img_path
else:
return None
@staticmethod
def resource_object(image_id):
"""Construct a JSON:API resource object for the given image.
Args:
image_id: A string in the form
"<parent_type>-<parent_id>-<img_filename>".
"""
# Could be called as a static method, so can't use
# self.get_image_path()
image_path = ImageDocument.get_image_path(image_id)
if not image_path:
return None
attributes = {
"role": "cover",
"mimetype": guess_type(image_path)[0],
"size": getsize(image_path),
}
try:
from PIL import Image
except ImportError:
pass
else:
im = Image.open(image_path)
attributes["width"] = im.width
attributes["height"] = im.height
relationships = {}
# Split id into [parent_type, parent_id, filename]
id_split = image_id.split("-")
relationships[id_split[0] + "s"] = {
"data": [{"type": id_split[0], "id": id_split[1]}]
}
return {
"id": image_id,
"type": "image",
# Remove attributes that are None, 0, "", etc.
"attributes": {k: v for k, v in attributes.items() if v},
"relationships": relationships,
}
def single_resource(self, image_id):
"""Get info for the requested image and build a document.
Args:
image_id: A string in the form
"<parent_type>-<parent_id>-<img_filename>".
"""
image_resource = self.resource_object(image_id)
if not image_resource:
return self.error(
"404 Not Found",
"No image with the requested id.",
"There is no image with an id of {} in the library.".format(
image_id
),
)
return self.single_resource_document(image_resource)
# Initialise flask blueprint
aura_bp = Blueprint("aura_bp", __name__)
@aura_bp.route("/server")
def server_info():
"""Respond with info about the server."""
return {"data": {"type": "server", "id": "0", "attributes": SERVER_INFO}}
# Track endpoints
@aura_bp.route("/tracks")
def all_tracks():
"""Respond with a list of all tracks and related information."""
doc = TrackDocument()
return doc.all_resources()
@aura_bp.route("/tracks/<int:track_id>")
def single_track(track_id):
"""Respond with info about the specified track.
Args:
track_id: The id of the track provided in the URL (integer).
"""
doc = TrackDocument()
return doc.single_resource(track_id)
@aura_bp.route("/tracks/<int:track_id>/audio")
def audio_file(track_id):
"""Supply an audio file for the specified track.
Args:
track_id: The id of the track provided in the URL (integer).
"""
track = current_app.config["lib"].get_item(track_id)
if not track:
return AURADocument.error(
"404 Not Found",
"No track with the requested id.",
"There is no track with an id of {} in the library.".format(
track_id
),
)
path = py3_path(track.path)
if not isfile(path):
return AURADocument.error(
"404 Not Found",
"No audio file for the requested track.",
(
"There is no audio file for track {} at the expected location"
).format(track_id),
)
file_mimetype = guess_type(path)[0]
if not file_mimetype:
return AURADocument.error(
"500 Internal Server Error",
"Requested audio file has an unknown mimetype.",
(
"The audio file for track {} has an unknown mimetype. "
"Its file extension is {}."
).format(track_id, path.split(".")[-1]),
)
# Check that the Accept header contains the file's mimetype
# Takes into account */* and audio/*
# Adding support for the bitrate parameter would require some effort so I
# left it out. This means the client could be sent an error even if the
# audio doesn't need transcoding.
if not request.accept_mimetypes.best_match([file_mimetype]):
return AURADocument.error(
"406 Not Acceptable",
"Unsupported MIME type or bitrate parameter in Accept header.",
(
"The audio file for track {} is only available as {} and "
"bitrate parameters are not supported."
).format(track_id, file_mimetype),
)
return send_file(
path,
mimetype=file_mimetype,
# Handles filename in Content-Disposition header
as_attachment=True,
# Tries to upgrade the stream to support range requests
conditional=True,
)
# Album endpoints
@aura_bp.route("/albums")
def all_albums():
"""Respond with a list of all albums and related information."""
doc = AlbumDocument()
return doc.all_resources()
@aura_bp.route("/albums/<int:album_id>")
def single_album(album_id):
"""Respond with info about the specified album.
Args:
album_id: The id of the album provided in the URL (integer).
"""
doc = AlbumDocument()
return doc.single_resource(album_id)
# Artist endpoints
# Artist ids are their names
@aura_bp.route("/artists")
def all_artists():
"""Respond with a list of all artists and related information."""
doc = ArtistDocument()
return doc.all_resources()
# Using the path converter allows slashes in artist_id
@aura_bp.route("/artists/<path:artist_id>")
def single_artist(artist_id):
"""Respond with info about the specified artist.
Args:
artist_id: The id of the artist provided in the URL. A string
which is the artist's name.
"""
doc = ArtistDocument()
return doc.single_resource(artist_id)
# Image endpoints
# Image ids are in the form <parent_type>-<parent_id>-<img_filename>
# For example: album-13-cover.jpg
@aura_bp.route("/images/<string:image_id>")
def single_image(image_id):
"""Respond with info about the specified image.
Args:
image_id: The id of the image provided in the URL. A string in
the form "<parent_type>-<parent_id>-<img_filename>".
"""
doc = ImageDocument()
return doc.single_resource(image_id)
@aura_bp.route("/images/<string:image_id>/file")
def image_file(image_id):
"""Supply an image file for the specified image.
Args:
image_id: The id of the image provided in the URL. A string in
the form "<parent_type>-<parent_id>-<img_filename>".
"""
img_path = ImageDocument.get_image_path(image_id)
if not img_path:
return AURADocument.error(
"404 Not Found",
"No image with the requested id.",
"There is no image with an id of {} in the library".format(
image_id
),
)
return send_file(img_path)
# WSGI app
def create_app():
"""An application factory for use by a WSGI server."""
config["aura"].add(
{
"host": "127.0.0.1",
"port": 8337,
"cors": [],
"cors_supports_credentials": False,
"page_limit": 500,
}
)
app = Flask(__name__)
# Register AURA blueprint view functions under a URL prefix
app.register_blueprint(aura_bp, url_prefix="/aura")
# AURA specifies mimetype MUST be this
app.config["JSONIFY_MIMETYPE"] = "application/vnd.api+json"
# Disable auto-sorting of JSON keys
app.config["JSON_SORT_KEYS"] = False
# Provide a way to access the beets library
# The normal method of using the Library and config provided in the
# command function is not used because create_app() could be called
# by an external WSGI server.
# NOTE: this uses a 'private' function from beets.ui.__init__
app.config["lib"] = _open_library(config)
# Enable CORS if required
cors = config["aura"]["cors"].as_str_seq(list)
if cors:
from flask_cors import CORS
# "Accept" is the only header clients use
app.config["CORS_ALLOW_HEADERS"] = "Accept"
app.config["CORS_RESOURCES"] = {r"/aura/*": {"origins": cors}}
app.config["CORS_SUPPORTS_CREDENTIALS"] = config["aura"][
"cors_supports_credentials"
].get(bool)
CORS(app)
return app
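# Because create_app() is a standard Flask application factory, the server
# can also be run under an external WSGI server, e.g. (illustrative):
#
#     gunicorn 'beetsplug.aura:create_app()'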
# Beets Plugin Hook
class AURAPlugin(BeetsPlugin):
"""The BeetsPlugin subclass for the AURA server plugin."""
def __init__(self):
"""Add configuration options for the AURA plugin."""
super().__init__()
def commands(self):
"""Add subcommand used to run the AURA server."""
def run_aura(lib, opts, args):
"""Run the application using Flask's built in-server.
Args:
lib: A beets Library object (not used).
opts: Command line options. An optparse.Values object.
args: The list of arguments to process (not used).
"""
app = create_app()
# Start the built-in server (not intended for production)
app.run(
host=self.config["host"].get(str),
port=self.config["port"].get(int),
debug=opts.debug,
threaded=True,
)
run_aura_cmd = Subcommand("aura", help="run an AURA server")
run_aura_cmd.parser.add_option(
"-d",
"--debug",
action="store_true",
default=False,
help="use Flask debug mode",
)
run_aura_cmd.func = run_aura
return [run_aura_cmd]
| 33,545
|
Python
|
.py
| 843
| 30.288256
| 79
| 0.588895
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,639
|
bareasc.py
|
rembo10_headphones/lib/beetsplug/bareasc.py
|
# This file is part of beets.
# Copyright 2016, Philippe Mongeau.
# Copyright 2021, Graham R. Cobb.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# This module is adapted from Fuzzy in accordance with the licence of
# that module
"""Provides a bare-ASCII matching query."""
from beets import ui
from beets.ui import print_, decargs
from beets.plugins import BeetsPlugin
from beets.dbcore.query import StringFieldQuery
from unidecode import unidecode
class BareascQuery(StringFieldQuery):
"""Compare items using bare ASCII, without accents etc."""
@classmethod
def string_match(cls, pattern, val):
"""Convert both pattern and string to plain ASCII before matching.
        If the pattern is all lower case, also convert the string to lower
        case so the match is case insensitive.
"""
# smartcase
if pattern.islower():
val = val.lower()
pattern = unidecode(pattern)
val = unidecode(val)
return pattern in val
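    # For example, unidecode("Beyoncé") == "Beyonce", so a (hypothetical)
    # query like `beet ls '#beyonce'` matches items whose artist tag
    # contains the accented name.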
class BareascPlugin(BeetsPlugin):
"""Plugin to provide bare-ASCII option for beets matching."""
def __init__(self):
"""Default prefix for selecting bare-ASCII matching is #."""
super().__init__()
self.config.add({
'prefix': '#',
})
def queries(self):
"""Register bare-ASCII matching."""
prefix = self.config['prefix'].as_str()
return {prefix: BareascQuery}
def commands(self):
"""Add bareasc command as unidecode version of 'list'."""
cmd = ui.Subcommand('bareasc',
help='unidecode version of beet list command')
cmd.parser.usage += "\n" \
'Example: %prog -f \'$album: $title\' artist:beatles'
cmd.parser.add_all_common_options()
cmd.func = self.unidecode_list
return [cmd]
def unidecode_list(self, lib, opts, args):
"""Emulate normal 'list' command but with unidecode output."""
query = decargs(args)
album = opts.album
# Copied from commands.py - list_items
if album:
for album in lib.albums(query):
bare = unidecode(str(album))
print_(bare)
else:
for item in lib.items(query):
bare = unidecode(str(item))
print_(bare)
| 2,866
|
Python
|
.py
| 71
| 33.464789
| 74
| 0.659124
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,640
|
export.py
|
rembo10_headphones/lib/beetsplug/export.py
|
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Exports data from beets
"""
import sys
import codecs
import json
import csv
from xml.etree import ElementTree
from datetime import datetime, date
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
import mediafile
from beetsplug.info import library_data, tag_data
class ExportEncoder(json.JSONEncoder):
"""Deals with dates because JSON doesn't have a standard"""
def default(self, o):
if isinstance(o, (datetime, date)):
return o.isoformat()
return json.JSONEncoder.default(self, o)
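# Minimal sketch of the encoder in isolation (illustrative):
#   json.dumps({'added': datetime(2020, 1, 1)}, cls=ExportEncoder)
#   -> '{"added": "2020-01-01T00:00:00"}'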
class ExportPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'default_format': 'json',
'json': {
# JSON module formatting options.
'formatting': {
'ensure_ascii': False,
'indent': 4,
'separators': (',', ': '),
'sort_keys': True
}
},
'jsonlines': {
# JSON Lines formatting options.
'formatting': {
'ensure_ascii': False,
'separators': (',', ': '),
'sort_keys': True
}
},
'csv': {
# CSV module formatting options.
'formatting': {
# The delimiter used to separate columns.
'delimiter': ',',
# The dialect to use when formatting the file output.
'dialect': 'excel'
}
},
'xml': {
# XML module formatting options.
'formatting': {}
}
# TODO: Use something like the edit plugin
# 'item_fields': []
})
def commands(self):
cmd = ui.Subcommand('export', help='export data from beets')
cmd.func = self.run
cmd.parser.add_option(
'-l', '--library', action='store_true',
help='show library fields instead of tags',
)
cmd.parser.add_option(
'-a', '--album', action='store_true',
help='show album fields instead of tracks (implies "--library")',
)
cmd.parser.add_option(
'--append', action='store_true', default=False,
help='append data to the output file instead of overwriting it',
)
cmd.parser.add_option(
'-i', '--include-keys', default=[],
action='append', dest='included_keys',
help='comma separated list of keys to show',
)
cmd.parser.add_option(
'-o', '--output',
help='path for the output file. If not given, will print the data'
)
cmd.parser.add_option(
'-f', '--format', default='json',
help="the output format: json (default), jsonlines, csv, or xml"
)
return [cmd]
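# Illustrative invocations (option names taken from the parser above;
# queries and file names are hypothetical):
#   beet export artist:Beatles                 # JSON to stdout
#   beet export -f csv -o out.csv year:1969    # CSV to a file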
def run(self, lib, opts, args):
file_path = opts.output
file_mode = 'a' if opts.append else 'w'
file_format = opts.format or self.config['default_format'].get(str)
file_format_is_line_based = (file_format == 'jsonlines')
format_options = self.config[file_format]['formatting'].get(dict)
export_format = ExportFormat.factory(
file_type=file_format,
**{
'file_path': file_path,
'file_mode': file_mode
}
)
if opts.library or opts.album:
data_collector = library_data
else:
data_collector = tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(','))
items = []
for data_emitter in data_collector(
lib, ui.decargs(args),
album=opts.album,
):
try:
data, item = data_emitter(included_keys or '*')
except (mediafile.UnreadableFileError, OSError) as ex:
self._log.error('cannot read file: {0}', ex)
continue
for key, value in data.items():
if isinstance(value, bytes):
data[key] = util.displayable_path(value)
if file_format_is_line_based:
export_format.export(data, **format_options)
else:
items += [data]
if not file_format_is_line_based:
export_format.export(items, **format_options)
class ExportFormat:
"""The output format type"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
self.path = file_path
self.mode = file_mode
self.encoding = encoding
# creates a file object to write/append or sets to stdout
self.out_stream = codecs.open(self.path, self.mode, self.encoding) \
if self.path else sys.stdout
@classmethod
def factory(cls, file_type, **kwargs):
if file_type in ["json", "jsonlines"]:
return JsonFormat(**kwargs)
elif file_type == "csv":
return CSVFormat(**kwargs)
elif file_type == "xml":
return XMLFormat(**kwargs)
else:
raise NotImplementedError()
def export(self, data, **kwargs):
raise NotImplementedError()
class JsonFormat(ExportFormat):
"""Saves in a json file"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
json.dump(data, self.out_stream, cls=ExportEncoder, **kwargs)
self.out_stream.write('\n')
class CSVFormat(ExportFormat):
"""Saves in a csv file"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
header = list(data[0].keys()) if data else []
writer = csv.DictWriter(self.out_stream, fieldnames=header, **kwargs)
writer.writeheader()
writer.writerows(data)
class XMLFormat(ExportFormat):
"""Saves in a xml file"""
def __init__(self, file_path, file_mode='w', encoding='utf-8'):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
# Creates the XML file structure.
library = ElementTree.Element('library')
tracks = ElementTree.SubElement(library, 'tracks')
if data and isinstance(data[0], dict):
for index, item in enumerate(data):
track = ElementTree.SubElement(tracks, 'track')
for key, value in item.items():
track_details = ElementTree.SubElement(track, key)
# ElementTree requires element text to be a string.
track_details.text = str(value)
# Depending on the version of python the encoding needs to change
try:
data = ElementTree.tostring(library, encoding='unicode', **kwargs)
except LookupError:
data = ElementTree.tostring(library, encoding='utf-8', **kwargs)
self.out_stream.write(data)
| 7,737
|
Python
|
.py
| 194
| 29.639175
| 78
| 0.571105
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,641
|
the.py
|
rembo10_headphones/lib/beetsplug/the.py
|
# This file is part of beets.
# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Moves patterns in path formats (suitable for moving articles)."""
import re
from beets.plugins import BeetsPlugin
__author__ = 'baobab@heresiarch.info'
__version__ = '1.1'
PATTERN_THE = '^the\\s'
PATTERN_A = '^[a][n]?\\s'
FORMAT = '{0}, {1}'
class ThePlugin(BeetsPlugin):
patterns = []
def __init__(self):
super().__init__()
self.template_funcs['the'] = self.the_template_func
self.config.add({
'the': True,
'a': True,
'format': '{0}, {1}',
'strip': False,
'patterns': [],
})
self.patterns = self.config['patterns'].as_str_seq()
for p in self.patterns:
if p:
try:
re.compile(p)
except re.error:
self._log.error('invalid pattern: {0}', p)
else:
if not (p.startswith('^') or p.endswith('$')):
self._log.warning('pattern "{0}" will not '
'match string start/end', p)
if self.config['a']:
self.patterns = [PATTERN_A] + self.patterns
if self.config['the']:
self.patterns = [PATTERN_THE] + self.patterns
if not self.patterns:
self._log.warning('no patterns defined!')
def unthe(self, text, pattern):
"""Moves pattern in the path format string or strips it
text -- text to handle
pattern -- regexp pattern (case ignore is already on)
strip -- if True, pattern will be removed
"""
if text:
r = re.compile(pattern, flags=re.IGNORECASE)
try:
t = r.findall(text)[0]
except IndexError:
return text
else:
r = re.sub(r, '', text).strip()
if self.config['strip']:
return r
else:
fmt = self.config['format'].as_str()
return fmt.format(r, t.strip()).strip()
else:
return ''
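# Sketch of the transformation under the default config (strip: no,
# format '{0}, {1}'); values illustrative:
#   unthe('The Beatles', PATTERN_THE)   -> 'Beatles, The'
#   unthe('A Boy Named Sue', PATTERN_A) -> 'Boy Named Sue, A'
# With strip: yes, the matched article is dropped instead.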
def the_template_func(self, text):
if not self.patterns:
return text
if text:
for p in self.patterns:
r = self.unthe(text, p)
if r != text:
self._log.debug('\"{0}\" -> \"{1}\"', text, r)
break
return r
else:
return ''
| 3,140
|
Python
|
.py
| 83
| 27.373494
| 71
| 0.541749
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,642
|
bpsync.py
|
rembo10_headphones/lib/beetsplug/bpsync.py
|
# This file is part of beets.
# Copyright 2019, Rahul Ahuja.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Update library's tags using Beatport.
"""
from beets.plugins import BeetsPlugin, apply_item_changes
from beets import autotag, library, ui, util
from .beatport import BeatportPlugin
class BPSyncPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.beatport_plugin = BeatportPlugin()
self.beatport_plugin.setup()
def commands(self):
cmd = ui.Subcommand('bpsync', help='update metadata from Beatport')
cmd.parser.add_option(
'-p',
'--pretend',
action='store_true',
help='show all changes but do nothing',
)
cmd.parser.add_option(
'-m',
'--move',
action='store_true',
dest='move',
help="move files in the library directory",
)
cmd.parser.add_option(
'-M',
'--nomove',
action='store_false',
dest='move',
help="don't move files in library",
)
cmd.parser.add_option(
'-W',
'--nowrite',
action='store_false',
default=None,
dest='write',
help="don't write updated metadata to files",
)
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
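# Illustrative invocations (flags defined on the parser above; the
# query is hypothetical):
#   beet bpsync albumartist:Daft        # apply Beatport updates
#   beet bpsync -p albumartist:Daft     # preview changes only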
def func(self, lib, opts, args):
"""Command handler for the bpsync function.
"""
move = ui.should_move(opts.move)
pretend = opts.pretend
write = ui.should_write(opts.write)
query = ui.decargs(args)
self.singletons(lib, query, move, pretend, write)
self.albums(lib, query, move, pretend, write)
def singletons(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ['singleton:true']):
if not item.mb_trackid:
self._log.info(
'Skipping singleton with no mb_trackid: {}', item
)
continue
if not self.is_beatport_track(item):
self._log.info(
'Skipping non-{} singleton: {}',
self.beatport_plugin.data_source,
item,
)
continue
# Apply.
trackinfo = self.beatport_plugin.track_for_id(item.mb_trackid)
with lib.transaction():
autotag.apply_item_metadata(item, trackinfo)
apply_item_changes(lib, item, move, pretend, write)
@staticmethod
def is_beatport_track(item):
return (
item.get('data_source') == BeatportPlugin.data_source
and item.mb_trackid.isnumeric()
)
def get_album_tracks(self, album):
if not album.mb_albumid:
self._log.info('Skipping album with no mb_albumid: {}', album)
return False
if not album.mb_albumid.isnumeric():
self._log.info(
'Skipping album with invalid {} ID: {}',
self.beatport_plugin.data_source,
album,
)
return False
items = list(album.items())
if album.get('data_source') == self.beatport_plugin.data_source:
return items
if not all(self.is_beatport_track(item) for item in items):
self._log.info(
'Skipping non-{} release: {}',
self.beatport_plugin.data_source,
album,
)
return False
return items
def albums(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for albums matched by
query and their items.
"""
# Process matching albums.
for album in lib.albums(query):
# Do we have a valid Beatport album?
items = self.get_album_tracks(album)
if not items:
continue
# Get the Beatport album information.
albuminfo = self.beatport_plugin.album_for_id(album.mb_albumid)
if not albuminfo:
self._log.info(
'Release ID {} not found for album {}',
album.mb_albumid,
album,
)
continue
beatport_trackid_to_trackinfo = {
track.track_id: track for track in albuminfo.tracks
}
library_trackid_to_item = {
int(item.mb_trackid): item for item in items
}
item_to_trackinfo = {
item: beatport_trackid_to_trackinfo[track_id]
for track_id, item in library_trackid_to_item.items()
}
self._log.info('applying changes to {}', album)
with lib.transaction():
autotag.apply_metadata(albuminfo, item_to_trackinfo)
changed = False
# Find any changed item to apply Beatport changes to album.
any_changed_item = items[0]
for item in items:
item_changed = ui.show_model_changes(item)
changed |= item_changed
if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write)
if pretend or not changed:
continue
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = any_changed_item[key]
album.store()
# Move album art (and any inconsistent items).
if move and lib.directory in util.ancestry(items[0].path):
self._log.debug('moving album {}', album)
album.move()
| 6,591
|
Python
|
.py
| 166
| 27.692771
| 76
| 0.552069
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,643
|
discogs.py
|
rembo10_headphones/lib/beetsplug/discogs.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
python3-discogs-client library.
"""
import beets.ui
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.plugins import MetadataSourcePlugin, BeetsPlugin, get_distance
import confuse
from discogs_client import Release, Master, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
import http.client
import beets
import re
import time
import json
import socket
import os
import traceback
from string import ascii_lowercase
USER_AGENT = f'beets/{beets.__version__} +https://beets.io/'
API_KEY = 'rAzVUQYRaoFjeBjyWuWZ'
API_SECRET = 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy'
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, http.client.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'apikey': API_KEY,
'apisecret': API_SECRET,
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
'user_token': '',
'separator': ', ',
'index_tracks': False,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.config['user_token'].redact = True
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Try using a configured user token (bypassing OAuth login).
user_token = self.config['user_token'].as_str()
if user_token:
# The rate limit for authenticated users goes up to 60
# requests per minute.
self.discogs_client = Client(USER_AGENT, user_token=user_token)
return
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except OSError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.discogs_client = Client(USER_AGENT, c_key, c_secret,
token, secret)
def reset_auth(self):
"""Delete token file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confuse.Filename(in_app_dir=True))
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
self._log.debug('connection error: {0}', e)
raise beets.ui.UserError('communication with Discogs failed')
beets.ui.print_("To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_("Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError('Discogs authorization failed')
except CONNECTION_ERRORS as e:
self._log.debug('connection error: {0}', e)
raise beets.ui.UserError('Discogs token request failed')
# Save the token for later use.
self._log.debug('Discogs token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
return get_distance(
data_source='Discogs',
info=album_info,
config=self.config
)
def track_distance(self, item, track_info):
"""Returns the track distance.
"""
return get_distance(
data_source='Discogs',
info=track_info,
config=self.config
)
def candidates(self, items, artist, album, va_likely, extra_tags=None):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if va_likely:
query = album
else:
query = f'{artist} {album}'
try:
return self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.candidates(items, artist, album, va_likely,
extra_tags)
else:
return []
except CONNECTION_ERRORS:
self._log.debug('Connection error in album search', exc_info=True)
return []
@staticmethod
def extract_release_id_regex(album_id):
"""Returns the Discogs_id or None."""
# Discogs-IDs are simple integers. In order to avoid confusion with
# other metadata plugins, we only look for very specific formats of the
# input string:
# - plain integer, optionally wrapped in brackets and prefixed by an
# 'r', as this is how discogs displays the release ID on its webpage.
# - legacy url format: discogs.com/<name of release>/release/<id>
# - current url format: discogs.com/release/<id>-<name of release>
# See #291, #4080 and #4085 for the discussions leading up to these
# patterns.
# Regex has been tested here https://regex101.com/r/wyLdB4/2
for pattern in [
r'^\[?r?(?P<id>\d+)\]?$',
r'discogs\.com/release/(?P<id>\d+)-',
r'discogs\.com/[^/]+/release/(?P<id>\d+)',
]:
match = re.search(pattern, album_id)
if match:
return int(match.group('id'))
return None
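# Illustrative inputs accepted by the patterns above:
#   '123456' or '[r123456]'                   -> 123456
#   'discogs.com/release/123456-Some-Album'   -> 123456
#   'discogs.com/Some-Album/release/123456'   -> 123456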
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
if not self.discogs_client:
return
self._log.debug('Searching for release {0}', album_id)
discogs_id = self.extract_release_id_regex(album_id)
if not discogs_id:
return None
result = Release(self.discogs_client, {'id': discogs_id})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, 'title')
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug('API Error: {0} (query: {1})', e,
result.data['resource_url'])
if e.status_code == 401:
self.reset_auth()
return self.album_for_id(album_id)
return None
except CONNECTION_ERRORS:
self._log.debug('Connection error in album lookup',
exc_info=True)
return None
return self.get_album_info(result)
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
query = re.sub(r'(?u)\W+', ' ', query)
# Strip medium information from the query. Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
except CONNECTION_ERRORS:
self._log.debug("Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [album for album in map(self.get_album_info, releases[:5])
if album]
def get_master_year(self, master_id):
"""Fetches a master release given its Discogs ID and returns its year
or None if the master release is not found.
"""
self._log.debug('Searching for master release {0}', master_id)
result = Master(self.discogs_client, {'id': master_id})
try:
year = result.fetch('year')
return year
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug('API Error: {0} (query: {1})', e,
result.data['resource_url'])
if e.status_code == 401:
self.reset_auth()
return self.get_master_year(master_id)
return None
except CONNECTION_ERRORS:
self._log.debug('Connection error in master release lookup',
exc_info=True)
return None
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
# Explicitly reload the `Release` fields, as they might not be yet
# present if the result is from a `discogs_client.search()`.
if not result.data.get('artists'):
result.refresh()
# Sanity check for required fields. The list of required fields is
# defined at Guideline 1.3.1.a, but in practice some releases might be
# lacking some of these fields. This function expects at least:
# `artists` (>0), `title`, `id`, `tracklist` (>0)
# https://www.discogs.com/help/doc/submission-guidelines-general-rules
if not all([result.data.get(k) for k in ['artists', 'title', 'id',
'tracklist']]):
self._log.warning("Release does not contain the required fields")
return None
artist, artist_id = MetadataSourcePlugin.get_artist(
[a.data for a in result.artists]
)
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data['artists'][0].get('name', '').lower() == 'various'
year = result.data.get('year')
mediums = [t.medium for t in tracks]
country = result.data.get('country')
data_url = result.data.get('uri')
style = self.format(result.data.get('styles'))
genre = self.format(result.data.get('genres'))
discogs_albumid = self.extract_release_id(result.data.get('uri'))
# Extract information for the optional AlbumInfo fields that are
# contained on nested discogs fields.
albumtype = media = label = catalogno = labelid = None
if result.data.get('formats'):
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
media = result.data['formats'][0]['name']
if result.data.get('labels'):
label = result.data['labels'][0].get('name')
catalogno = result.data['labels'][0].get('catno')
labelid = result.data['labels'][0].get('id')
# Additional cleanups (various artists name, catalog number, media).
if va:
artist = config['va_name'].as_str()
if catalogno == 'none':
catalogno = None
# Explicitly set the `media` for the tracks, since it is expected by
# `autotag.apply_metadata`, and set `medium_total`.
for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
track.track_id = str(album_id) + "-" + track.track_alt
# Retrieve master release id (returns None if there isn't one).
master_id = result.data.get('master_id')
# Assume `original_year` is equal to `year` for releases without
# a master release, otherwise fetch the master release.
original_year = self.get_master_year(master_id) if master_id else year
return AlbumInfo(album=album, album_id=album_id, artist=artist,
artist_id=artist_id, tracks=tracks,
albumtype=albumtype, va=va, year=year,
label=label, mediums=len(set(mediums)),
releasegroup_id=master_id, catalognum=catalogno,
country=country, style=style, genre=genre,
media=media, original_year=original_year,
data_source='Discogs', data_url=data_url,
discogs_albumid=discogs_albumid,
discogs_labelid=labelid, discogs_artistid=artist_id)
def format(self, classification):
if classification:
return self.config['separator'].as_str() \
.join(sorted(classification))
else:
return None
def extract_release_id(self, uri):
if uri:
return uri.split("/")[-1]
else:
return None
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
try:
clean_tracklist = self.coalesce_tracks(tracklist)
except Exception as exc:
# FIXME: this is an extra precaution for making sure there are no
# side effects after #2222. It should be removed after further
# testing.
self._log.debug('{}', traceback.format_exc())
self._log.error('uncaught exception in coalesce_tracks: {}', exc)
clean_tracklist = tracklist
tracks = []
index_tracks = {}
index = 0
# Distinct works and intra-work divisions, as defined by index tracks.
divisions, next_divisions = [], []
for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
if next_divisions:
# End of a block of index tracks: update the current
# divisions.
divisions += next_divisions
del next_divisions[:]
track_info = self.get_track_info(track, index, divisions)
track_info.track_alt = track['position']
tracks.append(track_info)
else:
next_divisions.append(track['title'])
# We expect new levels of division at the beginning of the
# tracklist (and possibly elsewhere).
try:
divisions.pop()
except IndexError:
pass
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count, side_count = 0, 0, 0
sides_per_medium = 1
# If a medium has two sides (ie. vinyl or cassette), each pair of
# consecutive sides should belong to the same medium.
if all([track.medium is not None for track in tracks]):
m = sorted({track.medium.lower() for track in tracks})
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if ''.join(m) in ascii_lowercase:
sides_per_medium = 2
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
# Not within standard incremental medium values (A, B, C, ...).
ord(track.medium) - 64 != side_count + 1
)
if not medium_is_index and medium != track.medium:
side_count += 1
if sides_per_medium == 2:
if side_count % sides_per_medium:
# Two-sided medium changed. Reset index_count.
index_count = 0
medium_count += 1
else:
# Medium changed. Reset index_count.
medium_count += 1
index_count = 0
medium = track.medium
index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return tracks
def coalesce_tracks(self, raw_tracklist):
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
if present; otherwise it is a combination of the subtracks titles.
"""
def add_merged_subtracks(tracklist, subtracks):
"""Modify `tracklist` in place, merging a list of `subtracks` into
a single track into `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = \
self.get_track_index(subtracks[0]['position'])
position = '{}{}'.format(idx or '', medium_idx or '')
if tracklist and not tracklist[-1]['position']:
# Assume the previous index track contains the track title.
if sub_idx:
# "Convert" the track title to a real track, discarding the
# subtracks assuming they are logical divisions of a
# physical track (12.2.9 Subtracks).
tracklist[-1]['position'] = position
else:
# Promote the subtracks to real tracks, discarding the
# index track, assuming the subtracks are physical tracks.
index_track = tracklist.pop()
# Fix artists when they are specified on the index track.
if index_track.get('artists'):
for subtrack in subtracks:
if not subtrack.get('artists'):
subtrack['artists'] = index_track['artists']
# Concatenate index with track title when index_tracks
# option is set
if self.config['index_tracks']:
for subtrack in subtracks:
subtrack['title'] = '{}: {}'.format(
index_track['title'], subtrack['title'])
tracklist.extend(subtracks)
else:
# Merge the subtracks, pick a title, and append the new track.
track = subtracks[0].copy()
track['title'] = ' / '.join([t['title'] for t in subtracks])
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
subtracks = []
tracklist = []
prev_subindex = ''
for track in raw_tracklist:
# Regular subtrack (track with subindex).
if track['position']:
_, _, subindex = self.get_track_index(track['position'])
if subindex:
if subindex.rjust(len(raw_tracklist)) > prev_subindex:
# Subtrack still part of the current main track.
subtracks.append(track)
else:
# Subtrack part of a new group (..., 1.3, *2.1*, ...).
add_merged_subtracks(tracklist, subtracks)
subtracks = [track]
prev_subindex = subindex.rjust(len(raw_tracklist))
continue
# Index track with nested sub_tracks.
if not track['position'] and 'sub_tracks' in track:
# Append the index track, assuming it contains the track title.
tracklist.append(track)
add_merged_subtracks(tracklist, track['sub_tracks'])
continue
# Regular track or index track without nested sub_tracks.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
subtracks = []
prev_subindex = ''
tracklist.append(track)
# Merge and add the remaining subtracks, if any.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
return tracklist
def get_track_info(self, track, index, divisions):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
if self.config['index_tracks']:
prefix = ', '.join(divisions)
if prefix:
title = f'{prefix}: {title}'
track_id = None
medium, medium_index, _ = self.get_track_index(track['position'])
artist, artist_id = MetadataSourcePlugin.get_artist(
track.get('artists', [])
)
length = self.get_track_length(track['duration'])
return TrackInfo(title=title, track_id=track_id, artist=artist,
artist_id=artist_id, length=length, index=index,
medium=medium, medium_index=medium_index)
def get_track_index(self, position):
"""Returns the medium, medium index and subtrack index for a discogs
track position."""
# Match the standard Discogs positions (12.2.9), which can have several
# forms (1, 1-1, A1, A1.1, A1a, ...).
match = re.match(
r'^(.*?)' # medium: everything before medium_index.
r'(\d*?)' # medium_index: a number at the end of
# `position`, except if followed by a subtrack
# index.
# subtrack_index: can only be matched if medium
# or medium_index have been matched, and can be
r'((?<=\w)\.[\w]+' # - a dot followed by a string (A.1, 2.A)
r'|(?<=\d)[A-Z]+' # - a string that follows a number (1A, B2a)
r')?'
r'$',
position.upper()
)
if match:
medium, index, subindex = match.groups()
if subindex and subindex.startswith('.'):
subindex = subindex[1:]
else:
self._log.debug('Invalid position: {0}', position)
medium = index = subindex = None
return medium or None, index or None, subindex or None
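# Sketch of positions parsed by the regex above (illustrative):
#   'A1'   -> ('A', '1', None)     # vinyl side A, track 1
#   'A1.2' -> ('A', '1', '2')      # subtrack 2 of A1
#   '12'   -> (None, '12', None)   # plain numbered track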
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
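# e.g. (illustrative): get_track_length('4:30') -> 270, while a
# malformed duration such as '' or '4m30s' fails to parse and
# yields None.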
| 25,616
|
Python
|
.py
| 540
| 35.053704
| 79
| 0.572131
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,644
|
scrub.py
|
rembo10_headphones/lib/beetsplug/scrub.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Cleans extraneous metadata from files' tags via a command or
automatically whenever tags are written.
"""
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
from beets import config
import mediafile
import mutagen
_MUTAGEN_FORMATS = {
'asf': 'ASF',
'apev2': 'APEv2File',
'flac': 'FLAC',
'id3': 'ID3FileType',
'mp3': 'MP3',
'mp4': 'MP4',
'oggflac': 'OggFLAC',
'oggspeex': 'OggSpeex',
'oggtheora': 'OggTheora',
'oggvorbis': 'OggVorbis',
'oggopus': 'OggOpus',
'trueaudio': 'TrueAudio',
'wavpack': 'WavPack',
'monkeysaudio': 'MonkeysAudio',
'optimfrog': 'OptimFROG',
}
class ScrubPlugin(BeetsPlugin):
"""Removes extraneous metadata from files' tags."""
def __init__(self):
super().__init__()
self.config.add({
'auto': True,
})
if self.config['auto']:
self.register_listener("import_task_files", self.import_task_files)
def commands(self):
def scrub_func(lib, opts, args):
# Walk through matching files and remove tags.
for item in lib.items(ui.decargs(args)):
self._log.info('scrubbing: {0}',
util.displayable_path(item.path))
self._scrub_item(item, opts.write)
scrub_cmd = ui.Subcommand('scrub', help='clean audio tags')
scrub_cmd.parser.add_option(
'-W', '--nowrite', dest='write',
action='store_false', default=True,
help='leave tags empty')
scrub_cmd.func = scrub_func
return [scrub_cmd]
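# Illustrative usage (queries hypothetical):
#   beet scrub artist:Beatles        # scrub, then restore tags from db
#   beet scrub -W artist:Beatles     # scrub and leave the tags empty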
@staticmethod
def _mutagen_classes():
"""Get a list of file type classes from the Mutagen module.
"""
classes = []
for modname, clsname in _MUTAGEN_FORMATS.items():
mod = __import__(f'mutagen.{modname}',
fromlist=[clsname])
classes.append(getattr(mod, clsname))
return classes
def _scrub(self, path):
"""Remove all tags from a file.
"""
for cls in self._mutagen_classes():
# Try opening the file with this type, but just skip in the
# event of any error.
try:
f = cls(util.syspath(path))
except Exception:
continue
if f.tags is None:
continue
# Remove the tag for this type.
try:
f.delete()
except NotImplementedError:
# Some Mutagen metadata subclasses (namely, ASFTag) do not
# support .delete(), presumably because it is impossible to
# remove them. In this case, we just remove all the tags.
for tag in f.keys():
del f[tag]
f.save()
except (OSError, mutagen.MutagenError) as exc:
self._log.error('could not scrub {0}: {1}',
util.displayable_path(path), exc)
def _scrub_item(self, item, restore=True):
"""Remove tags from an Item's associated file and, if `restore`
is enabled, write the database's tags back to the file.
"""
# Get album art if we need to restore it.
if restore:
try:
mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool))
except mediafile.UnreadableFileError as exc:
self._log.error('could not open file to scrub: {0}',
exc)
return
images = mf.images
# Remove all tags.
self._scrub(item.path)
# Restore tags, if enabled.
if restore:
self._log.debug('writing new tags after scrub')
item.try_write()
if images:
self._log.debug('restoring art')
try:
mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool))
mf.images = images
mf.save()
except mediafile.UnreadableFileError as exc:
self._log.error('could not write tags: {0}', exc)
def import_task_files(self, session, task):
"""Automatically scrub imported files."""
for item in task.imported_items():
self._log.debug('auto-scrubbing {0}',
util.displayable_path(item.path))
self._scrub_item(item)
| 5,248
|
Python
|
.py
| 132
| 29.128788
| 79
| 0.571288
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,645
|
duplicates.py
|
rembo10_headphones/lib/beetsplug/duplicates.py
|
# This file is part of beets.
# Copyright 2016, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
import shlex
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess, \
bytestring_path, MoveOperation, decode_commandline_path
from beets.library import Item, Album
PLUGIN = 'duplicates'
class DuplicatesPlugin(BeetsPlugin):
"""List duplicate tracks or albums
"""
def __init__(self):
super().__init__()
self.config.add({
'album': False,
'checksum': '',
'copy': '',
'count': False,
'delete': False,
'format': '',
'full': False,
'keys': [],
'merge': False,
'move': '',
'path': False,
'tiebreak': {},
'strict': False,
'tag': '',
})
self._command = Subcommand('duplicates',
help=__doc__,
aliases=['dup'])
self._command.parser.add_option(
'-c', '--count', dest='count',
action='store_true',
help='show duplicate counts',
)
self._command.parser.add_option(
'-C', '--checksum', dest='checksum',
action='store', metavar='PROG',
help='report duplicates based on arbitrary command',
)
self._command.parser.add_option(
'-d', '--delete', dest='delete',
action='store_true',
help='delete items from library and disk',
)
self._command.parser.add_option(
'-F', '--full', dest='full',
action='store_true',
help='show all versions of duplicate tracks or albums',
)
self._command.parser.add_option(
'-s', '--strict', dest='strict',
action='store_true',
help='report duplicates only if all attributes are set',
)
self._command.parser.add_option(
'-k', '--key', dest='keys',
action='append', metavar='KEY',
help='report duplicates based on keys (use multiple times)',
)
self._command.parser.add_option(
'-M', '--merge', dest='merge',
action='store_true',
help='merge duplicate items',
)
self._command.parser.add_option(
'-m', '--move', dest='move',
action='store', metavar='DEST',
help='move items to dest',
)
self._command.parser.add_option(
'-o', '--copy', dest='copy',
action='store', metavar='DEST',
help='copy items to dest',
)
self._command.parser.add_option(
'-t', '--tag', dest='tag',
action='store',
help='tag matched items with \'k=v\' attribute',
)
self._command.parser.add_all_common_options()
def commands(self):
def _dup(lib, opts, args):
self.config.set_args(opts)
album = self.config['album'].get(bool)
checksum = self.config['checksum'].get(str)
copy = bytestring_path(self.config['copy'].as_str())
count = self.config['count'].get(bool)
delete = self.config['delete'].get(bool)
fmt = self.config['format'].get(str)
full = self.config['full'].get(bool)
keys = self.config['keys'].as_str_seq()
merge = self.config['merge'].get(bool)
move = bytestring_path(self.config['move'].as_str())
path = self.config['path'].get(bool)
tiebreak = self.config['tiebreak'].get(dict)
strict = self.config['strict'].get(bool)
tag = self.config['tag'].get(str)
if album:
if not keys:
keys = ['mb_albumid']
items = lib.albums(decargs(args))
else:
if not keys:
keys = ['mb_trackid', 'mb_albumid']
items = lib.items(decargs(args))
# If there's nothing to do, return early. The code below assumes
# `items` to be non-empty.
if not items:
return
if path:
fmt = '$path'
# Default format string for count mode.
if count and not fmt:
if album:
fmt = '$albumartist - $album'
else:
fmt = '$albumartist - $album - $title'
fmt += ': {0}'
if checksum:
for i in items:
k, _ = self._checksum(i, checksum)
keys = [k]
for obj_id, obj_count, objs in self._duplicates(items,
keys=keys,
full=full,
strict=strict,
tiebreak=tiebreak,
merge=merge):
if obj_id: # Skip empty IDs.
for o in objs:
self._process_item(o,
copy=copy,
move=move,
delete=delete,
tag=tag,
fmt=fmt.format(obj_count))
self._command.func = _dup
return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=''):
"""Process Item `item`.
"""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, operation=MoveOperation.COPY)
item.store()
if move:
item.move(basedir=move)
item.store()
if delete:
item.remove(delete=True)
if tag:
try:
k, v = tag.split('=')
except Exception:
raise UserError(
f"{PLUGIN}: can't parse k=v tag: {tag}"
)
setattr(item, k, v)
item.store()
def _checksum(self, item, prog):
"""Run external `prog` on file path associated with `item`, cache
output as flexattr on a key that is the name of the program, and
return the key, checksum tuple.
"""
args = [p.format(file=decode_commandline_path(item.path))
for p in shlex.split(prog)]
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
self._log.debug('key {0} on item {1} not cached: '
'computing checksum',
key, displayable_path(item.path))
try:
checksum = command_output(args).stdout
setattr(item, key, checksum)
item.store()
self._log.debug('computed checksum for {0} using {1}',
item.title, key)
except subprocess.CalledProcessError as e:
self._log.debug('failed to checksum {0}: {1}',
displayable_path(item.path), e)
else:
self._log.debug('key {0} on item {1} cached: '
'not computing checksum',
key, displayable_path(item.path))
return key, checksum
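# Illustrative invocation of checksum-based duplicate detection,
# assuming an `md5sum` binary is available on the PATH:
#   beet duplicates -C 'md5sum {file}'
# The computed value is cached on each item under the key 'md5sum'.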
def _group_by(self, objs, keys, strict):
"""Return a dictionary with keys arbitrary concatenations of attributes
and values lists of objects (Albums or Items) with those keys.
If strict, all attributes must be defined for a duplicate match.
"""
import collections
counts = collections.defaultdict(list)
for obj in objs:
values = [getattr(obj, k, None) for k in keys]
values = [v for v in values if v not in (None, '')]
if strict and len(values) < len(keys):
self._log.debug('some keys {0} on item {1} are null or empty:'
' skipping',
keys, displayable_path(obj.path))
elif (not strict and not len(values)):
self._log.debug('all keys {0} on item {1} are null or empty:'
' skipping',
keys, displayable_path(obj.path))
else:
key = tuple(values)
counts[key].append(obj)
return counts
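# Sketch: with keys=['mb_trackid', 'mb_albumid'], two items sharing
# both IDs land in the same list under the key
# ('<trackid>', '<albumid>'); items whose key values are all null or
# empty are skipped (illustrative, not real library data).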
def _order(self, objs, tiebreak=None):
"""Return the objects (Items or Albums) sorted by descending
order of priority.
If provided, the `tiebreak` dict indicates the field to use to
prioritize the objects. Otherwise, Items are placed in order of
"completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count.
"""
kind = 'items' if all(isinstance(o, Item) for o in objs) else 'albums'
if tiebreak and kind in tiebreak.keys():
key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
else:
if kind == 'items':
def truthy(v):
# Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode
# string ''.
return v is not None and \
(v != '' if isinstance(v, str) else True)
fields = Item.all_keys()
key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
else:
key = lambda x: len(x.items())
return sorted(objs, key=key, reverse=True)
def _merge_items(self, objs):
"""Merge Item objs by copying missing fields from items in the tail to
the head item.
Return same number of items, with the head item modified.
"""
fields = Item.all_keys()
for f in fields:
for o in objs[1:]:
if getattr(objs[0], f, None) in (None, ''):
value = getattr(o, f, None)
if value:
self._log.debug('key {0} on item {1} is null '
'or empty: setting from item {2}',
f, displayable_path(objs[0].path),
displayable_path(o.path))
setattr(objs[0], f, value)
objs[0].store()
break
return objs
def _merge_albums(self, objs):
"""Merge Album objs by copying missing items from albums in the tail
to the head album.
Return same number of albums, with the head album modified."""
ids = [i.mb_trackid for i in objs[0].items()]
for o in objs[1:]:
for i in o.items():
if i.mb_trackid not in ids:
missing = Item.from_path(i.path)
missing.album_id = objs[0].id
missing.add(i._db)
self._log.debug('item {0} missing from album {1}:'
' merging from {2} into {3}',
missing,
objs[0],
displayable_path(o.path),
displayable_path(missing.destination()))
missing.move(operation=MoveOperation.COPY)
return objs
def _merge(self, objs):
"""Merge duplicate items. See ``_merge_items`` and ``_merge_albums``
for the relevant strategies.
"""
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
objs = self._merge_items(objs)
else:
objs = self._merge_albums(objs)
return objs
def _duplicates(self, objs, keys, full, strict, tiebreak, merge):
"""Generate triples of keys, duplicate counts, and constituent objects.
"""
offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).items():
if len(objs) > 1:
objs = self._order(objs, tiebreak)
if merge:
objs = self._merge(objs)
yield (k, len(objs) - offset, objs[offset:])
| 13,282
|
Python
|
.py
| 308
| 28.461039
| 79
| 0.497102
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,646
|
filefilter.py
|
rembo10_headphones/lib/beetsplug/filefilter.py
|
# This file is part of beets.
# Copyright 2016, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Filter imported files using a regular expression.
"""
import re
from beets import config
from beets.util import bytestring_path
from beets.plugins import BeetsPlugin
from beets.importer import SingletonImportTask
class FileFilterPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener('import_task_created',
self.import_task_created_event)
self.config.add({
'path': '.*'
})
self.path_album_regex = \
self.path_singleton_regex = \
re.compile(bytestring_path(self.config['path'].get()))
if 'album_path' in self.config:
self.path_album_regex = re.compile(
bytestring_path(self.config['album_path'].get()))
if 'singleton_path' in self.config:
self.path_singleton_regex = re.compile(
bytestring_path(self.config['singleton_path'].get()))
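# Illustrative configuration (regex values hypothetical):
#   filefilter:
#       path: .*/\d\d[^/]+\.mp3
# Each regex is applied with re.match, i.e. anchored at the start of
# the full file path.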
def import_task_created_event(self, session, task):
if task.items and len(task.items) > 0:
items_to_import = []
for item in task.items:
if self.file_filter(item['path']):
items_to_import.append(item)
if len(items_to_import) > 0:
task.items = items_to_import
else:
# Returning an empty list of tasks from the handler
# drops the task from the rest of the importer pipeline.
return []
elif isinstance(task, SingletonImportTask):
if not self.file_filter(task.item['path']):
return []
# If not filtered, return the original task unchanged.
return [task]
def file_filter(self, full_path):
"""Checks if the configured regular expressions allow the import
of the file given in full_path.
"""
import_config = dict(config['import'])
full_path = bytestring_path(full_path)
if ('singletons' not in import_config
or not import_config['singletons']):
# Album
return self.path_album_regex.match(full_path) is not None
else:
# Singleton
return self.path_singleton_regex.match(full_path) is not None
| 2,919
|
Python
|
.py
| 67
| 34.641791
| 73
| 0.637324
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,647
|
freedesktop.py
|
rembo10_headphones/lib/beetsplug/freedesktop.py
|
# This file is part of beets.
# Copyright 2016, Matt Lichtenberg.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Creates freedesktop.org-compliant .directory files on an album level.
"""
from beets.plugins import BeetsPlugin
from beets import ui
class FreedesktopPlugin(BeetsPlugin):
def commands(self):
deprecated = ui.Subcommand(
"freedesktop",
help="Print a message to redirect to thumbnails --dolphin")
deprecated.func = self.deprecation_message
return [deprecated]
def deprecation_message(self, lib, opts, args):
ui.print_("This plugin is deprecated. Its functionality is "
"superseded by the 'thumbnails' plugin")
ui.print_("'thumbnails --dolphin' replaces freedesktop. See doc & "
"changelog for more information")
| 1,374
|
Python
|
.py
| 29
| 42.448276
| 75
| 0.731889
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,648
|
fuzzy.py
|
rembo10_headphones/lib/beetsplug/fuzzy.py
|
# This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides a fuzzy matching query.
"""
from beets.plugins import BeetsPlugin
from beets.dbcore.query import StringFieldQuery
from beets import config
import difflib
class FuzzyQuery(StringFieldQuery):
@classmethod
def string_match(cls, pattern, val):
# smartcase
if pattern.islower():
val = val.lower()
query_matcher = difflib.SequenceMatcher(None, pattern, val)
threshold = config['fuzzy']['threshold'].as_number()
return query_matcher.quick_ratio() >= threshold
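# Rough sketch of the ratio test (illustrative numbers):
#   SequenceMatcher(None, 'beatles', 'the beatles').quick_ratio()
#   -> ~0.78, which passes the default 0.7 threshold.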
class FuzzyPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'prefix': '~',
'threshold': 0.7,
})
def queries(self):
prefix = self.config['prefix'].as_str()
return {prefix: FuzzyQuery}
| 1,465
|
Python
|
.py
| 38
| 33.868421
| 71
| 0.70895
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,649
|
albumtypes.py
|
rembo10_headphones/lib/beetsplug/albumtypes.py
|
# This file is part of beets.
# Copyright 2021, Edgars Supe.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds an album template field for formatted album types."""
from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.library import Album
from beets.plugins import BeetsPlugin
class AlbumTypesPlugin(BeetsPlugin):
"""Adds an album template field for formatted album types."""
def __init__(self):
"""Init AlbumTypesPlugin."""
super().__init__()
self.album_template_fields['atypes'] = self._atypes
self.config.add({
'types': [
('ep', 'EP'),
('single', 'Single'),
('soundtrack', 'OST'),
('live', 'Live'),
('compilation', 'Anthology'),
('remix', 'Remix')
],
'ignore_va': ['compilation'],
'bracket': '[]'
})
def _atypes(self, item: Album):
"""Returns a formatted string based on album's types."""
types = self.config['types'].as_pairs()
ignore_va = self.config['ignore_va'].as_str_seq()
bracket = self.config['bracket'].as_str()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = ''
bracket_r = ''
res = ''
albumtypes = item.albumtypes.split('; ')
is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
for type_name, label in types:
if type_name in albumtypes and label:
if not is_va or type_name not in ignore_va:
res += f'{bracket_l}{label}{bracket_r}'
return res
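# Sketch under the default config (illustrative): an album whose
# albumtypes field is 'album; live' yields '[Live]', since only
# 'live' has a mapping in `types` and 'album' does not.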
| 2,309
|
Python
|
.py
| 55
| 33.745455
| 78
| 0.60918
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,650
|
subsonicupdate.py
|
rembo10_headphones/lib/beetsplug/subsonicupdate.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates Subsonic library on Beets import
Your Beets configuration file should contain
a "subsonic" section like the following:
subsonic:
url: https://mydomain.com:443/subsonic
user: username
pass: password
auth: token
For older Subsonic versions, token authentication
is not supported, use password instead:
subsonic:
url: https://mydomain.com:443/subsonic
user: username
pass: password
auth: pass
"""
import hashlib
import random
import string
import requests
from binascii import hexlify
from beets import config
from beets.plugins import BeetsPlugin
__author__ = 'https://github.com/maffo999'
class SubsonicUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
# Set default configuration values
config['subsonic'].add({
'user': 'admin',
'pass': 'admin',
'url': 'http://localhost:4040',
'auth': 'token',
})
config['subsonic']['pass'].redact = True
self.register_listener('import', self.start_scan)
@staticmethod
def __create_token():
"""Create salt and token from given password.
:return: The generated salt and hashed token
"""
password = config['subsonic']['pass'].as_str()
# Pick the random sequence and salt the password
r = string.ascii_letters + string.digits
salt = "".join([random.choice(r) for _ in range(6)])
salted_password = password + salt
token = hashlib.md5(salted_password.encode('utf-8')).hexdigest()
        # Return the salt and the salted-and-hashed token.
        return salt, token
@staticmethod
def __format_url(endpoint):
"""Get the Subsonic URL to trigger the given endpoint.
Uses either the url config option or the deprecated host, port,
and context_path config options together.
:return: Endpoint for updating Subsonic
"""
url = config['subsonic']['url'].as_str()
if url and url.endswith('/'):
url = url[:-1]
# @deprecated("Use url config option instead")
if not url:
host = config['subsonic']['host'].as_str()
port = config['subsonic']['port'].get(int)
context_path = config['subsonic']['contextpath'].as_str()
if context_path == '/':
context_path = ''
url = f"http://{host}:{port}{context_path}"
return url + f'/rest/{endpoint}'
def start_scan(self):
user = config['subsonic']['user'].as_str()
auth = config['subsonic']['auth'].as_str()
url = self.__format_url("startScan")
self._log.debug('URL is {0}', url)
self._log.debug('auth type is {0}', config['subsonic']['auth'])
if auth == "token":
salt, token = self.__create_token()
payload = {
'u': user,
't': token,
's': salt,
'v': '1.13.0', # Subsonic 5.3 and newer
'c': 'beets',
'f': 'json'
}
elif auth == "password":
password = config['subsonic']['pass'].as_str()
encpass = hexlify(password.encode()).decode()
payload = {
'u': user,
'p': f'enc:{encpass}',
'v': '1.12.0',
'c': 'beets',
'f': 'json'
}
else:
return
try:
response = requests.get(url, params=payload)
json = response.json()
if response.status_code == 200 and \
json['subsonic-response']['status'] == "ok":
count = json['subsonic-response']['scanStatus']['count']
self._log.info(
f'Updating Subsonic; scanning {count} tracks')
elif response.status_code == 200 and \
json['subsonic-response']['status'] == "failed":
error_message = json['subsonic-response']['error']['message']
self._log.error(f'Error: {error_message}')
else:
self._log.error('Error: {0}', json)
except Exception as error:
self._log.error(f'Error: {error}')
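# Standalone sketch (not part of the original file) of the salt-and-token
# scheme implemented by __create_token() above: token = md5(password + salt).
# 'hunter2' is a placeholder password.
if __name__ == '__main__':
    import hashlib
    import random
    import string
    _alphabet = string.ascii_letters + string.digits
    _salt = ''.join(random.choice(_alphabet) for _ in range(6))
    _token = hashlib.md5(('hunter2' + _salt).encode('utf-8')).hexdigest()
    print(_salt, _token)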
| size: 4,956 | language: Python | extension: .py | total_lines: 125 | avg_line_length: 30.4 | max_line_length: 77 | alphanum_fraction: 0.581671 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,651 | file_name: loadext.py | file_path: rembo10_headphones/lib/beetsplug/loadext.py |
# This file is part of beets.
# Copyright 2019, Jack Wilsdon <jack.wilsdon@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Load SQLite extensions.
"""
from beets.dbcore import Database
from beets.plugins import BeetsPlugin
import sqlite3
class LoadExtPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
if not Database.supports_extensions:
            self._log.warning('loadext is enabled but the current SQLite '
                              'installation does not support extensions')
return
self.register_listener('library_opened', self.library_opened)
def library_opened(self, lib):
for v in self.config:
ext = v.as_filename()
self._log.debug('loading extension {}', ext)
try:
lib.load_extension(ext)
except sqlite3.OperationalError as e:
self._log.error('failed to load extension {}: {}', ext, e)
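# Hypothetical configuration sketch (the extension path is a placeholder):
# each value under the plugin's config section is read with as_filename()
# above and passed to lib.load_extension().
#
#   loadext:
#     - /usr/lib/sqlite3/icu.so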
| size: 1,496 | language: Python | extension: .py | total_lines: 34 | avg_line_length: 37.676471 | max_line_length: 74 | alphanum_fraction: 0.692837 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,652 | file_name: convert.py | file_path: rembo10_headphones/lib/beetsplug/convert.py |
# This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory
"""
from beets.util import par_map, decode_commandline_path, arg_encoding
import os
import threading
import subprocess
import tempfile
import shlex
from string import Template
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from confuse import ConfigTypeError
from beets import art
from beets.util.artresizer import ArtResizer
from beets.library import parse_query_string
from beets.library import Item
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
'wma': 'windows media',
'vorbis': 'ogg',
}
LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav', 'aiff']
def replace_ext(path, ext):
"""Return the path with its extension replaced by `ext`.
The new extension must not contain a leading dot.
"""
ext_dot = b'.' + ext
return os.path.splitext(path)[0] + ext_dot
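# Illustrative call (made-up paths): replace_ext(b'/music/a.flac', b'mp3')
# returns b'/music/a.mp3'.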
def get_format(fmt=None):
"""Return the command template and the extension from the config.
"""
if not fmt:
fmt = config['convert']['format'].as_str().lower()
fmt = ALIASES.get(fmt, fmt)
try:
format_info = config['convert']['formats'][fmt].get(dict)
command = format_info['command']
extension = format_info.get('extension', fmt)
except KeyError:
raise ui.UserError(
'convert: format {} needs the "command" field'
.format(fmt)
)
except ConfigTypeError:
command = config['convert']['formats'][fmt].get(str)
extension = fmt
# Convenience and backwards-compatibility shortcuts.
keys = config['convert'].keys()
if 'command' in keys:
command = config['convert']['command'].as_str()
elif 'opts' in keys:
# Undocumented option for backwards compatibility with < 1.3.1.
command = 'ffmpeg -i $source -y {} $dest'.format(
config['convert']['opts'].as_str()
)
if 'extension' in keys:
extension = config['convert']['extension'].as_str()
return (command.encode('utf-8'), extension.encode('utf-8'))
def should_transcode(item, fmt):
"""Determine whether the item should be transcoded as part of
conversion (i.e., its bitrate is high or it has the wrong format).
"""
no_convert_queries = config['convert']['no_convert'].as_str_seq()
if no_convert_queries:
for query_string in no_convert_queries:
query, _ = parse_query_string(query_string, Item)
if query.match(item):
return False
if config['convert']['never_convert_lossy_files'] and \
not (item.format.lower() in LOSSLESS_FORMATS):
return False
maxbr = config['convert']['max_bitrate'].get(int)
return fmt.lower() != item.format.lower() or \
item.bitrate >= 1000 * maxbr
class ConvertPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'dest': None,
'pretend': False,
'link': False,
'hardlink': False,
'threads': util.cpu_count(),
'format': 'mp3',
'id3v23': 'inherit',
'formats': {
'aac': {
'command': 'ffmpeg -i $source -y -vn -acodec aac '
'-aq 1 $dest',
'extension': 'm4a',
},
'alac': {
'command': 'ffmpeg -i $source -y -vn -acodec alac $dest',
'extension': 'm4a',
},
'flac': 'ffmpeg -i $source -y -vn -acodec flac $dest',
'mp3': 'ffmpeg -i $source -y -vn -aq 2 $dest',
'opus':
'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest',
'ogg':
'ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest',
'wma':
'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest',
},
'max_bitrate': 500,
'auto': False,
'tmpdir': None,
'quiet': False,
'embed': True,
'paths': {},
'no_convert': '',
'never_convert_lossy_files': False,
'copy_album_art': False,
'album_art_maxwidth': 0,
'delete_originals': False,
})
self.early_import_stages = [self.auto_convert]
self.register_listener('import_task_files', self._cleanup)
def commands(self):
cmd = ui.Subcommand('convert', help='convert to external location')
cmd.parser.add_option('-p', '--pretend', action='store_true',
help='show actions but do nothing')
cmd.parser.add_option('-t', '--threads', action='store', type='int',
help='change the number of threads, \
defaults to maximum available processors')
cmd.parser.add_option('-k', '--keep-new', action='store_true',
dest='keep_new', help='keep only the converted \
and move the old files')
cmd.parser.add_option('-d', '--dest', action='store',
help='set the destination directory')
cmd.parser.add_option('-f', '--format', action='store', dest='format',
help='set the target format of the tracks')
cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
help='do not ask for confirmation')
cmd.parser.add_option('-l', '--link', action='store_true', dest='link',
help='symlink files that do not \
need transcoding.')
cmd.parser.add_option('-H', '--hardlink', action='store_true',
dest='hardlink',
help='hardlink files that do not \
need transcoding. Overrides --link.')
cmd.parser.add_album_option()
cmd.func = self.convert_func
return [cmd]
def auto_convert(self, config, task):
if self.config['auto']:
par_map(lambda item: self.convert_on_import(config.lib, item),
task.imported_items())
# Utilities converted from functions to methods on logging overhaul
def encode(self, command, source, dest, pretend=False):
"""Encode `source` to `dest` using command template `command`.
Raises `subprocess.CalledProcessError` if the command exited with a
non-zero status code.
"""
# The paths and arguments must be bytes.
assert isinstance(command, bytes)
assert isinstance(source, bytes)
assert isinstance(dest, bytes)
quiet = self.config['quiet'].get(bool)
if not quiet and not pretend:
self._log.info('Encoding {0}', util.displayable_path(source))
command = command.decode(arg_encoding(), 'surrogateescape')
source = decode_commandline_path(source)
dest = decode_commandline_path(dest)
# Substitute $source and $dest in the argument list.
args = shlex.split(command)
encode_cmd = []
for i, arg in enumerate(args):
args[i] = Template(arg).safe_substitute({
'source': source,
'dest': dest,
})
encode_cmd.append(args[i].encode(util.arg_encoding()))
if pretend:
self._log.info('{0}', ' '.join(ui.decargs(args)))
return
try:
util.command_output(encode_cmd)
except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files
self._log.info('Encoding {0} failed. Cleaning up...',
util.displayable_path(source))
self._log.debug('Command {0} exited with status {1}: {2}',
args,
exc.returncode,
exc.output)
util.remove(dest)
util.prune_dirs(os.path.dirname(dest))
raise
except OSError as exc:
raise ui.UserError(
"convert: couldn't invoke '{}': {}".format(
' '.join(ui.decargs(args)), exc
)
)
if not quiet and not pretend:
self._log.info('Finished encoding {0}',
util.displayable_path(source))
def convert_item(self, dest_dir, keep_new, path_formats, fmt,
pretend=False, link=False, hardlink=False):
"""A pipeline thread that converts `Item` objects from a
library.
"""
command, ext = get_format(fmt)
item, original, converted = None, None, None
while True:
item = yield (item, original, converted)
dest = item.destination(basedir=dest_dir,
path_formats=path_formats)
# When keeping the new file in the library, we first move the
# current (pristine) file to the destination. We'll then copy it
# back to its old path or transcode it to a new path.
if keep_new:
original = dest
converted = item.path
if should_transcode(item, fmt):
converted = replace_ext(converted, ext)
else:
original = item.path
if should_transcode(item, fmt):
dest = replace_ext(dest, ext)
converted = dest
# Ensure that only one thread tries to create directories at a
# time. (The existence check is not atomic with the directory
# creation inside this function.)
if not pretend:
with _fs_lock:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info('Skipping {0} (target file exists)',
util.displayable_path(item.path))
continue
if keep_new:
if pretend:
self._log.info('mv {0} {1}',
util.displayable_path(item.path),
util.displayable_path(original))
else:
self._log.info('Moving to {0}',
util.displayable_path(original))
util.move(item.path, original)
if should_transcode(item, fmt):
linked = False
try:
self.encode(command, original, converted, pretend)
except subprocess.CalledProcessError:
continue
else:
linked = link or hardlink
if pretend:
msg = 'ln' if hardlink else ('ln -s' if link else 'cp')
self._log.info('{2} {0} {1}',
util.displayable_path(original),
util.displayable_path(converted),
msg)
else:
# No transcoding necessary.
msg = 'Hardlinking' if hardlink \
else ('Linking' if link else 'Copying')
self._log.info('{1} {0}',
util.displayable_path(item.path),
msg)
if hardlink:
util.hardlink(original, converted)
elif link:
util.link(original, converted)
else:
util.copy(original, converted)
if pretend:
continue
id3v23 = self.config['id3v23'].as_choice([True, False, 'inherit'])
if id3v23 == 'inherit':
id3v23 = None
# Write tags from the database to the converted file.
item.try_write(path=converted, id3v23=id3v23)
if keep_new:
# If we're keeping the transcoded file, read it again (after
# writing) to get new bitrate, duration, etc.
item.path = converted
item.read()
item.store() # Store new path and audio data.
if self.config['embed'] and not linked:
album = item._cached_album
if album and album.artpath:
self._log.debug('embedding album art from {}',
util.displayable_path(album.artpath))
art.embed_item(self._log, item, album.artpath,
itempath=converted, id3v23=id3v23)
if keep_new:
plugins.send('after_convert', item=item,
dest=dest, keepnew=True)
else:
plugins.send('after_convert', item=item,
dest=converted, keepnew=False)
def copy_album_art(self, album, dest_dir, path_formats, pretend=False,
link=False, hardlink=False):
"""Copies or converts the associated cover art of the album. Album must
have at least one track.
"""
if not album or not album.artpath:
return
album_item = album.items().get()
# Album shouldn't be empty.
if not album_item:
return
        # Get the destination of the album's first item (track); we use this
        # to format the path according to path_formats.
dest = album_item.destination(basedir=dest_dir,
path_formats=path_formats)
# Remove item from the path.
dest = os.path.join(*util.components(dest)[:-1])
dest = album.art_destination(album.artpath, item_dir=dest)
if album.artpath == dest:
return
if not pretend:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info('Skipping {0} (target file exists)',
util.displayable_path(album.artpath))
return
# Decide whether we need to resize the cover-art image.
resize = False
maxwidth = None
if self.config['album_art_maxwidth']:
maxwidth = self.config['album_art_maxwidth'].get(int)
size = ArtResizer.shared.get_size(album.artpath)
self._log.debug('image size: {}', size)
if size:
resize = size[0] > maxwidth
else:
self._log.warning('Could not get size of image (please see '
'documentation for dependencies).')
# Either copy or resize (while copying) the image.
if resize:
self._log.info('Resizing cover art from {0} to {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest))
if not pretend:
ArtResizer.shared.resize(maxwidth, album.artpath, dest)
else:
if pretend:
msg = 'ln' if hardlink else ('ln -s' if link else 'cp')
self._log.info('{2} {0} {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest),
msg)
else:
msg = 'Hardlinking' if hardlink \
else ('Linking' if link else 'Copying')
self._log.info('{2} cover art from {0} to {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest),
msg)
if hardlink:
util.hardlink(album.artpath, dest)
elif link:
util.link(album.artpath, dest)
else:
util.copy(album.artpath, dest)
def convert_func(self, lib, opts, args):
dest = opts.dest or self.config['dest'].get()
if not dest:
raise ui.UserError('no convert destination set')
dest = util.bytestring_path(dest)
threads = opts.threads or self.config['threads'].get(int)
path_formats = ui.get_path_formats(self.config['paths'] or None)
fmt = opts.format or self.config['format'].as_str().lower()
if opts.pretend is not None:
pretend = opts.pretend
else:
pretend = self.config['pretend'].get(bool)
if opts.hardlink is not None:
hardlink = opts.hardlink
link = False
elif opts.link is not None:
hardlink = False
link = opts.link
else:
hardlink = self.config['hardlink'].get(bool)
link = self.config['link'].get(bool)
if opts.album:
albums = lib.albums(ui.decargs(args))
items = [i for a in albums for i in a.items()]
if not pretend:
for a in albums:
ui.print_(format(a, ''))
else:
items = list(lib.items(ui.decargs(args)))
if not pretend:
for i in items:
ui.print_(format(i, ''))
if not items:
self._log.error('Empty query result.')
return
if not (pretend or opts.yes or ui.input_yn("Convert? (Y/n)")):
return
if opts.album and self.config['copy_album_art']:
for album in albums:
self.copy_album_art(album, dest, path_formats, pretend,
link, hardlink)
convert = [self.convert_item(dest,
opts.keep_new,
path_formats,
fmt,
pretend,
link,
hardlink)
for _ in range(threads)]
pipe = util.pipeline.Pipeline([iter(items), convert])
pipe.run_parallel()
def convert_on_import(self, lib, item):
"""Transcode a file automatically after it is imported into the
library.
"""
fmt = self.config['format'].as_str().lower()
if should_transcode(item, fmt):
command, ext = get_format()
# Create a temporary file for the conversion.
tmpdir = self.config['tmpdir'].get()
if tmpdir:
tmpdir = util.py3_path(util.bytestring_path(tmpdir))
fd, dest = tempfile.mkstemp(util.py3_path(b'.' + ext), dir=tmpdir)
os.close(fd)
dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later.
# Convert.
try:
self.encode(command, item.path, dest)
except subprocess.CalledProcessError:
return
# Change the newly-imported database entry to point to the
# converted file.
source_path = item.path
item.path = dest
item.write()
item.read() # Load new audio information data.
item.store()
if self.config['delete_originals']:
self._log.info('Removing original file {0}', source_path)
util.remove(source_path, False)
def _cleanup(self, task, session):
for path in task.old_paths:
if path in _temp_files:
if os.path.isfile(path):
util.remove(path)
_temp_files.remove(path)
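# Standalone sketch (not part of the original file) of the $source/$dest
# template substitution performed in encode() above, using stand-in paths:
if __name__ == '__main__':
    import shlex
    from string import Template
    _cmd = 'ffmpeg -i $source -y -vn -aq 2 $dest'
    print([Template(a).safe_substitute(source='in.flac', dest='out.mp3')
           for a in shlex.split(_cmd)])
    # -> ['ffmpeg', '-i', 'in.flac', '-y', '-vn', '-aq', '2', 'out.mp3']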
| size: 20,593 | language: Python | extension: .py | total_lines: 461 | avg_line_length: 30.661605 | max_line_length: 79 | alphanum_fraction: 0.530036 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,653 | file_name: playlist.py | file_path: rembo10_headphones/lib/beetsplug/playlist.py |
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import fnmatch
import tempfile
import beets
from beets.util import path_as_posix
class PlaylistQuery(beets.dbcore.Query):
"""Matches files listed by a playlist file.
"""
def __init__(self, pattern):
self.pattern = pattern
config = beets.config['playlist']
# Get the full path to the playlist
playlist_paths = (
pattern,
os.path.abspath(os.path.join(
config['playlist_dir'].as_filename(),
f'{pattern}.m3u',
)),
)
self.paths = []
for playlist_path in playlist_paths:
if not fnmatch.fnmatch(playlist_path, '*.[mM]3[uU]'):
                # This is not an M3U playlist; skip this candidate.
continue
try:
f = open(beets.util.syspath(playlist_path), mode='rb')
except OSError:
continue
if config['relative_to'].get() == 'library':
relative_to = beets.config['directory'].as_filename()
elif config['relative_to'].get() == 'playlist':
relative_to = os.path.dirname(playlist_path)
else:
relative_to = config['relative_to'].as_filename()
relative_to = beets.util.bytestring_path(relative_to)
for line in f:
                if line.startswith(b'#'):
                    # Ignore comments and the #EXTM3U header (the file is
                    # opened in binary mode, so compare against bytes).
continue
self.paths.append(beets.util.normpath(
os.path.join(relative_to, line.rstrip())
))
f.close()
break
def col_clause(self):
if not self.paths:
# Playlist is empty
return '0', ()
clause = 'path IN ({})'.format(', '.join('?' for path in self.paths))
return clause, (beets.library.BLOB_TYPE(p) for p in self.paths)
def match(self, item):
return item.path in self.paths
class PlaylistPlugin(beets.plugins.BeetsPlugin):
item_queries = {'playlist': PlaylistQuery}
def __init__(self):
super().__init__()
self.config.add({
'auto': False,
'playlist_dir': '.',
'relative_to': 'library',
'forward_slash': False,
})
self.playlist_dir = self.config['playlist_dir'].as_filename()
self.changes = {}
if self.config['relative_to'].get() == 'library':
self.relative_to = beets.util.bytestring_path(
beets.config['directory'].as_filename())
elif self.config['relative_to'].get() != 'playlist':
self.relative_to = beets.util.bytestring_path(
self.config['relative_to'].as_filename())
else:
self.relative_to = None
if self.config['auto']:
self.register_listener('item_moved', self.item_moved)
self.register_listener('item_removed', self.item_removed)
self.register_listener('cli_exit', self.cli_exit)
def item_moved(self, item, source, destination):
self.changes[source] = destination
def item_removed(self, item):
if not os.path.exists(beets.util.syspath(item.path)):
self.changes[item.path] = None
def cli_exit(self, lib):
for playlist in self.find_playlists():
self._log.info(f'Updating playlist: {playlist}')
base_dir = beets.util.bytestring_path(
self.relative_to if self.relative_to
else os.path.dirname(playlist)
)
try:
self.update_playlist(playlist, base_dir)
except beets.util.FilesystemError:
self._log.error('Failed to update playlist: {}'.format(
beets.util.displayable_path(playlist)))
def find_playlists(self):
"""Find M3U playlists in the playlist directory."""
try:
dir_contents = os.listdir(beets.util.syspath(self.playlist_dir))
except OSError:
self._log.warning('Unable to open playlist directory {}'.format(
beets.util.displayable_path(self.playlist_dir)))
return
for filename in dir_contents:
if fnmatch.fnmatch(filename, '*.[mM]3[uU]'):
yield os.path.join(self.playlist_dir, filename)
def update_playlist(self, filename, base_dir):
"""Find M3U playlists in the specified directory."""
changes = 0
deletions = 0
with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tempfp:
new_playlist = tempfp.name
with open(filename, mode='rb') as fp:
for line in fp:
original_path = line.rstrip(b'\r\n')
# Ensure that path from playlist is absolute
is_relative = not os.path.isabs(line)
if is_relative:
lookup = os.path.join(base_dir, original_path)
else:
lookup = original_path
try:
new_path = self.changes[beets.util.normpath(lookup)]
except KeyError:
if self.config['forward_slash']:
line = path_as_posix(line)
tempfp.write(line)
else:
if new_path is None:
# Item has been deleted
deletions += 1
continue
changes += 1
if is_relative:
new_path = os.path.relpath(new_path, base_dir)
line = line.replace(original_path, new_path)
if self.config['forward_slash']:
line = path_as_posix(line)
tempfp.write(line)
if changes or deletions:
self._log.info(
'Updated playlist {} ({} changes, {} deletions)'.format(
filename, changes, deletions))
beets.util.copy(new_playlist, filename, replace=True)
beets.util.remove(new_playlist)
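# Usage sketch (hypothetical playlist name): with this plugin enabled, items
# listed in <playlist_dir>/summer.m3u can be queried as `beet ls
# playlist:summer`, which PlaylistQuery above resolves to absolute paths.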
| size: 6,841 | language: Python | extension: .py | total_lines: 155 | avg_line_length: 31.174194 | max_line_length: 77 | alphanum_fraction: 0.554688 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,654 | file_name: missing.py | file_path: rembo10_headphones/lib/beetsplug/missing.py |
# This file is part of beets.
# Copyright 2016, Pedro Silva.
# Copyright 2017, Quentin Young.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List missing tracks.
"""
import musicbrainzngs
from musicbrainzngs.musicbrainz import MusicBrainzError
from collections import defaultdict
from beets.autotag import hooks
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, Subcommand
from beets import config
from beets.dbcore import types
def _missing_count(album):
"""Return number of missing items in `album`.
"""
return (album.albumtotal or 0) - len(album.items())
def _item(track_info, album_info, album_id):
"""Build and return `item` from `track_info` and `album info`
objects. `item` is missing what fields cannot be obtained from
MusicBrainz alone (encoder, rg_track_gain, rg_track_peak,
rg_album_gain, rg_album_peak, original_year, original_month,
original_day, length, bitrate, format, samplerate, bitdepth,
channels, mtime.)
"""
t = track_info
a = album_info
return Item(**{
'album_id': album_id,
'album': a.album,
'albumartist': a.artist,
'albumartist_credit': a.artist_credit,
'albumartist_sort': a.artist_sort,
'albumdisambig': a.albumdisambig,
'albumstatus': a.albumstatus,
'albumtype': a.albumtype,
'artist': t.artist,
'artist_credit': t.artist_credit,
'artist_sort': t.artist_sort,
'asin': a.asin,
'catalognum': a.catalognum,
'comp': a.va,
'country': a.country,
'day': a.day,
'disc': t.medium,
'disctitle': t.disctitle,
'disctotal': a.mediums,
'label': a.label,
'language': a.language,
'length': t.length,
'mb_albumid': a.album_id,
'mb_artistid': t.artist_id,
'mb_releasegroupid': a.releasegroup_id,
'mb_trackid': t.track_id,
'media': t.media,
'month': a.month,
'script': a.script,
'title': t.title,
'track': t.index,
'tracktotal': len(a.tracks),
'year': a.year,
})
class MissingPlugin(BeetsPlugin):
"""List missing tracks
"""
album_types = {
'missing': types.INTEGER,
}
def __init__(self):
super().__init__()
self.config.add({
'count': False,
'total': False,
'album': False,
})
self.album_template_fields['missing'] = _missing_count
self._command = Subcommand('missing',
help=__doc__,
aliases=['miss'])
self._command.parser.add_option(
'-c', '--count', dest='count', action='store_true',
help='count missing tracks per album')
self._command.parser.add_option(
'-t', '--total', dest='total', action='store_true',
help='count total of missing tracks')
self._command.parser.add_option(
'-a', '--album', dest='album', action='store_true',
help='show missing albums for artist instead of tracks')
self._command.parser.add_format_option()
def commands(self):
def _miss(lib, opts, args):
self.config.set_args(opts)
albms = self.config['album'].get()
helper = self._missing_albums if albms else self._missing_tracks
helper(lib, decargs(args))
self._command.func = _miss
return [self._command]
def _missing_tracks(self, lib, query):
"""Print a listing of tracks missing from each album in the library
matching query.
"""
albums = lib.albums(query)
count = self.config['count'].get()
total = self.config['total'].get()
fmt = config['format_album' if count else 'format_item'].get()
if total:
print(sum([_missing_count(a) for a in albums]))
return
# Default format string for count mode.
if count:
fmt += ': $missing'
for album in albums:
if count:
if _missing_count(album):
print_(format(album, fmt))
else:
for item in self._missing(album):
print_(format(item, fmt))
def _missing_albums(self, lib, query):
"""Print a listing of albums missing from each artist in the library
matching query.
"""
total = self.config['total'].get()
albums = lib.albums(query)
# build dict mapping artist to list of their albums in library
albums_by_artist = defaultdict(list)
for alb in albums:
artist = (alb['albumartist'], alb['mb_albumartistid'])
albums_by_artist[artist].append(alb)
total_missing = 0
# build dict mapping artist to list of all albums
for artist, albums in albums_by_artist.items():
if artist[1] is None or artist[1] == "":
albs_no_mbid = ["'" + a['album'] + "'" for a in albums]
self._log.info(
"No musicbrainz ID for artist '{}' found in album(s) {}; "
"skipping", artist[0], ", ".join(albs_no_mbid)
)
continue
try:
resp = musicbrainzngs.browse_release_groups(artist=artist[1])
release_groups = resp['release-group-list']
except MusicBrainzError as err:
self._log.info(
"Couldn't fetch info for artist '{}' ({}) - '{}'",
artist[0], artist[1], err
)
continue
missing = []
present = []
for rg in release_groups:
missing.append(rg)
for alb in albums:
if alb['mb_releasegroupid'] == rg['id']:
missing.remove(rg)
present.append(rg)
break
total_missing += len(missing)
if total:
continue
missing_titles = {rg['title'] for rg in missing}
for release_title in missing_titles:
print_("{} - {}".format(artist[0], release_title))
if total:
print(total_missing)
def _missing(self, album):
"""Query MusicBrainz to determine items missing from `album`.
"""
item_mbids = [x.mb_trackid for x in album.items()]
        if len(list(album.items())) < (album.albumtotal or 0):
            # Fetch the missing items.
            # TODO: implement caching for this without breaking other stuff.
album_info = hooks.album_for_mbid(album.mb_albumid)
for track_info in getattr(album_info, 'tracks', []):
if track_info.track_id not in item_mbids:
item = _item(track_info, album_info, album.id)
self._log.debug('track {0} in album {1}',
track_info.track_id, album_info.album_id)
yield item
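# Usage sketch (options as defined above): `beet missing` lists the missing
# tracks of each matched album, `beet missing -c` prints per-album counts via
# the $missing template field, and `beet missing -a` lists missing albums per
# artist based on MusicBrainz release groups.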
| size: 7,991 | language: Python | extension: .py | total_lines: 191 | avg_line_length: 31.596859 | max_line_length: 78 | alphanum_fraction: 0.548873 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,655 | file_name: lastimport.py | file_path: rembo10_headphones/lib/beetsplug/lastimport.py |
# This file is part of beets.
# Copyright 2016, Rafael Bodill https://github.com/rafi
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import pylast
from pylast import TopItem, _extract, _number
from beets import ui
from beets import dbcore
from beets import config
from beets import plugins
from beets.dbcore import types
API_URL = 'https://ws.audioscrobbler.com/2.0/'
class LastImportPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
config['lastfm'].add({
'user': '',
'api_key': plugins.LASTFM_KEY,
})
config['lastfm']['api_key'].redact = True
self.config.add({
'per_page': 500,
'retry_limit': 3,
})
self.item_types = {
'play_count': types.INTEGER,
}
def commands(self):
cmd = ui.Subcommand('lastimport', help='import last.fm play-count')
def func(lib, opts, args):
import_lastfm(lib, self._log)
cmd.func = func
return [cmd]
class CustomUser(pylast.User):
""" Custom user class derived from pylast.User, and overriding the
_get_things method to return MBID and album. Also introduces new
get_top_tracks_by_page method to allow access to more than one page of top
tracks.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _get_things(self, method, thing, thing_type, params=None,
cacheable=True):
"""Returns a list of the most played thing_types by this thing, in a
tuple with the total number of pages of results. Includes an MBID, if
found.
"""
doc = self._request(
self.ws_prefix + "." + method, cacheable, params)
toptracks_node = doc.getElementsByTagName('toptracks')[0]
total_pages = int(toptracks_node.getAttribute('totalPages'))
seq = []
for node in doc.getElementsByTagName(thing):
title = _extract(node, "name")
artist = _extract(node, "name", 1)
mbid = _extract(node, "mbid")
playcount = _number(_extract(node, "playcount"))
thing = thing_type(artist, title, self.network)
thing.mbid = mbid
seq.append(TopItem(thing, playcount))
return seq, total_pages
def get_top_tracks_by_page(self, period=pylast.PERIOD_OVERALL, limit=None,
page=1, cacheable=True):
"""Returns the top tracks played by a user, in a tuple with the total
number of pages of results.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_1MONTH
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
params['page'] = page
if limit:
params['limit'] = limit
return self._get_things(
"getTopTracks", "track", pylast.Track, params, cacheable)
def import_lastfm(lib, log):
user = config['lastfm']['user'].as_str()
per_page = config['lastimport']['per_page'].get(int)
if not user:
raise ui.UserError('You must specify a user name for lastimport')
log.info('Fetching last.fm library for @{0}', user)
page_total = 1
page_current = 0
found_total = 0
unknown_total = 0
retry_limit = config['lastimport']['retry_limit'].get(int)
    # Iterate through the pages; the total count is unknown until fetched.
while page_current < page_total:
log.info('Querying page #{0}{1}...',
page_current + 1,
f'/{page_total}' if page_total > 1 else '')
for retry in range(0, retry_limit):
tracks, page_total = fetch_tracks(user, page_current + 1, per_page)
if page_total < 1:
# It means nothing to us!
raise ui.UserError('Last.fm reported no data.')
if tracks:
found, unknown = process_tracks(lib, tracks, log)
found_total += found
unknown_total += unknown
break
else:
log.error('ERROR: unable to read page #{0}',
page_current + 1)
                if retry < retry_limit - 1:
                    log.info(
                        'Retrying page #{0}... ({1}/{2} retry)',
                        page_current + 1, retry + 1, retry_limit
                    )
                else:
                    log.error('FAIL: unable to fetch page #{0}, '
                              'tried {1} times', page_current + 1, retry + 1)
page_current += 1
log.info('... done!')
log.info('finished processing {0} song pages', page_total)
log.info('{0} unknown play-counts', unknown_total)
log.info('{0} play-counts imported', found_total)
def fetch_tracks(user, page, limit):
""" JSON format:
[
{
"mbid": "...",
"artist": "...",
"title": "...",
"playcount": "..."
}
]
"""
network = pylast.LastFMNetwork(api_key=config['lastfm']['api_key'])
user_obj = CustomUser(user, network)
results, total_pages =\
user_obj.get_top_tracks_by_page(limit=limit, page=page)
return [
{
"mbid": track.item.mbid if track.item.mbid else '',
"artist": {
"name": track.item.artist.name
},
"name": track.item.title,
"playcount": track.weight
} for track in results
], total_pages
def process_tracks(lib, tracks, log):
total = len(tracks)
total_found = 0
total_fails = 0
log.info('Received {0} tracks in this page, processing...', total)
for num in range(0, total):
song = None
trackid = tracks[num]['mbid'].strip()
artist = tracks[num]['artist'].get('name', '').strip()
title = tracks[num]['name'].strip()
album = ''
if 'album' in tracks[num]:
album = tracks[num]['album'].get('name', '').strip()
log.debug('query: {0} - {1} ({2})', artist, title, album)
# First try to query by musicbrainz's trackid
if trackid:
song = lib.items(
dbcore.query.MatchQuery('mb_trackid', trackid)
).get()
# If not, try just artist/title
if song is None:
log.debug('no album match, trying by artist/title')
query = dbcore.AndQuery([
dbcore.query.SubstringQuery('artist', artist),
dbcore.query.SubstringQuery('title', title)
])
song = lib.items(query).get()
# Last resort, try just replacing to utf-8 quote
if song is None:
title = title.replace("'", '\u2019')
log.debug('no title match, trying utf-8 single quote')
query = dbcore.AndQuery([
dbcore.query.SubstringQuery('artist', artist),
dbcore.query.SubstringQuery('title', title)
])
song = lib.items(query).get()
if song is not None:
count = int(song.get('play_count', 0))
new_count = int(tracks[num]['playcount'])
log.debug('match: {0} - {1} ({2}) '
'updating: play_count {3} => {4}',
song.artist, song.title, song.album, count, new_count)
song['play_count'] = new_count
song.store()
total_found += 1
else:
total_fails += 1
log.info(' - No match: {0} - {1} ({2})',
artist, title, album)
if total_fails > 0:
log.info('Acquired {0}/{1} play-counts ({2} unknown)',
total_found, total, total_fails)
return total_found, total_fails
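# Usage sketch: with lastfm.user set in the beets configuration, running
# `beet lastimport` pages through the user's top tracks (500 per page by
# default, see the per_page option) and stores each match's playcount in the
# flexible play_count field defined above.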
| size: 8,459 | language: Python | extension: .py | total_lines: 209 | avg_line_length: 30.339713 | max_line_length: 79 | alphanum_fraction: 0.563078 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,656 | file_name: badfiles.py | file_path: rembo10_headphones/lib/beetsplug/badfiles.py |
# This file is part of beets.
# Copyright 2016, François-Xavier Thomas.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use command-line tools to check for audio file corruption.
"""
from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT
import shlex
import os
import errno
import sys
import confuse
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import displayable_path, par_map
from beets import ui
from beets import importer
class CheckerCommandException(Exception):
"""Raised when running a checker failed.
Attributes:
checker: Checker command name.
path: Path to the file being validated.
errno: Error number from the checker execution error.
msg: Message from the checker execution error.
"""
def __init__(self, cmd, oserror):
self.checker = cmd[0]
self.path = cmd[-1]
self.errno = oserror.errno
self.msg = str(oserror)
class BadFiles(BeetsPlugin):
def __init__(self):
super().__init__()
self.verbose = False
self.register_listener('import_task_start',
self.on_import_task_start)
self.register_listener('import_task_before_choice',
self.on_import_task_before_choice)
def run_command(self, cmd):
self._log.debug("running command: {}",
displayable_path(list2cmdline(cmd)))
try:
output = check_output(cmd, stderr=STDOUT)
errors = 0
status = 0
except CalledProcessError as e:
output = e.output
errors = 1
status = e.returncode
except OSError as e:
raise CheckerCommandException(cmd, e)
output = output.decode(sys.getdefaultencoding(), 'replace')
return status, errors, [line for line in output.split("\n") if line]
def check_mp3val(self, path):
status, errors, output = self.run_command(["mp3val", path])
if status == 0:
output = [line for line in output if line.startswith("WARNING:")]
errors = len(output)
return status, errors, output
def check_flac(self, path):
return self.run_command(["flac", "-wst", path])
def check_custom(self, command):
def checker(path):
cmd = shlex.split(command)
cmd.append(path)
return self.run_command(cmd)
return checker
def get_checker(self, ext):
ext = ext.lower()
try:
command = self.config['commands'].get(dict).get(ext)
except confuse.NotFoundError:
command = None
if command:
return self.check_custom(command)
if ext == "mp3":
return self.check_mp3val
if ext == "flac":
return self.check_flac
def check_item(self, item):
        # First, check whether the path exists. If not, the user
        # should probably run `beet update` to clean up the library.
dpath = displayable_path(item.path)
self._log.debug("checking path: {}", dpath)
if not os.path.exists(item.path):
ui.print_("{}: file does not exist".format(
ui.colorize('text_error', dpath)))
# Run the checker against the file if one is found
ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')
checker = self.get_checker(ext)
if not checker:
self._log.error("no checker specified in the config for {}",
ext)
return []
path = item.path
if not isinstance(path, str):
path = item.path.decode(sys.getfilesystemencoding())
try:
status, errors, output = checker(path)
except CheckerCommandException as e:
if e.errno == errno.ENOENT:
self._log.error(
"command not found: {} when validating file: {}",
e.checker,
e.path
)
else:
self._log.error("error invoking {}: {}", e.checker, e.msg)
return []
error_lines = []
if status > 0:
error_lines.append(
"{}: checker exited with status {}"
.format(ui.colorize('text_error', dpath), status))
for line in output:
error_lines.append(f" {line}")
elif errors > 0:
error_lines.append(
"{}: checker found {} errors or warnings"
.format(ui.colorize('text_warning', dpath), errors))
for line in output:
error_lines.append(f" {line}")
elif self.verbose:
error_lines.append(
"{}: ok".format(ui.colorize('text_success', dpath)))
return error_lines
def on_import_task_start(self, task, session):
if not self.config['check_on_import'].get(False):
return
checks_failed = []
for item in task.items:
error_lines = self.check_item(item)
if error_lines:
checks_failed.append(error_lines)
if checks_failed:
task._badfiles_checks_failed = checks_failed
def on_import_task_before_choice(self, task, session):
if hasattr(task, '_badfiles_checks_failed'):
ui.print_('{} one or more files failed checks:'
.format(ui.colorize('text_warning', 'BAD')))
for error in task._badfiles_checks_failed:
for error_line in error:
ui.print_(error_line)
ui.print_()
ui.print_('What would you like to do?')
sel = ui.input_options(['aBort', 'skip', 'continue'])
if sel == 's':
return importer.action.SKIP
elif sel == 'c':
return None
elif sel == 'b':
raise importer.ImportAbort()
else:
raise Exception(f'Unexpected selection: {sel}')
def command(self, lib, opts, args):
# Get items from arguments
items = lib.items(ui.decargs(args))
self.verbose = opts.verbose
def check_and_print(item):
for error_line in self.check_item(item):
ui.print_(error_line)
par_map(check_and_print, items)
def commands(self):
bad_command = Subcommand('bad',
help='check for corrupt or missing files')
bad_command.parser.add_option(
'-v', '--verbose',
action='store_true', default=False, dest='verbose',
help='view results for both the bad and uncorrupted files'
)
bad_command.func = self.command
return [bad_command]
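# Hypothetical configuration sketch (the ogg command is a placeholder):
# get_checker() above prefers a per-extension entry under 'commands' and
# falls back to the built-in mp3val/flac checkers.
#
#   badfiles:
#     check_on_import: yes
#     commands:
#       ogg: ogginfo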
| size: 7,395 | language: Python | extension: .py | total_lines: 181 | avg_line_length: 30.425414 | max_line_length: 77 | alphanum_fraction: 0.58649 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,657 | file_name: spotify.py | file_path: rembo10_headphones/lib/beetsplug/spotify.py |
# This file is part of beets.
# Copyright 2019, Rahul Ahuja.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Spotify release and track search support to the autotagger, along with
Spotify playlist construction.
"""
import re
import json
import base64
import webbrowser
import collections
import unidecode
import requests
import confuse
from beets import ui
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.plugins import MetadataSourcePlugin, BeetsPlugin
class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin):
data_source = 'Spotify'
# Base URLs for the Spotify API
# Documentation: https://developer.spotify.com/web-api
oauth_token_url = 'https://accounts.spotify.com/api/token'
open_track_url = 'https://open.spotify.com/track/'
search_url = 'https://api.spotify.com/v1/search'
album_url = 'https://api.spotify.com/v1/albums/'
track_url = 'https://api.spotify.com/v1/tracks/'
# Spotify IDs consist of 22 alphanumeric characters
# (zero-left-padded base62 representation of randomly generated UUID4)
id_regex = {
'pattern': r'(^|open\.spotify\.com/{}/)([0-9A-Za-z]{{22}})',
'match_group': 2,
}
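    # Illustrative match (hypothetical URL): id_regex['pattern'] formatted
    # with 'album' matches 'open.spotify.com/album/' followed by a
    # 22-character base62 ID, which is captured by match group 2.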
def __init__(self):
super().__init__()
self.config.add(
{
'mode': 'list',
'tiebreak': 'popularity',
'show_failures': False,
'artist_field': 'albumartist',
'album_field': 'album',
'track_field': 'title',
'region_filter': None,
'regex': [],
'client_id': '4e414367a1d14c75a5c5129a627fcab8',
'client_secret': 'f82bdc09b2254f1a8286815d02fd46dc',
'tokenfile': 'spotify_token.json',
}
)
self.config['client_secret'].redact = True
self.tokenfile = self.config['tokenfile'].get(
confuse.Filename(in_app_dir=True)
) # Path to the JSON file for storing the OAuth access token.
self.setup()
def setup(self):
"""Retrieve previously saved OAuth token or generate a new one."""
try:
with open(self.tokenfile) as f:
token_data = json.load(f)
except OSError:
self._authenticate()
else:
self.access_token = token_data['access_token']
def _authenticate(self):
"""Request an access token via the Client Credentials Flow:
https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow
"""
headers = {
'Authorization': 'Basic {}'.format(
base64.b64encode(
':'.join(
self.config[k].as_str()
for k in ('client_id', 'client_secret')
).encode()
).decode()
)
}
response = requests.post(
self.oauth_token_url,
data={'grant_type': 'client_credentials'},
headers=headers,
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise ui.UserError(
'Spotify authorization failed: {}\n{}'.format(
e, response.text
)
)
self.access_token = response.json()['access_token']
# Save the token for later use.
self._log.debug(
'{} access token: {}', self.data_source, self.access_token
)
with open(self.tokenfile, 'w') as f:
json.dump({'access_token': self.access_token}, f)
def _handle_response(self, request_type, url, params=None):
"""Send a request, reauthenticating if necessary.
:param request_type: Type of :class:`Request` constructor,
e.g. ``requests.get``, ``requests.post``, etc.
:type request_type: function
:param url: URL for the new :class:`Request` object.
:type url: str
:param params: (optional) list of tuples or bytes to send
in the query string for the :class:`Request`.
:type params: dict
        :return: JSON data for the :class:`Response <Response>` object.
:rtype: dict
"""
response = request_type(
url,
headers={'Authorization': f'Bearer {self.access_token}'},
params=params,
)
if response.status_code != 200:
if 'token expired' in response.text:
self._log.debug(
'{} access token has expired. Reauthenticating.',
self.data_source,
)
self._authenticate()
return self._handle_response(request_type, url, params=params)
else:
raise ui.UserError(
'{} API error:\n{}\nURL:\n{}\nparams:\n{}'.format(
self.data_source, response.text, url, params
)
)
return response.json()
def album_for_id(self, album_id):
"""Fetch an album by its Spotify ID or URL and return an
AlbumInfo object or None if the album is not found.
:param album_id: Spotify ID or URL for the album
:type album_id: str
:return: AlbumInfo object for album
:rtype: beets.autotag.hooks.AlbumInfo or None
"""
spotify_id = self._get_id('album', album_id)
if spotify_id is None:
return None
album_data = self._handle_response(
requests.get, self.album_url + spotify_id
)
artist, artist_id = self.get_artist(album_data['artists'])
date_parts = [
int(part) for part in album_data['release_date'].split('-')
]
release_date_precision = album_data['release_date_precision']
if release_date_precision == 'day':
year, month, day = date_parts
elif release_date_precision == 'month':
year, month = date_parts
day = None
elif release_date_precision == 'year':
year = date_parts[0]
month = None
day = None
else:
raise ui.UserError(
"Invalid `release_date_precision` returned "
"by {} API: '{}'".format(
self.data_source, release_date_precision
)
)
tracks = []
medium_totals = collections.defaultdict(int)
for i, track_data in enumerate(album_data['tracks']['items'], start=1):
track = self._get_track(track_data)
track.index = i
medium_totals[track.medium] += 1
tracks.append(track)
for track in tracks:
track.medium_total = medium_totals[track.medium]
return AlbumInfo(
album=album_data['name'],
album_id=spotify_id,
artist=artist,
artist_id=artist_id,
tracks=tracks,
albumtype=album_data['album_type'],
va=len(album_data['artists']) == 1
and artist.lower() == 'various artists',
year=year,
month=month,
day=day,
label=album_data['label'],
mediums=max(medium_totals.keys()),
data_source=self.data_source,
data_url=album_data['external_urls']['spotify'],
)
def _get_track(self, track_data):
"""Convert a Spotify track object dict to a TrackInfo object.
:param track_data: Simplified track object
(https://developer.spotify.com/documentation/web-api/reference/object-model/#track-object-simplified)
:type track_data: dict
:return: TrackInfo object for track
:rtype: beets.autotag.hooks.TrackInfo
"""
artist, artist_id = self.get_artist(track_data['artists'])
return TrackInfo(
title=track_data['name'],
track_id=track_data['id'],
artist=artist,
artist_id=artist_id,
length=track_data['duration_ms'] / 1000,
index=track_data['track_number'],
medium=track_data['disc_number'],
medium_index=track_data['track_number'],
data_source=self.data_source,
data_url=track_data['external_urls']['spotify'],
)
def track_for_id(self, track_id=None, track_data=None):
"""Fetch a track by its Spotify ID or URL and return a
TrackInfo object or None if the track is not found.
:param track_id: (Optional) Spotify ID or URL for the track. Either
``track_id`` or ``track_data`` must be provided.
:type track_id: str
:param track_data: (Optional) Simplified track object dict. May be
provided instead of ``track_id`` to avoid unnecessary API calls.
:type track_data: dict
:return: TrackInfo object for track
:rtype: beets.autotag.hooks.TrackInfo or None
"""
if track_data is None:
spotify_id = self._get_id('track', track_id)
if spotify_id is None:
return None
track_data = self._handle_response(
requests.get, self.track_url + spotify_id
)
track = self._get_track(track_data)
# Get album's tracks to set `track.index` (position on the entire
# release) and `track.medium_total` (total number of tracks on
# the track's disc).
album_data = self._handle_response(
requests.get, self.album_url + track_data['album']['id']
)
medium_total = 0
for i, track_data in enumerate(album_data['tracks']['items'], start=1):
if track_data['disc_number'] == track.medium:
medium_total += 1
if track_data['id'] == track.track_id:
track.index = i
track.medium_total = medium_total
return track
@staticmethod
def _construct_search_query(filters=None, keywords=''):
"""Construct a query string with the specified filters and keywords to
be provided to the Spotify Search API
(https://developer.spotify.com/documentation/web-api/reference/search/search/#writing-a-query---guidelines).
:param filters: (Optional) Field filters to apply.
:type filters: dict
:param keywords: (Optional) Query keywords to use.
:type keywords: str
:return: Query string to be provided to the Search API.
:rtype: str
"""
query_components = [
keywords,
' '.join(':'.join((k, v)) for k, v in filters.items()),
]
query = ' '.join([q for q in query_components if q])
if not isinstance(query, str):
query = query.decode('utf8')
return unidecode.unidecode(query)
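    # Illustrative call (made-up values):
    # _construct_search_query(filters={'artist': 'Nirvana'},
    #                         keywords='Lithium')
    # returns 'Lithium artist:Nirvana'.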
def _search_api(self, query_type, filters=None, keywords=''):
"""Query the Spotify Search API for the specified ``keywords``, applying
the provided ``filters``.
:param query_type: Item type to search across. Valid types are:
'album', 'artist', 'playlist', and 'track'.
:type query_type: str
:param filters: (Optional) Field filters to apply.
:type filters: dict
:param keywords: (Optional) Query keywords to use.
:type keywords: str
        :return: JSON data for the :class:`Response <Response>` object or None
if no search results are returned.
:rtype: dict or None
"""
query = self._construct_search_query(
keywords=keywords, filters=filters
)
if not query:
return None
self._log.debug(
f"Searching {self.data_source} for '{query}'"
)
response_data = (
self._handle_response(
requests.get,
self.search_url,
params={'q': query, 'type': query_type},
)
.get(query_type + 's', {})
.get('items', [])
)
self._log.debug(
"Found {} result(s) from {} for '{}'",
len(response_data),
self.data_source,
query,
)
return response_data
def commands(self):
def queries(lib, opts, args):
success = self._parse_opts(opts)
if success:
results = self._match_library_tracks(lib, ui.decargs(args))
self._output_match_results(results)
spotify_cmd = ui.Subcommand(
'spotify', help=f'build a {self.data_source} playlist'
)
spotify_cmd.parser.add_option(
'-m',
'--mode',
action='store',
help='"open" to open {} with playlist, '
'"list" to print (default)'.format(self.data_source),
)
spotify_cmd.parser.add_option(
'-f',
'--show-failures',
action='store_true',
dest='show_failures',
help='list tracks that did not match a {} ID'.format(
self.data_source
),
)
spotify_cmd.func = queries
return [spotify_cmd]
def _parse_opts(self, opts):
if opts.mode:
self.config['mode'].set(opts.mode)
if opts.show_failures:
self.config['show_failures'].set(True)
if self.config['mode'].get() not in ['list', 'open']:
self._log.warning(
'{0} is not a valid mode', self.config['mode'].get()
)
return False
self.opts = opts
return True
def _match_library_tracks(self, library, keywords):
"""Get a list of simplified track object dicts for library tracks
matching the specified ``keywords``.
:param library: beets library object to query.
:type library: beets.library.Library
:param keywords: Query to match library items against.
:type keywords: str
:return: List of simplified track object dicts for library items
matching the specified query.
:rtype: list[dict]
"""
results = []
failures = []
items = library.items(keywords)
if not items:
self._log.debug(
'Your beets query returned no items, skipping {}.',
self.data_source,
)
return
self._log.info('Processing {} tracks...', len(items))
for item in items:
# Apply regex transformations if provided
for regex in self.config['regex'].get():
if (
not regex['field']
or not regex['search']
or not regex['replace']
):
continue
value = item[regex['field']]
item[regex['field']] = re.sub(
regex['search'], regex['replace'], value
)
# Custom values can be passed in the config (just in case)
artist = item[self.config['artist_field'].get()]
album = item[self.config['album_field'].get()]
keywords = item[self.config['track_field'].get()]
# Query the Web API for each track, look for the items' JSON data
query_filters = {'artist': artist, 'album': album}
response_data_tracks = self._search_api(
query_type='track', keywords=keywords, filters=query_filters
)
if not response_data_tracks:
query = self._construct_search_query(
keywords=keywords, filters=query_filters
)
failures.append(query)
continue
# Apply market filter if requested
region_filter = self.config['region_filter'].get()
if region_filter:
response_data_tracks = [
track_data
for track_data in response_data_tracks
if region_filter in track_data['available_markets']
]
if (
len(response_data_tracks) == 1
or self.config['tiebreak'].get() == 'first'
):
self._log.debug(
'{} track(s) found, count: {}',
self.data_source,
len(response_data_tracks),
)
chosen_result = response_data_tracks[0]
else:
# Use the popularity filter
self._log.debug(
'Most popular track chosen, count: {}',
len(response_data_tracks),
)
chosen_result = max(
response_data_tracks, key=lambda x: x['popularity']
)
results.append(chosen_result)
failure_count = len(failures)
if failure_count > 0:
if self.config['show_failures'].get():
self._log.info(
'{} track(s) did not match a {} ID:',
failure_count,
self.data_source,
)
for track in failures:
self._log.info('track: {}', track)
self._log.info('')
else:
self._log.warning(
'{} track(s) did not match a {} ID:\n'
'use --show-failures to display',
failure_count,
self.data_source,
)
return results
def _output_match_results(self, results):
"""Open a playlist or print Spotify URLs for the provided track
object dicts.
:param results: List of simplified track object dicts
(https://developer.spotify.com/documentation/web-api/reference/object-model/#track-object-simplified)
:type results: list[dict]
"""
if results:
spotify_ids = [track_data['id'] for track_data in results]
if self.config['mode'].get() == 'open':
self._log.info(
'Attempting to open {} with playlist'.format(
self.data_source
)
)
spotify_url = 'spotify:trackset:Playlist:' + ','.join(
spotify_ids
)
webbrowser.open(spotify_url)
else:
for spotify_id in spotify_ids:
print(self.open_track_url + spotify_id)
else:
self._log.warning(
f'No {self.data_source} tracks found from beets query'
)
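# Hedged sketch (not part of the plugin; IDs are placeholders): in 'open'
# mode the matched tracks are packed into a single trackset URL that the
# Spotify client understands, e.g.
#   spotify:trackset:Playlist:<id1>,<id2>,<id3>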
| 19,323 | Python | .py | 475 | 28.825263 | 116 | 0.550436 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,658 | fromfilename.py | rembo10_headphones/lib/beetsplug/fromfilename.py |
# This file is part of beets.
# Copyright 2016, Jan-Erik Dahlin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""If the title is empty, try to extract track and title from the
filename.
"""
from beets import plugins
from beets.util import displayable_path
import os
import re
# Filename field extraction patterns.
PATTERNS = [
# Useful patterns.
r'^(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$',
r'^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$',
r'^(?P<artist>.+)[\-_](?P<title>.+)$',
r'^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)$',
r'^(?P<title>.+)$',
r'^(?P<track>\d+)[\s.\-_]+(?P<title>.+)$',
r'^(?P<track>\d+)\s+(?P<title>.+)$',
r'^(?P<title>.+) by (?P<artist>.+)$',
r'^(?P<track>\d+).*$',
]
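# Hedged example (not in the original plugin): a tiny helper showing how
# the patterns above are tried in order against a hypothetical filename
# stem; the first pattern that matches with named groups wins.
def _demo_first_pattern_match(name):
    """Return the groupdict of the first matching PATTERNS entry."""
    for pattern in PATTERNS:
        m = re.match(pattern, name, re.IGNORECASE)
        if m and m.groupdict():
            return m.groupdict()
    return None
# _demo_first_pattern_match('The Artist-The Title')
#   -> {'artist': 'The Artist', 'title': 'The Title'}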
# Titles considered "empty" and in need of replacement.
BAD_TITLE_PATTERNS = [
r'^$',
]
def equal(seq):
"""Determine whether a sequence holds identical elements.
"""
return len(set(seq)) <= 1
def equal_fields(matchdict, field):
"""Do all items in `matchdict`, whose values are dictionaries, have
the same value for `field`? (If they do, the field is probably not
the title.)
"""
return equal(m[field] for m in matchdict.values())
def all_matches(names, pattern):
"""If all the filenames in the item/filename mapping match the
pattern, return a dictionary mapping the items to dictionaries
giving the value for each named subpattern in the match. Otherwise,
return None.
"""
matches = {}
for item, name in names.items():
m = re.match(pattern, name, re.IGNORECASE)
if m and m.groupdict():
# Only yield a match when the regex applies *and* has
# capture groups. Otherwise, no information can be extracted
# from the filename.
matches[item] = m.groupdict()
else:
return None
return matches
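# Hedged usage sketch (hypothetical data): `names` normally maps beets
# Items to filename stems, but any hashable keys illustrate the contract.
#   all_matches({'a': '01-Foo', 'b': '02-Bar'},
#               r'^(?P<track>\d+)[\-_](?P<title>.+)$')
#   -> {'a': {'track': '01', 'title': 'Foo'},
#       'b': {'track': '02', 'title': 'Bar'}}
#   all_matches({'a': '01-Foo', 'b': 'NoTrackHere'},
#               r'^(?P<track>\d+)[\-_](?P<title>.+)$')
#   -> None  (one name fails to match, so nothing usable is extracted)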
def bad_title(title):
"""Determine whether a given title is "bad" (empty or otherwise
meaningless) and in need of replacement.
"""
for pat in BAD_TITLE_PATTERNS:
if re.match(pat, title, re.IGNORECASE):
return True
return False
def apply_matches(d):
"""Given a mapping from items to field dicts, apply the fields to
the objects.
"""
some_map = list(d.values())[0]
keys = some_map.keys()
# Only proceed if the "tag" field is equal across all filenames.
if 'tag' in keys and not equal_fields(d, 'tag'):
return
# Given both an "artist" and "title" field, assume that one is
# *actually* the artist, which must be uniform, and use the other
# for the title. This, of course, won't work for VA albums.
if 'artist' in keys:
if equal_fields(d, 'artist'):
artist = some_map['artist']
title_field = 'title'
elif equal_fields(d, 'title'):
artist = some_map['title']
title_field = 'artist'
else:
# Both vary. Abort.
return
for item in d:
if not item.artist:
item.artist = artist
# No artist field: remaining field is the title.
else:
title_field = 'title'
# Apply the title and track.
for item in d:
if bad_title(item.title):
item.title = str(d[item][title_field])
if 'track' in d[item] and item.track == 0:
item.track = int(d[item]['track'])
# Plugin structure and hook into import process.
class FromFilenamePlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener('import_task_start', filename_task)
def filename_task(task, session):
"""Examine each item in the task to see if we can extract a title
from the filename. Try to match all filenames to a number of
regexps, starting with the most complex patterns and successively
    trying less complex patterns. As soon as all filenames match the
    same regex, we can make an educated guess about which part of the
    regex contains the title.
"""
items = task.items if task.is_album else [task.item]
# Look for suspicious (empty or meaningless) titles.
missing_titles = sum(bad_title(i.title) for i in items)
if missing_titles:
# Get the base filenames (no path or extension).
names = {}
for item in items:
path = displayable_path(item.path)
name, _ = os.path.splitext(os.path.basename(path))
names[item] = name
# Look for useful information in the filenames.
for pattern in PATTERNS:
d = all_matches(names, pattern)
if d:
apply_matches(d)
| 5,240 | Python | .py | 134 | 33.119403 | 79 | 0.638637 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,659 | kodiupdate.py | rembo10_headphones/lib/beetsplug/kodiupdate.py |
# This file is part of beets.
# Copyright 2017, Pauli Kettunen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Kodi library whenever the beets library is changed.
This is based on the Plex Update plugin.
Put something like the following in your config.yaml to configure:
kodi:
host: localhost
port: 8080
user: user
pwd: secret
"""
import requests
from beets import config
from beets.plugins import BeetsPlugin
def update_kodi(host, port, user, password):
"""Sends request to the Kodi api to start a library refresh.
"""
url = f"http://{host}:{port}/jsonrpc"
"""Content-Type: application/json is mandatory
according to the kodi jsonrpc documentation"""
headers = {'Content-Type': 'application/json'}
# Create the payload. Id seems to be mandatory.
payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}
r = requests.post(
url,
auth=(user, password),
json=payload,
headers=headers)
return r
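# Hedged usage sketch (hypothetical host and credentials): the helper
# above boils down to one authenticated JSON-RPC POST, e.g.
#   update_kodi('localhost', 8080, 'kodi', 'secret')
#   -> POST http://localhost:8080/jsonrpc
#      body: {"jsonrpc": "2.0", "method": "AudioLibrary.Scan", "id": 1}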
class KodiUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
# Adding defaults.
config['kodi'].add({
'host': 'localhost',
'port': 8080,
'user': 'kodi',
'pwd': 'kodi'})
config['kodi']['pwd'].redact = True
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Kodi server.
"""
self._log.info('Requesting a Kodi library update...')
# Try to send update request.
try:
r = update_kodi(
config['kodi']['host'].get(),
config['kodi']['port'].get(),
config['kodi']['user'].get(),
config['kodi']['pwd'].get())
r.raise_for_status()
except requests.exceptions.RequestException as e:
self._log.warning('Kodi update failed: {0}',
str(e))
return
json = r.json()
if json.get('result') != 'OK':
self._log.warning('Kodi update failed: JSON response was {0!r}',
json)
return
self._log.info('Kodi update triggered')
| 3,012 | Python | .py | 76 | 31.907895 | 77 | 0.624957 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,660 | chroma.py | rembo10_headphones/lib/beetsplug/chroma.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Chromaprint/Acoustid acoustic fingerprinting support to the
autotagger. Requires the pyacoustid library.
"""
from beets import plugins
from beets import ui
from beets import util
from beets import config
from beets.autotag import hooks
import confuse
import acoustid
from collections import defaultdict
from functools import partial
import re
API_KEY = '1vOwZtEn'
SCORE_THRESH = 0.5
TRACK_ID_WEIGHT = 10.0
COMMON_REL_THRESH = 0.6 # How many tracks must have an album in common?
MAX_RECORDINGS = 5
MAX_RELEASES = 5
# Stores the Acoustid match information for each track. This is
# populated when an import task begins and then used when searching for
# candidates. It maps audio file paths to (recording_ids, release_ids)
# pairs. If a given path is not present in the mapping, then no match
# was found.
_matches = {}
# Stores the fingerprint and Acoustid ID for each track. This is stored
# as metadata for each track for later use but is not relevant for
# autotagging.
_fingerprints = {}
_acoustids = {}
def prefix(it, count):
"""Truncate an iterable to at most `count` items.
"""
for i, v in enumerate(it):
if i >= count:
break
yield v
def releases_key(release, countries, original_year):
"""Used as a key to sort releases by date then preferred country
"""
date = release.get('date')
if date and original_year:
year = date.get('year', 9999)
month = date.get('month', 99)
day = date.get('day', 99)
else:
year = 9999
month = 99
day = 99
# Uses index of preferred countries to sort
country_key = 99
if release.get('country'):
for i, country in enumerate(countries):
if country.match(release['country']):
country_key = i
break
return (year, month, day, country_key)
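# Hedged sketch (hypothetical release dicts): with original_year enabled
# and preferred countries ['US'], an early US release sorts first.
#   _us = [re.compile('US', re.I)]
#   releases_key({'date': {'year': 1999, 'month': 4}, 'country': 'US'},
#                _us, True)                  -> (1999, 4, 99, 0)
#   releases_key({'country': 'GB'}, _us, True)  -> (9999, 99, 99, 99)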
def acoustid_match(log, path):
"""Gets metadata for a file from Acoustid and populates the
_matches, _fingerprints, and _acoustids dictionaries accordingly.
"""
try:
duration, fp = acoustid.fingerprint_file(util.syspath(path))
except acoustid.FingerprintGenerationError as exc:
log.error('fingerprinting of {0} failed: {1}',
util.displayable_path(repr(path)), exc)
return None
fp = fp.decode()
_fingerprints[path] = fp
try:
res = acoustid.lookup(API_KEY, fp, duration,
meta='recordings releases')
except acoustid.AcoustidError as exc:
log.debug('fingerprint matching {0} failed: {1}',
util.displayable_path(repr(path)), exc)
return None
log.debug('chroma: fingerprinted {0}',
util.displayable_path(repr(path)))
# Ensure the response is usable and parse it.
if res['status'] != 'ok' or not res.get('results'):
log.debug('no match found')
return None
result = res['results'][0] # Best match.
if result['score'] < SCORE_THRESH:
log.debug('no results above threshold')
return None
_acoustids[path] = result['id']
# Get recording and releases from the result
if not result.get('recordings'):
log.debug('no recordings found')
return None
recording_ids = []
releases = []
for recording in result['recordings']:
recording_ids.append(recording['id'])
if 'releases' in recording:
releases.extend(recording['releases'])
# The releases list is essentially in random order from the Acoustid lookup
# so we optionally sort it using the match.preferred configuration options.
# 'original_year' to sort the earliest first and
# 'countries' to then sort preferred countries first.
country_patterns = config['match']['preferred']['countries'].as_str_seq()
countries = [re.compile(pat, re.I) for pat in country_patterns]
original_year = config['match']['preferred']['original_year']
releases.sort(key=partial(releases_key,
countries=countries,
original_year=original_year))
release_ids = [rel['id'] for rel in releases]
log.debug('matched recordings {0} on releases {1}',
recording_ids, release_ids)
_matches[path] = recording_ids, release_ids
# Plugin structure and autotagging logic.
def _all_releases(items):
"""Given an iterable of Items, determines (according to Acoustid)
which releases the items have in common. Generates release IDs.
"""
# Count the number of "hits" for each release.
relcounts = defaultdict(int)
for item in items:
if item.path not in _matches:
continue
_, release_ids = _matches[item.path]
for release_id in release_ids:
relcounts[release_id] += 1
for release_id, count in relcounts.items():
if float(count) / len(items) > COMMON_REL_THRESH:
yield release_id
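# Hedged arithmetic sketch: with COMMON_REL_THRESH = 0.6 and 10 items,
# a release ID must be hit by strictly more than 6 of them (i.e. 7 or
# more) before it is yielded as a common release.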
class AcoustidPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'auto': True,
})
config['acoustid']['apikey'].redact = True
if self.config['auto']:
self.register_listener('import_task_start', self.fingerprint_task)
self.register_listener('import_task_apply', apply_acoustid_metadata)
def fingerprint_task(self, task, session):
return fingerprint_task(self._log, task, session)
def track_distance(self, item, info):
dist = hooks.Distance()
if item.path not in _matches or not info.track_id:
# Match failed or no track ID.
return dist
recording_ids, _ = _matches[item.path]
dist.add_expr('track_id', info.track_id not in recording_ids)
return dist
def candidates(self, items, artist, album, va_likely, extra_tags=None):
albums = []
for relid in prefix(_all_releases(items), MAX_RELEASES):
album = hooks.album_for_mbid(relid)
if album:
albums.append(album)
self._log.debug('acoustid album candidates: {0}', len(albums))
return albums
def item_candidates(self, item, artist, title):
if item.path not in _matches:
return []
recording_ids, _ = _matches[item.path]
tracks = []
for recording_id in prefix(recording_ids, MAX_RECORDINGS):
track = hooks.track_for_mbid(recording_id)
if track:
tracks.append(track)
self._log.debug('acoustid item candidates: {0}', len(tracks))
return tracks
def commands(self):
submit_cmd = ui.Subcommand('submit',
help='submit Acoustid fingerprints')
def submit_cmd_func(lib, opts, args):
try:
apikey = config['acoustid']['apikey'].as_str()
except confuse.NotFoundError:
raise ui.UserError('no Acoustid user API key provided')
submit_items(self._log, apikey, lib.items(ui.decargs(args)))
submit_cmd.func = submit_cmd_func
fingerprint_cmd = ui.Subcommand(
'fingerprint',
help='generate fingerprints for items without them'
)
def fingerprint_cmd_func(lib, opts, args):
for item in lib.items(ui.decargs(args)):
fingerprint_item(self._log, item, write=ui.should_write())
fingerprint_cmd.func = fingerprint_cmd_func
return [submit_cmd, fingerprint_cmd]
# Hooks into import process.
def fingerprint_task(log, task, session):
"""Fingerprint each item in the task for later use during the
autotagging candidate search.
"""
items = task.items if task.is_album else [task.item]
for item in items:
acoustid_match(log, item.path)
def apply_acoustid_metadata(task, session):
"""Apply Acoustid metadata (fingerprint and ID) to the task's items.
"""
for item in task.imported_items():
if item.path in _fingerprints:
item.acoustid_fingerprint = _fingerprints[item.path]
if item.path in _acoustids:
item.acoustid_id = _acoustids[item.path]
# UI commands.
def submit_items(log, userkey, items, chunksize=64):
"""Submit fingerprints for the items to the Acoustid server.
"""
data = [] # The running list of dictionaries to submit.
def submit_chunk():
"""Submit the current accumulated fingerprint data."""
log.info('submitting {0} fingerprints', len(data))
try:
acoustid.submit(API_KEY, userkey, data)
except acoustid.AcoustidError as exc:
log.warning('acoustid submission error: {0}', exc)
del data[:]
for item in items:
fp = fingerprint_item(log, item, write=ui.should_write())
# Construct a submission dictionary for this item.
item_data = {
'duration': int(item.length),
'fingerprint': fp,
}
if item.mb_trackid:
item_data['mbid'] = item.mb_trackid
log.debug('submitting MBID')
else:
item_data.update({
'track': item.title,
'artist': item.artist,
'album': item.album,
'albumartist': item.albumartist,
'year': item.year,
'trackno': item.track,
'discno': item.disc,
})
log.debug('submitting textual metadata')
data.append(item_data)
# If we have enough data, submit a chunk.
if len(data) >= chunksize:
submit_chunk()
# Submit remaining data in a final chunk.
if data:
submit_chunk()
def fingerprint_item(log, item, write=False):
"""Get the fingerprint for an Item. If the item already has a
fingerprint, it is not regenerated. If fingerprint generation fails,
return None. If the items are associated with a library, they are
saved to the database. If `write` is set, then the new fingerprints
are also written to files' metadata.
"""
# Get a fingerprint and length for this track.
if not item.length:
log.info('{0}: no duration available',
util.displayable_path(item.path))
elif item.acoustid_fingerprint:
if write:
log.info('{0}: fingerprint exists, skipping',
util.displayable_path(item.path))
else:
log.info('{0}: using existing fingerprint',
util.displayable_path(item.path))
return item.acoustid_fingerprint
else:
log.info('{0}: fingerprinting',
util.displayable_path(item.path))
try:
_, fp = acoustid.fingerprint_file(util.syspath(item.path))
item.acoustid_fingerprint = fp.decode()
if write:
log.info('{0}: writing fingerprint',
util.displayable_path(item.path))
item.try_write()
if item._db:
item.store()
return item.acoustid_fingerprint
except acoustid.FingerprintGenerationError as exc:
log.info('fingerprint generation failed: {0}', exc)
| 11,871 | Python | .py | 289 | 32.844291 | 79 | 0.636648 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,661 | __init__.py | rembo10_headphones/lib/beetsplug/__init__.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A namespace package for beets plugins."""
# Make this a namespace package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| 802 | Python | .py | 17 | 46 | 71 | 0.776215 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,662 | ftintitle.py | rembo10_headphones/lib/beetsplug/ftintitle.py |
# This file is part of beets.
# Copyright 2016, Verrus, <github.com/Verrus/beets-plugin-featInTitle>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Moves "featured" artists to the title from the artist field.
"""
import re
from beets import plugins
from beets import ui
from beets.util import displayable_path
def split_on_feat(artist):
"""Given an artist string, split the "main" artist from any artist
on the right-hand side of a string like "feat". Return the main
artist, which is always a string, and the featuring artist, which
may be a string or None if none is present.
"""
# split on the first "feat".
regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)
parts = [s.strip() for s in regex.split(artist, 1)]
if len(parts) == 1:
return parts[0], None
else:
return tuple(parts)
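# Hedged sketch (behavior depends on beets' default feat tokens):
#   split_on_feat('Alice feat. Bob')  -> ('Alice', 'Bob')
#   split_on_feat('Alice')            -> ('Alice', None)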
def contains_feat(title):
"""Determine whether the title contains a "featured" marker.
"""
return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
def find_feat_part(artist, albumartist):
"""Attempt to find featured artists in the item's artist fields and
return the results. Returns None if no featured artist found.
"""
# Look for the album artist in the artist field. If it's not
# present, give up.
albumartist_split = artist.split(albumartist, 1)
if len(albumartist_split) <= 1:
return None
# If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the
# featured artist.
elif albumartist_split[1] != '':
# Extract the featured artist from the right-hand side.
_, feat_part = split_on_feat(albumartist_split[1])
return feat_part
# Otherwise, if there's nothing on the right-hand side, look for a
# featuring artist on the left-hand side.
else:
lhs, rhs = split_on_feat(albumartist_split[0])
if lhs:
return lhs
return None
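# Hedged sketch (hypothetical strings): the album artist is located inside
# the track artist and the featured part is read from what remains.
#   find_feat_part('Alice feat. Bob', 'Alice')  -> 'Bob'
#   find_feat_part('Carol', 'Alice')            -> None  (albumartist absent)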
class FtInTitlePlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'auto': True,
'drop': False,
'format': 'feat. {0}',
})
self._command = ui.Subcommand(
'ftintitle',
help='move featured artists to the title field')
self._command.parser.add_option(
'-d', '--drop', dest='drop',
action='store_true', default=None,
help='drop featuring from artists and ignore title update')
if self.config['auto']:
self.import_stages = [self.imported]
def commands(self):
def func(lib, opts, args):
self.config.set_args(opts)
drop_feat = self.config['drop'].get(bool)
write = ui.should_write()
for item in lib.items(ui.decargs(args)):
self.ft_in_title(item, drop_feat)
item.store()
if write:
item.try_write()
self._command.func = func
return [self._command]
def imported(self, session, task):
"""Import hook for moving featuring artist automatically.
"""
drop_feat = self.config['drop'].get(bool)
for item in task.imported_items():
self.ft_in_title(item, drop_feat)
item.store()
def update_metadata(self, item, feat_part, drop_feat):
"""Choose how to add new artists to the title and set the new
metadata. Also, print out messages about any changes that are made.
If `drop_feat` is set, then do not add the artist to the title; just
remove it from the artist field.
"""
# In all cases, update the artist fields.
self._log.info('artist: {0} -> {1}', item.artist, item.albumartist)
item.artist = item.albumartist
if item.artist_sort:
# Just strip the featured artist from the sort name.
item.artist_sort, _ = split_on_feat(item.artist_sort)
# Only update the title if it does not already contain a featured
# artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title):
feat_format = self.config['format'].as_str()
new_format = feat_format.format(feat_part)
new_title = f"{item.title} {new_format}"
self._log.info('title: {0} -> {1}', item.title, new_title)
item.title = new_title
def ft_in_title(self, item, drop_feat):
"""Look for featured artists in the item's artist fields and move
them to the title.
"""
artist = item.artist.strip()
albumartist = item.albumartist.strip()
# Check whether there is a featured artist on this track and the
# artist field does not exactly match the album artist field. In
# that case, we attempt to move the featured artist to the title.
_, featured = split_on_feat(artist)
if featured and albumartist != artist and albumartist:
self._log.info('{}', displayable_path(item.path))
feat_part = None
# Attempt to find the featured artist.
feat_part = find_feat_part(artist, albumartist)
# If we have a featuring artist, move it to the title.
if feat_part:
self.update_metadata(item, feat_part, drop_feat)
else:
self._log.info('no featuring artists found')
| 6,042 | Python | .py | 135 | 36.592593 | 77 | 0.640402 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,663 | beatport.py | rembo10_headphones/lib/beetsplug/beatport.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger
"""
import json
import re
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing,
VerifierMissing)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.plugins import BeetsPlugin, MetadataSourcePlugin, get_distance
import confuse
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = f'beets/{beets.__version__} +https://beets.io/'
class BeatportAPIError(Exception):
pass
class BeatportObject:
def __init__(self, data):
self.beatport_id = data['id']
self.name = str(data['name'])
if 'releaseDate' in data:
self.release_date = datetime.strptime(data['releaseDate'],
'%Y-%m-%d')
if 'artists' in data:
self.artists = [(x['id'], str(x['name']))
for x in data['artists']]
if 'genres' in data:
self.genres = [str(x['name'])
for x in data['genres']]
class BeatportClient:
_api_base = 'https://oauth-api.beatport.com'
def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
""" Initiate the client with OAuth information.
For the initial authentication with the backend `auth_key` and
`auth_secret` can be `None`. Use `get_authorize_url` and
`get_access_token` to obtain them for subsequent uses of the API.
:param c_key: OAuth1 client key
:param c_secret: OAuth1 client secret
:param auth_key: OAuth1 resource owner key
:param auth_secret: OAuth1 resource owner secret
"""
self.api = OAuth1Session(
client_key=c_key, client_secret=c_secret,
resource_owner_key=auth_key,
resource_owner_secret=auth_secret,
callback_uri='oob')
self.api.headers = {'User-Agent': USER_AGENT}
def get_authorize_url(self):
""" Generate the URL for the user to authorize the application.
Retrieves a request token from the Beatport API and returns the
corresponding authorization URL on their end that the user has
to visit.
This is the first step of the initial authorization process with the
API. Once the user has visited the URL, call
:py:method:`get_access_token` with the displayed data to complete
the process.
:returns: Authorization URL for the user to visit
:rtype: unicode
"""
self.api.fetch_request_token(
self._make_url('/identity/1/oauth/request-token'))
return self.api.authorization_url(
self._make_url('/identity/1/oauth/authorize'))
def get_access_token(self, auth_data):
""" Obtain the final access token and secret for the API.
:param auth_data: URL-encoded authorization data as displayed at
the authorization url (obtained via
:py:meth:`get_authorize_url`) after signing in
:type auth_data: unicode
:returns: OAuth resource owner key and secret
:rtype: (unicode, unicode) tuple
"""
self.api.parse_authorization_response(
"https://beets.io/auth?" + auth_data)
access_data = self.api.fetch_access_token(
self._make_url('/identity/1/oauth/access-token'))
return access_data['oauth_token'], access_data['oauth_token_secret']
def search(self, query, release_type='release', details=True):
""" Perform a search of the Beatport catalogue.
:param query: Query string
:param release_type: Type of releases to search for, can be
'release' or 'track'
:param details: Retrieve additional information about the
search results. Currently this will fetch
the tracklist for releases and do nothing for
tracks
:returns: Search results
:rtype: generator that yields
py:class:`BeatportRelease` or
:py:class:`BeatportTrack`
"""
response = self._get('catalog/3/search',
query=query, perPage=5,
facets=[f'fieldType:{release_type}'])
for item in response:
if release_type == 'release':
if details:
release = self.get_release(item['id'])
else:
release = BeatportRelease(item)
yield release
elif release_type == 'track':
yield BeatportTrack(item)
def get_release(self, beatport_id):
""" Get information about a single release.
:param beatport_id: Beatport ID of the release
:returns: The matching release
:rtype: :py:class:`BeatportRelease`
"""
response = self._get('/catalog/3/releases', id=beatport_id)
if response:
release = BeatportRelease(response[0])
release.tracks = self.get_release_tracks(beatport_id)
return release
return None
def get_release_tracks(self, beatport_id):
""" Get all tracks for a given release.
:param beatport_id: Beatport ID of the release
:returns: Tracks in the matching release
:rtype: list of :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', releaseId=beatport_id,
perPage=100)
return [BeatportTrack(t) for t in response]
def get_track(self, beatport_id):
""" Get information about a single track.
:param beatport_id: Beatport ID of the track
:returns: The matching track
:rtype: :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', id=beatport_id)
return BeatportTrack(response[0])
def _make_url(self, endpoint):
""" Get complete URL for a given API endpoint. """
if not endpoint.startswith('/'):
endpoint = '/' + endpoint
return self._api_base + endpoint
def _get(self, endpoint, **kwargs):
""" Perform a GET request on a given API endpoint.
Automatically extracts result data from the response and converts HTTP
exceptions into :py:class:`BeatportAPIError` objects.
"""
try:
response = self.api.get(self._make_url(endpoint), params=kwargs)
except Exception as e:
raise BeatportAPIError("Error connecting to Beatport API: {}"
.format(e))
if not response:
raise BeatportAPIError(
"Error {0.status_code} for '{0.request.path_url}"
.format(response))
return response.json()['results']
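# Hedged usage sketch (hypothetical keys): the one-time OAuth dance with
# BeatportClient, mirroring what BeatportPlugin.authenticate() does below.
#   client = BeatportClient('my-key', 'my-secret')         # no tokens yet
#   url = client.get_authorize_url()                       # user visits url
#   token, secret = client.get_access_token(pasted_data)   # from browser
#   api = BeatportClient('my-key', 'my-secret', token, secret)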
class BeatportRelease(BeatportObject):
def __str__(self):
if len(self.artists) < 4:
artist_str = ", ".join(x[1] for x in self.artists)
else:
artist_str = "Various Artists"
return "<BeatportRelease: {} - {} ({})>".format(
artist_str,
self.name,
self.catalog_number,
)
def __repr__(self):
return str(self).encode('utf-8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'catalogNumber' in data:
self.catalog_number = data['catalogNumber']
if 'label' in data:
self.label_name = data['label']['name']
if 'category' in data:
self.category = data['category']
if 'slug' in data:
self.url = "https://beatport.com/release/{}/{}".format(
data['slug'], data['id'])
self.genre = data.get('genre')
class BeatportTrack(BeatportObject):
def __str__(self):
artist_str = ", ".join(x[1] for x in self.artists)
return ("<BeatportTrack: {} - {} ({})>"
.format(artist_str, self.name, self.mix_name))
def __repr__(self):
return str(self).encode('utf-8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'title' in data:
self.title = str(data['title'])
if 'mixName' in data:
self.mix_name = str(data['mixName'])
self.length = timedelta(milliseconds=data.get('lengthMs', 0) or 0)
if not self.length:
try:
min, sec = data.get('length', '0:0').split(':')
self.length = timedelta(minutes=int(min), seconds=int(sec))
except ValueError:
pass
if 'slug' in data:
self.url = "https://beatport.com/track/{}/{}" \
.format(data['slug'], data['id'])
self.track_number = data.get('trackNumber')
self.bpm = data.get('bpm')
self.initial_key = str(
(data.get('key') or {}).get('shortName')
)
        # Use the first 'subGenres' entry and, if not present, fall back
        # to the first 'genres' entry.
if data.get('subGenres'):
self.genre = str(data['subGenres'][0].get('name'))
elif data.get('genres'):
self.genre = str(data['genres'][0].get('name'))
class BeatportPlugin(BeetsPlugin):
data_source = 'Beatport'
def __init__(self):
super().__init__()
self.config.add({
'apikey': '57713c3906af6f5def151b33601389176b37b429',
'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954',
'tokenfile': 'beatport_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except OSError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.client = BeatportClient(c_key, c_secret, token, secret)
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = BeatportClient(c_key, c_secret)
try:
url = auth_client.get_authorize_url()
except AUTH_ERRORS as e:
self._log.debug('authentication error: {0}', e)
raise beets.ui.UserError('communication with Beatport failed')
beets.ui.print_("To authenticate with Beatport, visit:")
beets.ui.print_(url)
# Ask for the verifier data and validate it.
data = beets.ui.input_("Enter the string displayed in your browser:")
try:
token, secret = auth_client.get_access_token(data)
except AUTH_ERRORS as e:
self._log.debug('authentication error: {0}', e)
raise beets.ui.UserError('Beatport token request failed')
# Save the token for later use.
self._log.debug('Beatport token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confuse.Filename(in_app_dir=True))
def album_distance(self, items, album_info, mapping):
"""Returns the Beatport source weight and the maximum source weight
for albums.
"""
return get_distance(
data_source=self.data_source,
info=album_info,
config=self.config
)
def track_distance(self, item, track_info):
"""Returns the Beatport source weight and the maximum source weight
for individual tracks.
"""
return get_distance(
data_source=self.data_source,
info=track_info,
config=self.config
)
def candidates(self, items, artist, release, va_likely, extra_tags=None):
"""Returns a list of AlbumInfo objects for beatport search results
matching release and artist (if not various).
"""
if va_likely:
query = release
else:
query = f'{artist} {release}'
try:
return self._get_releases(query)
except BeatportAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
return []
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for beatport search results
matching title and artist.
"""
query = f'{artist} {title}'
try:
return self._get_tracks(query)
except BeatportAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
return []
def album_for_id(self, release_id):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the query is not a valid ID or release is not found.
"""
self._log.debug('Searching for release {0}', release_id)
match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id)
if not match:
self._log.debug('Not a valid Beatport release ID.')
return None
release = self.client.get_release(match.group(2))
if release:
return self._get_album_info(release)
return None
def track_for_id(self, track_id):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not a valid Beatport ID or track is not found.
"""
self._log.debug('Searching for track {0}', track_id)
match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
if not match:
self._log.debug('Not a valid Beatport track ID.')
return None
bp_track = self.client.get_track(match.group(2))
if bp_track is not None:
return self._get_track_info(bp_track)
return None
def _get_releases(self, query):
"""Returns a list of AlbumInfo objects for a beatport search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
        # album title. Use the `re.UNICODE` flag to avoid stripping
        # non-English word characters.
query = re.sub(r'\W+', ' ', query, flags=re.UNICODE)
# Strip medium information from query, Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'\b(CD|disc)\s*\d+', '', query, flags=re.I)
albums = [self._get_album_info(x)
for x in self.client.search(query)]
return albums
def _get_album_info(self, release):
"""Returns an AlbumInfo object for a Beatport Release object.
"""
va = len(release.artists) > 3
artist, artist_id = self._get_artist(release.artists)
if va:
artist = "Various Artists"
tracks = [self._get_track_info(x) for x in release.tracks]
return AlbumInfo(album=release.name, album_id=release.beatport_id,
artist=artist, artist_id=artist_id, tracks=tracks,
albumtype=release.category, va=va,
year=release.release_date.year,
month=release.release_date.month,
day=release.release_date.day,
label=release.label_name,
catalognum=release.catalog_number, media='Digital',
data_source=self.data_source, data_url=release.url,
genre=release.genre)
def _get_track_info(self, track):
"""Returns a TrackInfo object for a Beatport Track object.
"""
title = track.name
if track.mix_name != "Original Mix":
title += f" ({track.mix_name})"
artist, artist_id = self._get_artist(track.artists)
length = track.length.total_seconds()
return TrackInfo(title=title, track_id=track.beatport_id,
artist=artist, artist_id=artist_id,
length=length, index=track.track_number,
medium_index=track.track_number,
data_source=self.data_source, data_url=track.url,
bpm=track.bpm, initial_key=track.initial_key,
genre=track.genre)
def _get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of Beatport release or track artists.
"""
return MetadataSourcePlugin.get_artist(
artists=artists, id_key=0, name_key=1
)
def _get_tracks(self, query):
"""Returns a list of TrackInfo objects for a Beatport query.
"""
bp_tracks = self.client.search(query, release_type='track')
tracks = [self._get_track_info(x) for x in bp_tracks]
return tracks
| 18,530 | Python | .py | 405 | 34.888889 | 79 | 0.58588 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,664 | zero.py | rembo10_headphones/lib/beetsplug/zero.py |
# This file is part of beets.
# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
""" Clears tag fields in media files."""
import re
from beets.plugins import BeetsPlugin
from mediafile import MediaFile
from beets.importer import action
from beets.ui import Subcommand, decargs, input_yn
import confuse
__author__ = 'baobab@heresiarch.info'
class ZeroPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener('write', self.write_event)
self.register_listener('import_task_choice',
self.import_task_choice_event)
self.config.add({
'auto': True,
'fields': [],
'keep_fields': [],
'update_database': False,
})
self.fields_to_progs = {}
self.warned = False
"""Read the bulk of the config into `self.fields_to_progs`.
After construction, `fields_to_progs` contains all the fields that
should be zeroed as keys and maps each of those to a list of compiled
regexes (progs) as values.
A field is zeroed if its value matches one of the associated progs. If
progs is empty, then the associated field is always zeroed.
"""
if self.config['fields'] and self.config['keep_fields']:
self._log.warning(
'cannot blacklist and whitelist at the same time'
)
# Blacklist mode.
elif self.config['fields']:
for field in self.config['fields'].as_str_seq():
self._set_pattern(field)
# Whitelist mode.
elif self.config['keep_fields']:
for field in MediaFile.fields():
if (field not in self.config['keep_fields'].as_str_seq() and
# These fields should always be preserved.
field not in ('id', 'path', 'album_id')):
self._set_pattern(field)
def commands(self):
zero_command = Subcommand('zero', help='set fields to null')
def zero_fields(lib, opts, args):
if not decargs(args) and not input_yn(
"Remove fields for all items? (Y/n)",
True):
return
for item in lib.items(decargs(args)):
self.process_item(item)
zero_command.func = zero_fields
return [zero_command]
def _set_pattern(self, field):
"""Populate `self.fields_to_progs` for a given field.
Do some sanity checks then compile the regexes.
"""
if field not in MediaFile.fields():
self._log.error('invalid field: {0}', field)
elif field in ('id', 'path', 'album_id'):
self._log.warning('field \'{0}\' ignored, zeroing '
'it would be dangerous', field)
else:
try:
for pattern in self.config[field].as_str_seq():
prog = re.compile(pattern, re.IGNORECASE)
self.fields_to_progs.setdefault(field, []).append(prog)
except confuse.NotFoundError:
# Matches everything
self.fields_to_progs[field] = []
def import_task_choice_event(self, session, task):
if task.choice_flag == action.ASIS and not self.warned:
self._log.warning('cannot zero in \"as-is\" mode')
self.warned = True
# TODO request write in as-is mode
def write_event(self, item, path, tags):
if self.config['auto']:
self.set_fields(item, tags)
def set_fields(self, item, tags):
"""Set values in `tags` to `None` if the field is in
`self.fields_to_progs` and any of the corresponding `progs` matches the
field value.
Also update the `item` itself if `update_database` is set in the
config.
"""
fields_set = False
if not self.fields_to_progs:
self._log.warning('no fields, nothing to do')
return False
for field, progs in self.fields_to_progs.items():
if field in tags:
value = tags[field]
match = _match_progs(tags[field], progs)
else:
value = ''
match = not progs
if match:
fields_set = True
self._log.debug('{0}: {1} -> None', field, value)
tags[field] = None
if self.config['update_database']:
item[field] = None
return fields_set
def process_item(self, item):
tags = dict(item)
if self.set_fields(item, tags):
item.write(tags=tags)
if self.config['update_database']:
item.store(fields=tags)
def _match_progs(value, progs):
"""Check if `value` (as string) is matching any of the compiled regexes in
the `progs` list.
"""
if not progs:
return True
for prog in progs:
if prog.search(str(value)):
return True
return False
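# Hedged sketch: an empty prog list matches everything; otherwise any
# regex hit on the stringified value counts as a match.
#   _match_progs('Live at X', [re.compile('live', re.I)])  -> True
#   _match_progs('Studio', [re.compile('live', re.I)])     -> False
#   _match_progs('anything', [])                           -> True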
| 5,667 | Python | .py | 134 | 31.985075 | 79 | 0.59034 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,665 | ihate.py | rembo10_headphones/lib/beetsplug/ihate.py |
# This file is part of beets.
# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Warns you about things you hate (or even blocks import)."""
from beets.plugins import BeetsPlugin
from beets.importer import action
from beets.library import parse_query_string
from beets.library import Item
from beets.library import Album
__author__ = 'baobab@heresiarch.info'
__version__ = '2.0'
def summary(task):
"""Given an ImportTask, produce a short string identifying the
object.
"""
if task.is_album:
return f'{task.cur_artist} - {task.cur_album}'
else:
return f'{task.item.artist} - {task.item.title}'
class IHatePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener('import_task_choice',
self.import_task_choice_event)
self.config.add({
'warn': [],
'skip': [],
})
@classmethod
def do_i_hate_this(cls, task, action_patterns):
"""Process group of patterns (warn or skip) and returns True if
task is hated and not whitelisted.
"""
if action_patterns:
for query_string in action_patterns:
query, _ = parse_query_string(
query_string,
Album if task.is_album else Item,
)
if any(query.match(item) for item in task.imported_items()):
return True
return False
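    # Hedged config sketch (hypothetical queries): with config like
    #   ihate:
    #     warn: [genre:russian rock]
    #     skip: ['artist:manowar album:love songs']
    # do_i_hate_this(task, ['artist:manowar album:love songs']) returns
    # True when any imported item matches that beets query string.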
def import_task_choice_event(self, session, task):
skip_queries = self.config['skip'].as_str_seq()
warn_queries = self.config['warn'].as_str_seq()
if task.choice_flag == action.APPLY:
if skip_queries or warn_queries:
self._log.debug('processing your hate')
if self.do_i_hate_this(task, skip_queries):
task.choice_flag = action.SKIP
self._log.info('skipped: {0}', summary(task))
return
if self.do_i_hate_this(task, warn_queries):
self._log.info('you may hate this: {0}', summary(task))
else:
self._log.debug('nothing to do')
else:
self._log.debug('user made a decision, nothing to do')
| 2,877 | Python | .py | 68 | 33.617647 | 76 | 0.62567 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,666 | lyrics.py | rembo10_headphones/lib/beetsplug/lyrics.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches, embeds, and displays lyrics.
"""
import difflib
import errno
import itertools
import json
import struct
import os.path
import re
import requests
import unicodedata
from unidecode import unidecode
import warnings
import urllib
try:
import bs4
from bs4 import SoupStrainer
HAS_BEAUTIFUL_SOUP = True
except ImportError:
HAS_BEAUTIFUL_SOUP = False
try:
import langdetect
HAS_LANGDETECT = True
except ImportError:
HAS_LANGDETECT = False
try:
# PY3: HTMLParseError was removed in 3.5 as strict mode
# was deprecated in 3.3.
# https://docs.python.org/3.3/library/html.parser.html
from html.parser import HTMLParseError
except ImportError:
class HTMLParseError(Exception):
pass
from beets import plugins
from beets import ui
import beets
DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S)
TAG_RE = re.compile(r'<[^>]*>')
BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)
URL_CHARACTERS = {
'\u2018': "'",
'\u2019': "'",
'\u201c': '"',
'\u201d': '"',
'\u2010': '-',
'\u2011': '-',
'\u2012': '-',
'\u2013': '-',
'\u2014': '-',
'\u2015': '-',
'\u2016': '-',
'\u2026': '...',
}
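# Hedged sketch: these replacements normalize curly quotes and dashes
# before percent-encoding in Backend._encode() below, e.g.
#   'Don\u2019t \u2013 Stop' -> "Don't - Stop" -> 'Don%27t%20-%20Stop'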
USER_AGENT = f'beets/{beets.__version__}'
# The content for the base index.rst generated in ReST mode.
REST_INDEX_TEMPLATE = '''Lyrics
======
* :ref:`Song index <genindex>`
* :ref:`search`
Artist index:
.. toctree::
:maxdepth: 1
:glob:
artists/*
'''
# The content for the generated base conf.py.
REST_CONF_TEMPLATE = '''# -*- coding: utf-8 -*-
master_doc = 'index'
project = 'Lyrics'
copyright = 'none'
author = 'Various Authors'
latex_documents = [
(master_doc, 'Lyrics.tex', project,
author, 'manual'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
epub_tocdepth = 1
epub_tocdup = False
'''
# Utilities.
def unichar(i):
try:
return chr(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
def unescape(text):
"""Resolve &#xxx; HTML entities (and some others)."""
if isinstance(text, bytes):
text = text.decode('utf-8', 'ignore')
out = text.replace(' ', ' ')
def replchar(m):
num = m.group(1)
return unichar(int(num))
out = re.sub("&#(\\d+);", replchar, out)
return out
def extract_text_between(html, start_marker, end_marker):
try:
_, html = html.split(start_marker, 1)
html, _ = html.split(end_marker, 1)
except ValueError:
return ''
return html
def search_pairs(item):
"""Yield a pairs of artists and titles to search for.
The first item in the pair is the name of the artist, the second
item is a list of song names.
    In addition to the artist and title obtained from the `item`, the
    method tries to strip extra information like parenthesized suffixes
    and featured artists from the strings and adds them as candidates.
    The artist sort name is added as a fallback candidate to help in
    cases where the artist name includes special characters or is in a
    non-Latin script.
The method also tries to split multiple titles separated with `/`.
"""
def generate_alternatives(string, patterns):
"""Generate string alternatives by extracting first matching group for
each given pattern.
"""
alternatives = [string]
for pattern in patterns:
match = re.search(pattern, string, re.IGNORECASE)
if match:
alternatives.append(match.group(1))
return alternatives
title, artist, artist_sort = item.title, item.artist, item.artist_sort
patterns = [
# Remove any featuring artists from the artists name
fr"(.*?) {plugins.feat_tokens()}"]
artists = generate_alternatives(artist, patterns)
# Use the artist_sort as fallback only if it differs from artist to avoid
# repeated remote requests with the same search terms
if artist != artist_sort:
artists.append(artist_sort)
patterns = [
# Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic).
r"(.+?)\s+[(].*[)]$",
# Remove any featuring artists from the title
r"(.*?) {}".format(plugins.feat_tokens(for_artist=False)),
# Remove part of title after colon ':' for songs with subtitles
r"(.+?)\s*:.*"]
titles = generate_alternatives(title, patterns)
    # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
    # and, if found, try each part as a separate title.
multi_titles = []
for title in titles:
multi_titles.append([title])
if '/' in title:
multi_titles.append([x.strip() for x in title.split('/')])
return itertools.product(artists, multi_titles)
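# Hedged sketch (hypothetical item fields): for artist='Alice feat. Bob'
# and title='One / Two', search_pairs() yields roughly:
#   ('Alice feat. Bob', ['One / Two'])
#   ('Alice feat. Bob', ['One', 'Two'])
#   ('Alice', ['One / Two'])
#   ('Alice', ['One', 'Two'])
# plus artist_sort fallbacks when it differs from artist.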
def slug(text):
"""Make a URL-safe, human-readable version of the given text
This will do the following:
1. decode unicode characters into ASCII
2. shift everything to lowercase
3. strip whitespace
4. replace other non-word characters with dashes
5. strip extra dashes
This somewhat duplicates the :func:`Google.slugify` function but
slugify is not as generic as this one, which can be reused
elsewhere.
"""
return re.sub(r'\W+', '-', unidecode(text).lower().strip()).strip('-')
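# Hedged sketch of the steps above:
#   slug('Mørk  Café!') -> 'mork-cafe'
#   (unidecode -> 'Mork  Cafe!', lowercase, non-word runs -> '-', trim '-')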
if HAS_BEAUTIFUL_SOUP:
def try_parse_html(html, **kwargs):
try:
return bs4.BeautifulSoup(html, 'html.parser', **kwargs)
except HTMLParseError:
return None
else:
def try_parse_html(html, **kwargs):
return None
class Backend:
REQUIRES_BS = False
def __init__(self, config, log):
self._log = log
@staticmethod
def _encode(s):
"""Encode the string for inclusion in a URL"""
if isinstance(s, str):
for char, repl in URL_CHARACTERS.items():
s = s.replace(char, repl)
s = s.encode('utf-8', 'ignore')
return urllib.parse.quote(s)
def build_url(self, artist, title):
return self.URL_PATTERN % (self._encode(artist.title()),
self._encode(title.title()))
def fetch_url(self, url):
"""Retrieve the content at a given URL, or return None if the source
is unreachable.
"""
try:
# Disable the InsecureRequestWarning that comes from using
# `verify=false`.
# https://github.com/kennethreitz/requests/issues/2214
# We're not overly worried about the NSA MITMing our lyrics scraper
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = requests.get(url, verify=False, headers={
'User-Agent': USER_AGENT,
})
except requests.RequestException as exc:
self._log.debug('lyrics request failed: {0}', exc)
return
if r.status_code == requests.codes.ok:
return r.text
else:
self._log.debug('failed to fetch: {0} ({1})', url, r.status_code)
return None
def fetch(self, artist, title):
raise NotImplementedError()
class MusiXmatch(Backend):
REPLACEMENTS = {
r'\s+': '-',
'<': 'Less_Than',
'>': 'Greater_Than',
'#': 'Number_',
r'[\[\{]': '(',
r'[\]\}]': ')',
}
URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
@classmethod
def _encode(cls, s):
for old, new in cls.REPLACEMENTS.items():
s = re.sub(old, new, s)
return super()._encode(s)
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return None
if "We detected that your IP is blocked" in html:
self._log.warning('we are blocked at MusixMatch: url %s failed'
% url)
return None
html_parts = html.split('<p class="mxm-lyrics__content')
# Sometimes lyrics come in 2 or more parts
lyrics_parts = []
for html_part in html_parts:
lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))
lyrics = '\n'.join(lyrics_parts)
lyrics = lyrics.strip(',"').replace('\\n', '\n')
# another odd case: sometimes only that string remains, for
# missing songs. this seems to happen after being blocked
# above, when filling in the CAPTCHA.
if "Instant lyrics for all your music." in lyrics:
return None
# sometimes there are non-existent lyrics with some content
if 'Lyrics | Musixmatch' in lyrics:
return None
return lyrics
class Genius(Backend):
"""Fetch lyrics from Genius via genius-api.
Simply adapted from
bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/
"""
REQUIRES_BS = True
base_url = "https://api.genius.com"
def __init__(self, config, log):
super().__init__(config, log)
self.api_key = config['genius_api_key'].as_str()
self.headers = {
'Authorization': "Bearer %s" % self.api_key,
'User-Agent': USER_AGENT,
}
def fetch(self, artist, title):
"""Fetch lyrics from genius.com
Because genius doesn't allow accesssing lyrics via the api,
we first query the api for a url matching our artist & title,
then attempt to scrape that url for the lyrics.
"""
json = self._search(artist, title)
if not json:
self._log.debug('Genius API request returned invalid JSON')
return None
# find a matching artist in the json
for hit in json["response"]["hits"]:
hit_artist = hit["result"]["primary_artist"]["name"]
if slug(hit_artist) == slug(artist):
html = self.fetch_url(hit["result"]["url"])
if not html:
return None
return self._scrape_lyrics_from_html(html)
self._log.debug('Genius failed to find a matching artist for \'{0}\'',
artist)
return None
def _search(self, artist, title):
"""Searches the genius api for a given artist and title
https://docs.genius.com/#search-h2
:returns: json response
"""
search_url = self.base_url + "/search"
data = {'q': title + " " + artist.lower()}
try:
            response = requests.get(
                search_url, params=data, headers=self.headers)
except requests.RequestException as exc:
self._log.debug('Genius API request failed: {0}', exc)
return None
try:
return response.json()
except ValueError:
return None
def _scrape_lyrics_from_html(self, html):
"""Scrape lyrics from a given genius.com html"""
soup = try_parse_html(html)
if not soup:
return
# Remove script tags that they put in the middle of the lyrics.
[h.extract() for h in soup('script')]
# Most of the time, the page contains a div with class="lyrics" where
# all of the lyrics can be found already correctly formatted
# Sometimes, though, it packages the lyrics into separate divs, most
# likely for easier ad placement
lyrics_div = soup.find("div", class_="lyrics")
if not lyrics_div:
self._log.debug('Received unusual song page html')
verse_div = soup.find("div",
class_=re.compile("Lyrics__Container"))
if not verse_div:
if soup.find("div",
class_=re.compile("LyricsPlaceholder__Message"),
string="This song is an instrumental"):
self._log.debug('Detected instrumental')
return "[Instrumental]"
else:
self._log.debug("Couldn't scrape page using known layouts")
return None
lyrics_div = verse_div.parent
for br in lyrics_div.find_all("br"):
br.replace_with("\n")
ads = lyrics_div.find_all("div",
class_=re.compile("InreadAd__Container"))
for ad in ads:
ad.replace_with("\n")
return lyrics_div.get_text()
class Tekstowo(Backend):
    """Fetch lyrics from Tekstowo.pl."""
REQUIRES_BS = True
BASE_URL = 'http://www.tekstowo.pl'
URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'
def fetch(self, artist, title):
url = self.build_url(title, artist)
search_results = self.fetch_url(url)
if not search_results:
return None
song_page_url = self.parse_search_results(search_results)
if not song_page_url:
return None
song_page_html = self.fetch_url(song_page_url)
if not song_page_html:
return None
return self.extract_lyrics(song_page_html)
def parse_search_results(self, html):
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
soup = try_parse_html(html)
if not soup:
return None
content_div = soup.find("div", class_="content")
if not content_div:
return None
card_div = content_div.find("div", class_="card")
if not card_div:
return None
song_rows = card_div.find_all("div", class_="box-przeboje")
if not song_rows:
return None
song_row = song_rows[0]
if not song_row:
return None
link = song_row.find('a')
if not link:
return None
return self.BASE_URL + link.get('href')
def extract_lyrics(self, html):
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
soup = try_parse_html(html)
if not soup:
return None
lyrics_div = soup.find("div", class_="song-text")
if not lyrics_div:
return None
return lyrics_div.get_text()
def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com'
"""
textlines = text.split('\n')
credits = None
for i in (0, -1):
if textlines and 'lyrics' in textlines[i].lower():
credits = textlines.pop(i)
if credits:
text = '\n'.join(textlines)
return text
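# Illustrative example (editorial, not from the original source): given
#     "Some verse\nAnother verse\nLyrics by songsdatabase.com"
# the trailing credit line is popped and the first two lines are returned.
# Only the first and last lines are inspected, so credits buried in the
# middle of the text are deliberately left alone.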
def _scrape_strip_cruft(html, plain_text_out=False):
"""Clean up HTML
"""
html = unescape(html)
html = html.replace('\r', '\n') # Normalize EOL.
html = re.sub(r' +', ' ', html) # Whitespaces collapse.
html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'.
html = re.sub(r'(?s)<(script).*?</\1>', '', html) # Strip script tags.
    html = re.sub('\u2005', " ", html)  # Replace U+2005 (four-per-em space).
if plain_text_out: # Strip remaining HTML tags
html = COMMENT_RE.sub('', html)
html = TAG_RE.sub('', html)
html = '\n'.join([x.strip() for x in html.strip().split('\n')])
html = re.sub(r'\n{3,}', r'\n\n', html)
return html
def _scrape_merge_paragraphs(html):
html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html)
return re.sub(r'<div .*>\s*</div>', '\n', html)
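# Rough illustration (editorial) of the two cleanup helpers above: markup
# like "Line one<br>\r\nLine two</p> <p class='x'>Line three" comes out as
# "Line one\nLine two\nLine three" -- '\r' is normalized, the <br> swallows
# the surrounding newlines, and adjacent </p><p> boundaries collapse into a
# single '\n'.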
def scrape_lyrics_from_html(html):
"""Scrape lyrics from a URL. If no lyrics can be found, return None
instead.
"""
def is_text_notcode(text):
length = len(text)
return (length > 20 and
text.count(' ') > length / 25 and
(text.find('{') == -1 or text.find(';') == -1))
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
# extract all long text blocks that are not code
soup = try_parse_html(html, parse_only=SoupStrainer(text=is_text_notcode))
if not soup:
return None
# Get the longest text element (if any).
strings = sorted(soup.stripped_strings, key=len, reverse=True)
if strings:
return strings[0]
else:
return None
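# Editorial note: the predicate above keeps only text nodes that look like
# prose -- longer than 20 characters, at least roughly one space per 25
# characters, and not containing both '{' and ';' (a hint of CSS or
# JavaScript). The longest surviving block is assumed to be the lyrics, and
# passing the predicate through SoupStrainer filters nodes at parse time.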
class Google(Backend):
"""Fetch lyrics from Google search results."""
REQUIRES_BS = True
def __init__(self, config, log):
super().__init__(config, log)
self.api_key = config['google_API_key'].as_str()
self.engine_id = config['google_engine_ID'].as_str()
def is_lyrics(self, text, artist=None):
"""Determine whether the text seems to be valid lyrics.
"""
if not text:
return False
bad_triggers_occ = []
nb_lines = text.count('\n')
if nb_lines <= 1:
self._log.debug("Ignoring too short lyrics '{0}'", text)
return False
elif nb_lines < 5:
bad_triggers_occ.append('too_short')
else:
# Lyrics look legit, remove credits to avoid being penalized
# further down
text = remove_credits(text)
bad_triggers = ['lyrics', 'copyright', 'property', 'links']
if artist:
bad_triggers += [artist]
        for item in bad_triggers:
            bad_triggers_occ += [item] * len(
                re.findall(r'\W%s\W' % re.escape(item), text, re.I))
if bad_triggers_occ:
self._log.debug('Bad triggers detected: {0}', bad_triggers_occ)
return len(bad_triggers_occ) < 2
def slugify(self, text):
"""Normalize a string and remove non-alphanumeric characters.
"""
text = re.sub(r"[-'_\s]", '_', text)
text = re.sub(r"_+", '_', text).strip('_')
pat = r"([^,\(]*)\((.*?)\)" # Remove content within parentheses
text = re.sub(pat, r'\g<1>', text).strip()
try:
text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore')
text = str(re.sub(r'[-\s]+', ' ', text.decode('utf-8')))
except UnicodeDecodeError:
self._log.exception("Failing to normalize '{0}'", text)
return text
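    # Illustrative examples (editorial, not from the original source):
    #     slugify("don't stop")      -> "don_t_stop"
    #     slugify("My Song (live)")  -> "My_Song_"
    # Apostrophes, hyphens and whitespace collapse to underscores, and any
    # parenthesised suffix such as "(live)" is dropped before the ASCII
    # normalization step.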
BY_TRANS = ['by', 'par', 'de', 'von']
LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']
def is_page_candidate(self, url_link, url_title, title, artist):
"""Return True if the URL title makes it a good candidate to be a
page that contains lyrics of title by artist.
"""
title = self.slugify(title.lower())
artist = self.slugify(artist.lower())
sitename = re.search("//([^/]+)/.*",
self.slugify(url_link.lower())).group(1)
url_title = self.slugify(url_title.lower())
# Check if URL title contains song title (exact match)
if url_title.find(title) != -1:
return True
# or try extracting song title from URL title and check if
# they are close enough
tokens = [by + '_' + artist for by in self.BY_TRANS] + \
[artist, sitename, sitename.replace('www.', '')] + \
self.LYRICS_TRANS
tokens = [re.escape(t) for t in tokens]
song_title = re.sub('(%s)' % '|'.join(tokens), '', url_title)
song_title = song_title.strip('_|')
typo_ratio = .9
ratio = difflib.SequenceMatcher(None, song_title, title).ratio()
return ratio >= typo_ratio
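    # Rough illustration (editorial): for a result titled
    # "Adele - Hello Lyrics | www.site.com", the slugified URL title is
    # stripped of the artist, the site name and the 'by'/'lyrics' tokens;
    # the leftover is fuzzily compared with the slugified song title and
    # accepted when SequenceMatcher's ratio reaches 0.9.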
def fetch(self, artist, title):
query = f"{artist} {title}"
url = 'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
% (self.api_key, self.engine_id,
urllib.parse.quote(query.encode('utf-8')))
data = self.fetch_url(url)
if not data:
self._log.debug('google backend returned no data')
return None
try:
data = json.loads(data)
        except ValueError as exc:
            self._log.debug('google backend returned malformed JSON: {}', exc)
            return None
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug('google backend error: {0}', reason)
return None
if 'items' in data.keys():
for item in data['items']:
url_link = item['link']
url_title = item.get('title', '')
if not self.is_page_candidate(url_link, url_title,
title, artist):
continue
html = self.fetch_url(url_link)
if not html:
continue
lyrics = scrape_lyrics_from_html(html)
if not lyrics:
continue
if self.is_lyrics(lyrics, artist):
self._log.debug('got lyrics from {0}',
item['displayLink'])
return lyrics
return None
class LyricsPlugin(plugins.BeetsPlugin):
SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']
SOURCE_BACKENDS = {
'google': Google,
'musixmatch': MusiXmatch,
'genius': Genius,
'tekstowo': Tekstowo,
}
def __init__(self):
super().__init__()
self.import_stages = [self.imported]
self.config.add({
'auto': True,
'bing_client_secret': None,
'bing_lang_from': [],
'bing_lang_to': None,
'google_API_key': None,
'google_engine_ID': '009217259823014548361:lndtuqkycfu',
'genius_api_key':
"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
"76V-uFL5jks5dNvcGCdarqFjDhP9c",
'fallback': None,
'force': False,
'local': False,
'sources': self.SOURCES,
})
self.config['bing_client_secret'].redact = True
self.config['google_API_key'].redact = True
self.config['google_engine_ID'].redact = True
self.config['genius_api_key'].redact = True
# State information for the ReST writer.
# First, the current artist we're writing.
self.artist = 'Unknown artist'
# The current album: False means no album yet.
self.album = False
# The current rest file content. None means the file is not
# open yet.
self.rest = None
available_sources = list(self.SOURCES)
sources = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
if not HAS_BEAUTIFUL_SOUP:
sources = self.sanitize_bs_sources(sources)
if 'google' in sources:
if not self.config['google_API_key'].get():
# We log a *debug* message here because the default
# configuration includes `google`. This way, the source
# is silent by default but can be enabled just by
# setting an API key.
self._log.debug('Disabling google source: '
'no API key configured.')
sources.remove('google')
self.config['bing_lang_from'] = [
x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
self.bing_auth_token = None
if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
self._log.warning('To use bing translations, you need to '
'install the langdetect module. See the '
'documentation for further details.')
self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)
for source in sources]
def sanitize_bs_sources(self, sources):
enabled_sources = []
for source in sources:
if self.SOURCE_BACKENDS[source].REQUIRES_BS:
self._log.debug('To use the %s lyrics source, you must '
'install the beautifulsoup4 module. See '
'the documentation for further details.'
% source)
else:
enabled_sources.append(source)
return enabled_sources
def get_bing_access_token(self):
params = {
'client_id': 'beets',
'client_secret': self.config['bing_client_secret'],
'scope': "https://api.microsofttranslator.com",
'grant_type': 'client_credentials',
}
oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'
oauth_token = json.loads(requests.post(
oauth_url,
data=urllib.parse.urlencode(params)).content)
if 'access_token' in oauth_token:
return "Bearer " + oauth_token['access_token']
else:
self._log.warning('Could not get Bing Translate API access token.'
' Check your "bing_client_secret" password')
def commands(self):
cmd = ui.Subcommand('lyrics', help='fetch song lyrics')
cmd.parser.add_option(
'-p', '--print', dest='printlyr',
action='store_true', default=False,
help='print lyrics to console',
)
cmd.parser.add_option(
'-r', '--write-rest', dest='writerest',
action='store', default=None, metavar='dir',
help='write lyrics to given directory as ReST files',
)
cmd.parser.add_option(
'-f', '--force', dest='force_refetch',
action='store_true', default=False,
help='always re-download lyrics',
)
cmd.parser.add_option(
'-l', '--local', dest='local_only',
action='store_true', default=False,
help='do not fetch missing lyrics',
)
def func(lib, opts, args):
# The "write to files" option corresponds to the
# import_write config value.
write = ui.should_write()
if opts.writerest:
self.writerest_indexes(opts.writerest)
items = lib.items(ui.decargs(args))
for item in items:
if not opts.local_only and not self.config['local']:
self.fetch_item_lyrics(
lib, item, write,
opts.force_refetch or self.config['force'],
)
if item.lyrics:
if opts.printlyr:
ui.print_(item.lyrics)
if opts.writerest:
self.appendrest(opts.writerest, item)
if opts.writerest and items:
# flush last artist & write to ReST
self.writerest(opts.writerest)
                ui.print_('ReST files generated. To build, use one of:')
ui.print_(' sphinx-build -b html %s _build/html'
% opts.writerest)
ui.print_(' sphinx-build -b epub %s _build/epub'
% opts.writerest)
ui.print_((' sphinx-build -b latex %s _build/latex '
'&& make -C _build/latex all-pdf')
% opts.writerest)
cmd.func = func
return [cmd]
def appendrest(self, directory, item):
"""Append the item to an ReST file
This will keep state (in the `rest` variable) in order to avoid
writing continuously to the same files.
"""
if slug(self.artist) != slug(item.albumartist):
# Write current file and start a new one ~ item.albumartist
self.writerest(directory)
self.artist = item.albumartist.strip()
self.rest = "%s\n%s\n\n.. contents::\n :local:\n\n" \
% (self.artist,
'=' * len(self.artist))
if self.album != item.album:
tmpalbum = self.album = item.album.strip()
if self.album == '':
tmpalbum = 'Unknown album'
self.rest += "{}\n{}\n\n".format(tmpalbum, '-' * len(tmpalbum))
title_str = ":index:`%s`" % item.title.strip()
block = '| ' + item.lyrics.replace('\n', '\n| ')
self.rest += "{}\n{}\n\n{}\n\n".format(title_str,
'~' * len(title_str),
block)
def writerest(self, directory):
"""Write self.rest to a ReST file
"""
if self.rest is not None and self.artist is not None:
path = os.path.join(directory, 'artists',
slug(self.artist) + '.rst')
with open(path, 'wb') as output:
output.write(self.rest.encode('utf-8'))
def writerest_indexes(self, directory):
"""Write conf.py and index.rst files necessary for Sphinx
We write minimal configurations that are necessary for Sphinx
to operate. We do not overwrite existing files so that
customizations are respected."""
try:
os.makedirs(os.path.join(directory, 'artists'))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
indexfile = os.path.join(directory, 'index.rst')
if not os.path.exists(indexfile):
with open(indexfile, 'w') as output:
output.write(REST_INDEX_TEMPLATE)
conffile = os.path.join(directory, 'conf.py')
if not os.path.exists(conffile):
with open(conffile, 'w') as output:
output.write(REST_CONF_TEMPLATE)
def imported(self, session, task):
"""Import hook for fetching lyrics automatically.
"""
if self.config['auto']:
for item in task.imported_items():
self.fetch_item_lyrics(session.lib, item,
False, self.config['force'])
def fetch_item_lyrics(self, lib, item, write, force):
"""Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself.
"""
# Skip if the item already has lyrics.
if not force and item.lyrics:
self._log.info('lyrics already present: {0}', item)
return
lyrics = None
for artist, titles in search_pairs(item):
lyrics = [self.get_lyrics(artist, title) for title in titles]
if any(lyrics):
break
lyrics = "\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
self._log.info('fetched lyrics: {0}', item)
if HAS_LANGDETECT and self.config['bing_client_secret'].get():
lang_from = langdetect.detect(lyrics)
if self.config['bing_lang_to'].get() != lang_from and (
not self.config['bing_lang_from'] or (
lang_from in self.config[
'bing_lang_from'].as_str_seq())):
lyrics = self.append_translation(
lyrics, self.config['bing_lang_to'])
else:
self._log.info('lyrics not found: {0}', item)
fallback = self.config['fallback'].get()
if fallback:
lyrics = fallback
else:
return
item.lyrics = lyrics
if write:
item.try_write()
item.store()
def get_lyrics(self, artist, title):
"""Fetch lyrics, trying each source in turn. Return a string or
None if no lyrics were found.
"""
for backend in self.backends:
lyrics = backend.fetch(artist, title)
if lyrics:
self._log.debug('got lyrics from backend: {0}',
backend.__class__.__name__)
return _scrape_strip_cruft(lyrics, True)
def append_translation(self, text, to_lang):
from xml.etree import ElementTree
if not self.bing_auth_token:
self.bing_auth_token = self.get_bing_access_token()
if self.bing_auth_token:
# Extract unique lines to limit API request size per song
text_lines = set(text.split('\n'))
url = ('https://api.microsofttranslator.com/v2/Http.svc/'
'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))
r = requests.get(url,
headers={"Authorization ": self.bing_auth_token})
if r.status_code != 200:
self._log.debug('translation API error {}: {}', r.status_code,
r.text)
if 'token has expired' in r.text:
self.bing_auth_token = None
return self.append_translation(text, to_lang)
return text
lines_translated = ElementTree.fromstring(
r.text.encode('utf-8')).text
# Use a translation mapping dict to build resulting lyrics
translations = dict(zip(text_lines, lines_translated.split('|')))
result = ''
for line in text.split('\n'):
result += '{} / {}\n'.format(line, translations[line])
return result
| 34,170 | Python | .py | 829 | 30.810615 | 79 | 0.566414 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,667 | embedart.py | rembo10_headphones/lib/beetsplug/embedart.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows beets to embed album art into file metadata."""
import os.path
from beets.plugins import BeetsPlugin
from beets import ui
from beets.ui import print_, decargs
from beets.util import syspath, normpath, displayable_path, bytestring_path
from beets.util.artresizer import ArtResizer
from beets import config
from beets import art
def _confirm(objs, album):
"""Show the list of affected objects (items or albums) and confirm
that the user wants to modify their artwork.
`album` is a Boolean indicating whether these are albums (as opposed
to items).
"""
noun = 'album' if album else 'file'
prompt = 'Modify artwork for {} {}{} (Y/n)?'.format(
len(objs),
noun,
's' if len(objs) > 1 else ''
)
# Show all the items or albums.
for obj in objs:
print_(format(obj))
# Confirm with user.
return ui.input_yn(prompt)
class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files.
"""
def __init__(self):
super().__init__()
self.config.add({
'maxwidth': 0,
'auto': True,
'compare_threshold': 0,
'ifempty': False,
'remove_art_file': False,
'quality': 0,
})
if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
self.config['maxwidth'] = 0
self._log.warning("ImageMagick or PIL not found; "
"'maxwidth' option ignored")
if self.config['compare_threshold'].get(int) and not \
ArtResizer.shared.can_compare:
self.config['compare_threshold'] = 0
self._log.warning("ImageMagick 6.8.7 or higher not installed; "
"'compare_threshold' option ignored")
self.register_listener('art_set', self.process_album)
def commands(self):
# Embed command.
embed_cmd = ui.Subcommand(
'embedart', help='embed image files into file metadata'
)
embed_cmd.parser.add_option(
'-f', '--file', metavar='PATH', help='the image file to embed'
)
embed_cmd.parser.add_option(
"-y", "--yes", action="store_true", help="skip confirmation"
)
maxwidth = self.config['maxwidth'].get(int)
quality = self.config['quality'].get(int)
compare_threshold = self.config['compare_threshold'].get(int)
ifempty = self.config['ifempty'].get(bool)
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
if not os.path.isfile(syspath(imagepath)):
raise ui.UserError('image file {} not found'.format(
displayable_path(imagepath)
))
items = lib.items(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(items, not opts.file):
return
for item in items:
art.embed_item(self._log, item, imagepath, maxwidth,
None, compare_threshold, ifempty,
quality=quality)
else:
albums = lib.albums(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(albums, not opts.file):
return
for album in albums:
art.embed_album(self._log, album, maxwidth,
False, compare_threshold, ifempty,
quality=quality)
self.remove_artfile(album)
embed_cmd.func = embed_func
# Extract command.
extract_cmd = ui.Subcommand(
'extractart',
help='extract an image from file metadata',
)
extract_cmd.parser.add_option(
'-o', dest='outpath',
help='image output file',
)
extract_cmd.parser.add_option(
'-n', dest='filename',
help='image filename to create for all matched albums',
)
extract_cmd.parser.add_option(
'-a', dest='associate', action='store_true',
help='associate the extracted images with the album',
)
def extract_func(lib, opts, args):
if opts.outpath:
art.extract_first(self._log, normpath(opts.outpath),
lib.items(decargs(args)))
else:
filename = bytestring_path(opts.filename or
config['art_filename'].get())
if os.path.dirname(filename) != b'':
self._log.error(
"Only specify a name rather than a path for -n")
return
for album in lib.albums(decargs(args)):
artpath = normpath(os.path.join(album.path, filename))
artpath = art.extract_first(self._log, artpath,
album.items())
if artpath and opts.associate:
album.set_art(artpath)
album.store()
extract_cmd.func = extract_func
# Clear command.
clear_cmd = ui.Subcommand(
'clearart',
help='remove images from file metadata',
)
clear_cmd.parser.add_option(
"-y", "--yes", action="store_true", help="skip confirmation"
)
def clear_func(lib, opts, args):
items = lib.items(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(items, False):
return
art.clear(self._log, lib, decargs(args))
clear_cmd.func = clear_func
return [embed_cmd, extract_cmd, clear_cmd]
def process_album(self, album):
"""Automatically embed art after art has been set
"""
if self.config['auto'] and ui.should_write():
max_width = self.config['maxwidth'].get(int)
art.embed_album(self._log, album, max_width, True,
self.config['compare_threshold'].get(int),
self.config['ifempty'].get(bool))
self.remove_artfile(album)
def remove_artfile(self, album):
"""Possibly delete the album art file for an album (if the
appropriate configuration option is enabled).
"""
if self.config['remove_art_file'] and album.artpath:
if os.path.isfile(album.artpath):
self._log.debug('Removing album art file for {0}', album)
os.remove(album.artpath)
album.artpath = None
album.store()
| 7,529 | Python | .py | 174 | 30.994253 | 76 | 0.562227 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,668 | keyfinder.py | rembo10_headphones/lib/beetsplug/keyfinder.py |
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses the `KeyFinder` program to add the `initial_key` field.
"""
import os.path
import subprocess
from beets import ui
from beets import util
from beets.plugins import BeetsPlugin
class KeyFinderPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'bin': 'KeyFinder',
'auto': True,
'overwrite': False,
})
if self.config['auto'].get(bool):
self.import_stages = [self.imported]
def commands(self):
cmd = ui.Subcommand('keyfinder',
help='detect and add initial key from audio')
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
def imported(self, session, task):
self.find_key(task.imported_items())
def find_key(self, items, write=False):
overwrite = self.config['overwrite'].get(bool)
command = [self.config['bin'].as_str()]
# The KeyFinder GUI program needs the -f flag before the path.
# keyfinder-cli is similar, but just wants the path with no flag.
if 'keyfinder-cli' not in os.path.basename(command[0]).lower():
command.append('-f')
for item in items:
if item['initial_key'] and not overwrite:
continue
try:
output = util.command_output(command + [util.syspath(
item.path)]).stdout
except (subprocess.CalledProcessError, OSError) as exc:
self._log.error('execution failed: {0}', exc)
continue
except UnicodeEncodeError:
# Workaround for Python 2 Windows bug.
# https://bugs.python.org/issue1759845
self._log.error('execution failed for Unicode path: {0!r}',
item.path)
continue
try:
key_raw = output.rsplit(None, 1)[-1]
except IndexError:
# Sometimes keyfinder-cli returns 0 but with no key, usually
# when the file is silent or corrupt, so we log and skip.
self._log.error('no key returned for path: {0}', item.path)
continue
try:
key = util.text_string(key_raw)
except UnicodeDecodeError:
self._log.error('output is invalid UTF-8')
continue
item['initial_key'] = key
self._log.info('added computed initial key {0} for {1}',
key, util.displayable_path(item.path))
if write:
item.try_write()
item.store()
| 3,453 | Python | .py | 79 | 32.873418 | 76 | 0.594458 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,669 | sonosupdate.py | rembo10_headphones/lib/beetsplug/sonosupdate.py |
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""
from beets.plugins import BeetsPlugin
import soco
class SonosUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to a Sonos
controler.
"""
self._log.info('Requesting a Sonos library update...')
device = soco.discovery.any_soco()
if device:
device.music_library.start_library_update()
else:
self._log.warning('Could not find a Sonos device.')
return
self._log.info('Sonos update triggered')
| 1,606 | Python | .py | 37 | 38.27027 | 76 | 0.710256 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,670 | gmusic.py | rembo10_headphones/lib/beetsplug/gmusic.py |
# This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Deprecation warning for the removed gmusic plugin."""
from beets.plugins import BeetsPlugin
class Gmusic(BeetsPlugin):
def __init__(self):
super().__init__()
self._log.warning("The 'gmusic' plugin has been removed following the"
" shutdown of Google Play Music. Remove the plugin"
" from your configuration to silence this warning.")
| 1,026 | Python | .py | 20 | 46.45 | 78 | 0.728272 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,671 | permissions.py | rembo10_headphones/lib/beetsplug/permissions.py |
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""Convert a string to an integer, interpreting the text as octal.
Or, if `perm` is an integer, reinterpret it as an octal number that
has been "misinterpreted" as decimal.
"""
if isinstance(perm, int):
perm = str(perm)
return int(perm, 8)
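# Illustrative example (editorial, not from the original source):
#     convert_perm('644') == convert_perm(644) == 0o644 == 420
# An unquoted YAML value arrives as the decimal integer 644, so it is
# round-tripped through str() and re-read in base 8.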
def check_permissions(path, permission):
"""Check whether the file's permissions equal the given vector.
Return a boolean.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
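# Editorial note: the 0o777 mask keeps only the permission bits, ignoring
# the file-type bits of st_mode; oct() on both sides just compares the
# canonical octal form (e.g. '0o644').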
def assert_permissions(path, permission, log):
"""Check whether the file's permissions are as expected, otherwise,
log a warning message. Return a boolean indicating the match, like
`check_permissions`.
"""
if not check_permissions(util.syspath(path), permission):
log.warning(
'could not set permissions on {}',
util.displayable_path(path),
)
log.debug(
'set permissions to {}, but permissions are now {}',
permission,
os.stat(util.syspath(path)).st_mode & 0o777,
)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
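# Illustrative example (editorial): with a library at b'/music' and an item
# path b'/music/A/B/track.mp3', ancestry() yields [b'/', b'/music',
# b'/music/A', b'/music/A/B']; the startswith filter plus the [1:] slice
# leave [b'/music/A', b'/music/A/B'], i.e. only directories below the
# library root.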
class Permissions(BeetsPlugin):
def __init__(self):
super().__init__()
# Adding defaults.
self.config.add({
'file': '644',
'dir': '755',
})
self.register_listener('item_imported', self.fix)
self.register_listener('album_imported', self.fix)
self.register_listener('art_set', self.fix_art)
def fix(self, lib, item=None, album=None):
"""Fix the permissions for an imported Item or Album.
"""
files = []
dirs = set()
if item:
files.append(item.path)
dirs.update(dirs_in_library(lib.directory, item.path))
elif album:
for album_item in album.items():
files.append(album_item.path)
dirs.update(dirs_in_library(lib.directory, album_item.path))
self.set_permissions(files=files, dirs=dirs)
def fix_art(self, album):
"""Fix the permission for Album art file.
"""
if album.artpath:
self.set_permissions(files=[album.artpath])
def set_permissions(self, files=[], dirs=[]):
        # Get the configured permissions. The user can specify them either as
        # a string (in YAML quotes) or, for convenience, as an integer, so
        # the quotes can be omitted. In the latter case, we need to
        # reinterpret the integer as octal, not decimal.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
for path in files:
# Changing permissions on the destination file.
self._log.debug(
'setting file permissions on {}',
util.displayable_path(path),
)
os.chmod(util.syspath(path), file_perm)
# Checks if the destination path has the permissions configured.
assert_permissions(path, file_perm, self._log)
# Change permissions for the directories.
for path in dirs:
# Changing permissions on the destination directory.
self._log.debug(
'setting directory permissions on {}',
util.displayable_path(path),
)
os.chmod(util.syspath(path), dir_perm)
# Checks if the destination path has the permissions configured.
assert_permissions(path, dir_perm, self._log)
| 4,108 | Python | .py | 101 | 31.792079 | 79 | 0.621269 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,672 | importadded.py | rembo10_headphones/lib/beetsplug/importadded.py |
"""Populate an item's `added` and `mtime` fields by using the file
modification time (mtime) of the item's source file before import.
Reimported albums and items are skipped.
"""
import os
from beets import util
from beets import importer
from beets.plugins import BeetsPlugin
class ImportAddedPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'preserve_mtimes': False,
'preserve_write_mtimes': False,
})
# item.id for new items that were reimported
self.reimported_item_ids = None
# album.path for old albums that were replaced by a reimported album
self.replaced_album_paths = None
# item path in the library to the mtime of the source file
self.item_mtime = {}
register = self.register_listener
register('import_task_created', self.check_config)
register('import_task_created', self.record_if_inplace)
register('import_task_files', self.record_reimported)
register('before_item_moved', self.record_import_mtime)
register('item_copied', self.record_import_mtime)
register('item_linked', self.record_import_mtime)
register('item_hardlinked', self.record_import_mtime)
register('album_imported', self.update_album_times)
register('item_imported', self.update_item_times)
register('after_write', self.update_after_write_time)
def check_config(self, task, session):
self.config['preserve_mtimes'].get(bool)
def reimported_item(self, item):
return item.id in self.reimported_item_ids
def reimported_album(self, album):
return album.path in self.replaced_album_paths
def record_if_inplace(self, task, session):
if not (session.config['copy'] or session.config['move'] or
session.config['link'] or session.config['hardlink']):
self._log.debug("In place import detected, recording mtimes from "
"source paths")
items = [task.item] \
if isinstance(task, importer.SingletonImportTask) \
else task.items
for item in items:
self.record_import_mtime(item, item.path, item.path)
def record_reimported(self, task, session):
self.reimported_item_ids = {item.id for item, replaced_items
in task.replaced_items.items()
if replaced_items}
self.replaced_album_paths = set(task.replaced_albums.keys())
def write_file_mtime(self, path, mtime):
"""Write the given mtime to the destination path.
"""
stat = os.stat(util.syspath(path))
os.utime(util.syspath(path), (stat.st_atime, mtime))
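    # os.utime() always sets atime and mtime together, so the current atime
    # is read back via stat() and passed through unchanged.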
def write_item_mtime(self, item, mtime):
"""Write the given mtime to an item's `mtime` field and to the mtime
of the item's file.
"""
# The file's mtime on disk must be in sync with the item's mtime
self.write_file_mtime(util.syspath(item.path), mtime)
item.mtime = mtime
def record_import_mtime(self, item, source, destination):
"""Record the file mtime of an item's path before its import.
"""
mtime = os.stat(util.syspath(source)).st_mtime
self.item_mtime[destination] = mtime
self._log.debug("Recorded mtime {0} for item '{1}' imported from "
"'{2}'", mtime, util.displayable_path(destination),
util.displayable_path(source))
def update_album_times(self, lib, album):
if self.reimported_album(album):
self._log.debug("Album '{0}' is reimported, skipping import of "
"added dates for the album and its items.",
util.displayable_path(album.path))
return
album_mtimes = []
for item in album.items():
mtime = self.item_mtime.pop(item.path, None)
if mtime:
album_mtimes.append(mtime)
if self.config['preserve_mtimes'].get(bool):
self.write_item_mtime(item, mtime)
item.store()
        if not album_mtimes:
            # Editorial guard: no source mtimes were recorded for this
            # album, so there is nothing to apply.
            return
        album.added = min(album_mtimes)
self._log.debug("Import of album '{0}', selected album.added={1} "
"from item file mtimes.", album.album, album.added)
album.store()
def update_item_times(self, lib, item):
if self.reimported_item(item):
self._log.debug("Item '{0}' is reimported, skipping import of "
"added date.", util.displayable_path(item.path))
return
mtime = self.item_mtime.pop(item.path, None)
if mtime:
item.added = mtime
if self.config['preserve_mtimes'].get(bool):
self.write_item_mtime(item, mtime)
self._log.debug("Import of item '{0}', selected item.added={1}",
util.displayable_path(item.path), item.added)
item.store()
def update_after_write_time(self, item, path):
"""Update the mtime of the item's file with the item.added value
after each write of the item if `preserve_write_mtimes` is enabled.
"""
if item.added:
if self.config['preserve_write_mtimes'].get(bool):
self.write_item_mtime(item, item.added)
self._log.debug("Write of item '{0}', selected item.added={1}",
util.displayable_path(item.path), item.added)
| 5,587 | Python | .py | 113 | 37.973451 | 78 | 0.604766 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,673 | subsonicplaylist.py | rembo10_headphones/lib/beetsplug/subsonicplaylist.py |
# This file is part of beets.
# Copyright 2019, Joris Jensen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import random
import string
from xml.etree import ElementTree
from hashlib import md5
from urllib.parse import urlencode
import requests
from beets.dbcore import AndQuery
from beets.dbcore.query import MatchQuery
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
__author__ = 'https://github.com/MrNuggelz'
def filter_to_be_removed(items, keys):
if len(items) > len(keys):
dont_remove = []
for artist, album, title in keys:
for item in items:
if artist == item['artist'] and \
album == item['album'] and \
title == item['title']:
dont_remove.append(item)
return [item for item in items if item not in dont_remove]
else:
def to_be_removed(item):
for artist, album, title in keys:
if artist == item['artist'] and\
album == item['album'] and\
title == item['title']:
return False
return True
return [item for item in items if to_be_removed(item)]
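# Editorial note: `keys` is a collection of (artist, album, title) triples,
# and the function returns the items whose triple is absent from `keys`.
# Both branches compute the same set difference; they differ only in which
# side is iterated, a rough performance trade-off based on relative sizes.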
class SubsonicPlaylistPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
'delete': False,
'playlist_ids': [],
'playlist_names': [],
'username': '',
'password': ''
}
)
self.config['password'].redact = True
def update_tags(self, playlist_dict, lib):
with lib.transaction():
for query, playlist_tag in playlist_dict.items():
query = AndQuery([MatchQuery("artist", query[0]),
MatchQuery("album", query[1]),
MatchQuery("title", query[2])])
items = lib.items(query)
if not items:
self._log.warn("{} | track not found ({})", playlist_tag,
query)
continue
for item in items:
item.subsonic_playlist = playlist_tag
item.try_sync(write=True, move=False)
def get_playlist(self, playlist_id):
xml = self.send('getPlaylist', {'id': playlist_id}).text
playlist = ElementTree.fromstring(xml)[0]
if playlist.attrib.get('code', '200') != '200':
alt_error = 'error getting playlist, but no error message found'
self._log.warn(playlist.attrib.get('message', alt_error))
return
name = playlist.attrib.get('name', 'undefined')
tracks = [(t.attrib['artist'], t.attrib['album'], t.attrib['title'])
for t in playlist]
return name, tracks
def commands(self):
def build_playlist(lib, opts, args):
self.config.set_args(opts)
ids = self.config['playlist_ids'].as_str_seq()
if self.config['playlist_names'].as_str_seq():
playlists = ElementTree.fromstring(
self.send('getPlaylists').text)[0]
if playlists.attrib.get('code', '200') != '200':
alt_error = 'error getting playlists,' \
' but no error message found'
self._log.warn(
playlists.attrib.get('message', alt_error))
return
for name in self.config['playlist_names'].as_str_seq():
for playlist in playlists:
if name == playlist.attrib['name']:
ids.append(playlist.attrib['id'])
playlist_dict = self.get_playlists(ids)
# delete old tags
if self.config['delete']:
existing = list(lib.items('subsonic_playlist:";"'))
to_be_removed = filter_to_be_removed(
existing,
playlist_dict.keys())
for item in to_be_removed:
item['subsonic_playlist'] = ''
with lib.transaction():
item.try_sync(write=True, move=False)
self.update_tags(playlist_dict, lib)
subsonicplaylist_cmds = Subcommand(
'subsonicplaylist', help='import a subsonic playlist'
)
subsonicplaylist_cmds.parser.add_option(
'-d',
'--delete',
action='store_true',
help='delete tag from items not in any playlist anymore',
)
subsonicplaylist_cmds.func = build_playlist
return [subsonicplaylist_cmds]
def generate_token(self):
salt = ''.join(random.choices(string.ascii_lowercase + string.digits))
return md5(
(self.config['password'].get() + salt).encode()).hexdigest(), salt
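    # Editorial note: this is Subsonic's token authentication scheme,
    # t = md5(password + salt), sent along with the salt as `s`. Note that
    # random.choices() defaults to k=1, so the salt is a single character --
    # valid, if minimal, per the Subsonic API.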
def send(self, endpoint, params=None):
if params is None:
params = {}
a, b = self.generate_token()
params['u'] = self.config['username']
params['t'] = a
params['s'] = b
params['v'] = '1.12.0'
params['c'] = 'beets'
resp = requests.get('{}/rest/{}?{}'.format(
self.config['base_url'].get(),
endpoint,
urlencode(params))
)
return resp
def get_playlists(self, ids):
output = {}
for playlist_id in ids:
name, tracks = self.get_playlist(playlist_id)
for track in tracks:
if track not in output:
output[track] = ';'
output[track] += name + ';'
return output
| 6,305 | Python | .py | 149 | 30.100671 | 78 | 0.547767 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,674 | deezer.py | rembo10_headphones/lib/beetsplug/deezer.py |
# This file is part of beets.
# Copyright 2019, Rahul Ahuja.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Deezer release and track search support to the autotagger
"""
import collections
import unidecode
import requests
from beets import ui
from beets.autotag import AlbumInfo, TrackInfo
from beets.plugins import MetadataSourcePlugin, BeetsPlugin
class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin):
data_source = 'Deezer'
# Base URLs for the Deezer API
# Documentation: https://developers.deezer.com/api/
search_url = 'https://api.deezer.com/search/'
album_url = 'https://api.deezer.com/album/'
track_url = 'https://api.deezer.com/track/'
id_regex = {
'pattern': r'(^|deezer\.com/)([a-z]*/)?({}/)?(\d+)',
'match_group': 4,
}
def __init__(self):
super().__init__()
def album_for_id(self, album_id):
"""Fetch an album by its Deezer ID or URL and return an
AlbumInfo object or None if the album is not found.
:param album_id: Deezer ID or URL for the album.
:type album_id: str
:return: AlbumInfo object for album.
:rtype: beets.autotag.hooks.AlbumInfo or None
"""
deezer_id = self._get_id('album', album_id)
if deezer_id is None:
return None
album_data = requests.get(self.album_url + deezer_id).json()
artist, artist_id = self.get_artist(album_data['contributors'])
release_date = album_data['release_date']
date_parts = [int(part) for part in release_date.split('-')]
num_date_parts = len(date_parts)
if num_date_parts == 3:
year, month, day = date_parts
elif num_date_parts == 2:
year, month = date_parts
day = None
elif num_date_parts == 1:
year = date_parts[0]
month = None
day = None
else:
raise ui.UserError(
"Invalid `release_date` returned "
"by {} API: '{}'".format(self.data_source, release_date)
)
tracks_data = requests.get(
self.album_url + deezer_id + '/tracks'
).json()['data']
if not tracks_data:
return None
tracks = []
medium_totals = collections.defaultdict(int)
for i, track_data in enumerate(tracks_data, start=1):
track = self._get_track(track_data)
track.index = i
medium_totals[track.medium] += 1
tracks.append(track)
for track in tracks:
track.medium_total = medium_totals[track.medium]
return AlbumInfo(
album=album_data['title'],
album_id=deezer_id,
artist=artist,
artist_credit=self.get_artist([album_data['artist']])[0],
artist_id=artist_id,
tracks=tracks,
albumtype=album_data['record_type'],
va=len(album_data['contributors']) == 1
and artist.lower() == 'various artists',
year=year,
month=month,
day=day,
label=album_data['label'],
mediums=max(medium_totals.keys()),
data_source=self.data_source,
data_url=album_data['link'],
)
def _get_track(self, track_data):
"""Convert a Deezer track object dict to a TrackInfo object.
:param track_data: Deezer Track object dict
:type track_data: dict
:return: TrackInfo object for track
:rtype: beets.autotag.hooks.TrackInfo
"""
artist, artist_id = self.get_artist(
track_data.get('contributors', [track_data['artist']])
)
return TrackInfo(
title=track_data['title'],
track_id=track_data['id'],
artist=artist,
artist_id=artist_id,
length=track_data['duration'],
index=track_data['track_position'],
medium=track_data['disk_number'],
medium_index=track_data['track_position'],
data_source=self.data_source,
data_url=track_data['link'],
)
def track_for_id(self, track_id=None, track_data=None):
"""Fetch a track by its Deezer ID or URL and return a
TrackInfo object or None if the track is not found.
:param track_id: (Optional) Deezer ID or URL for the track. Either
``track_id`` or ``track_data`` must be provided.
:type track_id: str
:param track_data: (Optional) Simplified track object dict. May be
provided instead of ``track_id`` to avoid unnecessary API calls.
:type track_data: dict
:return: TrackInfo object for track
:rtype: beets.autotag.hooks.TrackInfo or None
"""
if track_data is None:
deezer_id = self._get_id('track', track_id)
if deezer_id is None:
return None
track_data = requests.get(self.track_url + deezer_id).json()
track = self._get_track(track_data)
# Get album's tracks to set `track.index` (position on the entire
# release) and `track.medium_total` (total number of tracks on
# the track's disc).
album_tracks_data = requests.get(
self.album_url + str(track_data['album']['id']) + '/tracks'
).json()['data']
medium_total = 0
for i, track_data in enumerate(album_tracks_data, start=1):
if track_data['disk_number'] == track.medium:
medium_total += 1
if track_data['id'] == track.track_id:
track.index = i
track.medium_total = medium_total
return track
@staticmethod
def _construct_search_query(filters=None, keywords=''):
"""Construct a query string with the specified filters and keywords to
be provided to the Deezer Search API
(https://developers.deezer.com/api/search).
:param filters: (Optional) Field filters to apply.
:type filters: dict
:param keywords: (Optional) Query keywords to use.
:type keywords: str
:return: Query string to be provided to the Search API.
:rtype: str
"""
query_components = [
keywords,
' '.join(f'{k}:"{v}"' for k, v in filters.items()),
]
query = ' '.join([q for q in query_components if q])
if not isinstance(query, str):
query = query.decode('utf8')
return unidecode.unidecode(query)
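    # Illustrative example (editorial, not from the original source):
    #     _construct_search_query(filters={'artist': 'Héroes'},
    #                             keywords='song')
    # returns 'song artist:"Heroes"' -- unidecode folds accented characters
    # to plain ASCII before the query is sent.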
def _search_api(self, query_type, filters=None, keywords=''):
"""Query the Deezer Search API for the specified ``keywords``, applying
the provided ``filters``.
:param query_type: The Deezer Search API method to use. Valid types
are: 'album', 'artist', 'history', 'playlist', 'podcast',
            'radio', 'track', and 'user'.
:type query_type: str
:param filters: (Optional) Field filters to apply.
:type filters: dict
:param keywords: (Optional) Query keywords to use.
:type keywords: str
        :return: JSON data for the :class:`Response <Response>` object or None
if no search results are returned.
:rtype: dict or None
"""
query = self._construct_search_query(
keywords=keywords, filters=filters
)
if not query:
return None
self._log.debug(
f"Searching {self.data_source} for '{query}'"
)
response = requests.get(
self.search_url + query_type, params={'q': query}
)
response.raise_for_status()
response_data = response.json().get('data', [])
self._log.debug(
"Found {} result(s) from {} for '{}'",
len(response_data),
self.data_source,
query,
)
return response_data
| 8,463 | Python | .py | 205 | 31.809756 | 79 | 0.595773 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,675 | mbsubmit.py | rembo10_headphones/lib/beetsplug/mbsubmit.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson and Diego Moreda.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
parseable by the MusicBrainz track parser [1]. Programmatic submitting is not
implemented by MusicBrainz yet.
[1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings
"""
from beets.autotag import Recommendation
from beets.plugins import BeetsPlugin
from beets.ui.commands import PromptChoice
from beetsplug.info import print_data
class MBSubmitPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'format': '$track. $title - $artist ($length)',
'threshold': 'medium',
})
# Validate and store threshold.
self.threshold = self.config['threshold'].as_choice({
'none': Recommendation.none,
'low': Recommendation.low,
'medium': Recommendation.medium,
'strong': Recommendation.strong
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_event)
def before_choose_candidate_event(self, session, task):
if task.rec <= self.threshold:
return [PromptChoice('p', 'Print tracks', self.print_tracks)]
def print_tracks(self, session, task):
for i in sorted(task.items, key=lambda i: i.track):
print_data(None, i, self.config['format'].as_str())
| 2,107 | Python | .py | 45 | 40.866667 | 77 | 0.705366 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,676 | bpm.py | rembo10_headphones/lib/beetsplug/bpm.py |
# This file is part of beets.
# Copyright 2016, aroquen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Determine BPM by pressing a key to the rhythm."""
import time
from beets import ui
from beets.plugins import BeetsPlugin
def bpm(max_strokes):
"""Returns average BPM (possibly of a playing song)
listening to Enter keystrokes.
"""
t0 = None
dt = []
for i in range(max_strokes):
# Press enter to the rhythm...
s = input()
if s == '':
t1 = time.time()
# Only start measuring at the second stroke
if t0:
dt.append(t1 - t0)
t0 = t1
else:
break
# Return average BPM
# bpm = (max_strokes-1) / sum(dt) * 60
ave = sum([1.0 / dti * 60 for dti in dt]) / len(dt)
return ave
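# Editorial note: the returned value is the mean of per-interval
# instantaneous tempos (60 / dt for each gap), not the commented-out
# overall figure (strokes - 1) / total_time * 60. For dt = [0.5, 0.5] both
# give 120 BPM; they diverge once the taps are uneven.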
class BPMPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'max_strokes': 3,
'overwrite': True,
})
def commands(self):
cmd = ui.Subcommand('bpm',
help='determine bpm of a song by pressing '
'a key to the rhythm')
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
items = lib.items(ui.decargs(args))
write = ui.should_write()
self.get_bpm(items, write)
def get_bpm(self, items, write=False):
overwrite = self.config['overwrite'].get(bool)
if len(items) > 1:
            raise ValueError('Can only get bpm of one song at a time')
item = items[0]
if item['bpm']:
self._log.info('Found bpm {0}', item['bpm'])
if not overwrite:
return
self._log.info('Press Enter {0} times to the rhythm or Ctrl-D '
'to exit', self.config['max_strokes'].get(int))
new_bpm = bpm(self.config['max_strokes'].get(int))
item['bpm'] = int(new_bpm)
if write:
item.try_write()
item.store()
self._log.info('Added new bpm {0}', item['bpm'])
| 2,647 | Python | .py | 72 | 28.847222 | 71 | 0.596875 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,677 | info.py | rembo10_headphones/lib/beetsplug/info.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Shows file metadata.
"""
import os
from beets.plugins import BeetsPlugin
from beets import ui
import mediafile
from beets.library import Item
from beets.util import displayable_path, normpath, syspath
def tag_data(lib, args, album=False):
query = []
for arg in args:
path = normpath(arg)
if os.path.isfile(syspath(path)):
yield tag_data_emitter(path)
else:
query.append(arg)
if query:
for item in lib.items(query):
yield tag_data_emitter(item.path)
def tag_fields():
fields = set(mediafile.MediaFile.readable_fields())
fields.add('art')
return fields
def tag_data_emitter(path):
def emitter(included_keys):
if included_keys == '*':
fields = tag_fields()
else:
fields = included_keys
if 'images' in fields:
# We can't serialize the image data.
fields.remove('images')
mf = mediafile.MediaFile(syspath(path))
tags = {}
for field in fields:
if field == 'art':
tags[field] = mf.art is not None
else:
tags[field] = getattr(mf, field, None)
# create a temporary Item to take advantage of __format__
item = Item.from_path(syspath(path))
return tags, item
return emitter
def library_data(lib, args, album=False):
for item in lib.albums(args) if album else lib.items(args):
yield library_data_emitter(item)
def library_data_emitter(item):
def emitter(included_keys):
data = dict(item.formatted(included_keys=included_keys))
return data, item
return emitter
def update_summary(summary, tags):
for key, value in tags.items():
if key not in summary:
summary[key] = value
elif summary[key] != value:
summary[key] = '[various]'
return summary
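# Illustrative example (editorial): merging {'genre': 'Rock'} and then
# {'genre': 'Jazz'} into the same summary leaves summary['genre'] set to
# '[various]', while keys seen with one consistent value keep that value.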
def print_data(data, item=None, fmt=None):
"""Print, with optional formatting, the fields of a single element.
If no format string `fmt` is passed, the entries on `data` are printed one
in each line, with the format 'field: value'. If `fmt` is not `None`, the
`item` is printed according to `fmt`, using the `Item.__format__`
machinery.
"""
if fmt:
# use fmt specified by the user
ui.print_(format(item, fmt))
return
path = displayable_path(item.path) if item else None
formatted = {}
for key, value in data.items():
if isinstance(value, list):
formatted[key] = '; '.join(value)
        elif value is not None:
formatted[key] = value
if len(formatted) == 0:
return
maxwidth = max(len(key) for key in formatted)
lineformat = f'{{0:>{maxwidth}}}: {{1}}'
if path:
ui.print_(displayable_path(path))
for field in sorted(formatted):
value = formatted[field]
if isinstance(value, list):
value = '; '.join(value)
ui.print_(lineformat.format(field, value))
def print_data_keys(data, item=None):
"""Print only the keys (field names) for an item.
"""
path = displayable_path(item.path) if item else None
formatted = []
for key, value in data.items():
formatted.append(key)
if len(formatted) == 0:
return
line_format = '{0}{{0}}'.format(' ' * 4)
if path:
ui.print_(displayable_path(path))
for field in sorted(formatted):
ui.print_(line_format.format(field))
class InfoPlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand('info', help='show file metadata')
cmd.func = self.run
cmd.parser.add_option(
'-l', '--library', action='store_true',
help='show library fields instead of tags',
)
cmd.parser.add_option(
'-a', '--album', action='store_true',
help='show album fields instead of tracks (implies "--library")',
)
cmd.parser.add_option(
'-s', '--summarize', action='store_true',
help='summarize the tags of all files',
)
cmd.parser.add_option(
'-i', '--include-keys', default=[],
action='append', dest='included_keys',
help='comma separated list of keys to show',
)
cmd.parser.add_option(
'-k', '--keys-only', action='store_true',
help='show only the keys',
)
cmd.parser.add_format_option(target='item')
return [cmd]
def run(self, lib, opts, args):
"""Print tag info or library data for each file referenced by args.
Main entry point for the `beet info ARGS...` command.
If an argument is a path pointing to an existing file, then the tags
of that file are printed. All other arguments are considered
queries, and for each item matching all those queries the tags from
the file are printed.
If `opts.summarize` is true, the function merges all tags into one
dictionary and only prints that. If two files have different values
for the same tag, the value is set to '[various]'
"""
if opts.library or opts.album:
data_collector = library_data
else:
data_collector = tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(','))
# Drop path even if user provides it multiple times
included_keys = [k for k in included_keys if k != 'path']
first = True
summary = {}
for data_emitter in data_collector(
lib, ui.decargs(args),
album=opts.album,
):
try:
data, item = data_emitter(included_keys or '*')
except (mediafile.UnreadableFileError, OSError) as ex:
self._log.error('cannot read file: {0}', ex)
continue
if opts.summarize:
update_summary(summary, data)
else:
if not first:
ui.print_()
if opts.keys_only:
print_data_keys(data, item)
else:
fmt = ui.decargs([opts.format])[0] if opts.format else None
print_data(data, item, fmt)
first = False
if opts.summarize:
print_data(summary)
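# Usage sketch (queries and paths are hypothetical), based on the options
# declared above:
#
#     beet info /path/to/file.flac       # tags of a file on disk
#     beet info -l artist:beatles        # library fields instead of tags
#     beet info -a -i album,year QUERY   # album fields, two keys only
#     beet info -s QUERY                 # one merged summary; conflicting
#                                        # values appear as '[various]'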
| size: 7,083 | language: Python | extension: .py | total_lines: 185 | avg_line_length: 29.697297 | max_line_length: 79 | alphanum_fraction: 0.606945 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,678 | file_name: bench.py | file_path: rembo10_headphones/lib/beetsplug/bench.py | content: |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some simple performance benchmarks for beets.
"""
from beets.plugins import BeetsPlugin
from beets import ui
from beets import vfs
from beets import library
from beets.util.functemplate import Template
from beets.autotag import match
from beets import plugins
from beets import importer
import cProfile
import timeit
def aunique_benchmark(lib, prof):
def _build_tree():
vfs.libtree(lib)
# Measure path generation performance with %aunique{} included.
lib.path_formats = [
(library.PF_KEY_DEFAULT,
Template('$albumartist/$album%aunique{}/$track $title')),
]
if prof:
cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
'paths.withaunique.prof')
else:
interval = timeit.timeit(_build_tree, number=1)
print('With %aunique:', interval)
    # And with %aunique replaced with a "cheap" no-op function.
lib.path_formats = [
(library.PF_KEY_DEFAULT,
Template('$albumartist/$album%lower{}/$track $title')),
]
if prof:
cProfile.runctx('_build_tree()', {}, {'_build_tree': _build_tree},
'paths.withoutaunique.prof')
else:
interval = timeit.timeit(_build_tree, number=1)
print('Without %aunique:', interval)
def match_benchmark(lib, prof, query=None, album_id=None):
# If no album ID is provided, we'll match against a suitably huge
# album.
if not album_id:
album_id = '9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc'
# Get an album from the library to use as the source for the match.
items = lib.albums(query).get().items()
# Ensure fingerprinting is invoked (if enabled).
plugins.send('import_task_start',
task=importer.ImportTask(None, None, items),
session=importer.ImportSession(lib, None, None, None))
# Run the match.
def _run_match():
match.tag_album(items, search_ids=[album_id])
if prof:
cProfile.runctx('_run_match()', {}, {'_run_match': _run_match},
'match.prof')
else:
interval = timeit.timeit(_run_match, number=1)
print('match duration:', interval)
class BenchmarkPlugin(BeetsPlugin):
"""A plugin for performing some simple performance benchmarks.
"""
def commands(self):
aunique_bench_cmd = ui.Subcommand('bench_aunique',
help='benchmark for %aunique{}')
aunique_bench_cmd.parser.add_option('-p', '--profile',
action='store_true', default=False,
help='performance profiling')
aunique_bench_cmd.func = lambda lib, opts, args: \
aunique_benchmark(lib, opts.profile)
match_bench_cmd = ui.Subcommand('bench_match',
help='benchmark for track matching')
match_bench_cmd.parser.add_option('-p', '--profile',
action='store_true', default=False,
help='performance profiling')
match_bench_cmd.parser.add_option('-i', '--id', default=None,
help='album ID to match against')
match_bench_cmd.func = lambda lib, opts, args: \
match_benchmark(lib, opts.profile, ui.decargs(args), opts.id)
return [aunique_bench_cmd, match_bench_cmd]
| size: 4,111 | language: Python | extension: .py | total_lines: 91 | avg_line_length: 36.087912 | max_line_length: 79 | alphanum_fraction: 0.629121 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,679 | file_name: mbcollection.py | file_path: rembo10_headphones/lib/beetsplug/mbcollection.py | content: |
# This file is part of beets.
# Copyright (c) 2011, Jeffrey Aylesworth <mail@jeffrey.red>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets import ui
from beets import config
import musicbrainzngs
import re
SUBMISSION_CHUNK_SIZE = 200
FETCH_CHUNK_SIZE = 100
UUID_REGEX = r'^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$'
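# A small sketch of what UUID_REGEX accepts: a lowercase, hyphenated
# MusicBrainz release ID. Both IDs below are illustrative.
def _uuid_regex_sketch():
    assert re.match(UUID_REGEX, '9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc')
    assert not re.match(UUID_REGEX, 'not-a-valid-mbid')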
def mb_call(func, *args, **kwargs):
"""Call a MusicBrainz API function and catch exceptions.
"""
try:
return func(*args, **kwargs)
except musicbrainzngs.AuthenticationError:
raise ui.UserError('authentication with MusicBrainz failed')
except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc:
raise ui.UserError(f'MusicBrainz API error: {exc}')
except musicbrainzngs.UsageError:
raise ui.UserError('MusicBrainz credentials missing')
def submit_albums(collection_id, release_ids):
"""Add all of the release IDs to the indicated collection. Multiple
requests are made if there are many release IDs to submit.
"""
for i in range(0, len(release_ids), SUBMISSION_CHUNK_SIZE):
chunk = release_ids[i:i + SUBMISSION_CHUNK_SIZE]
mb_call(
musicbrainzngs.add_releases_to_collection,
collection_id, chunk
)
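# Chunking sketch with illustrative numbers: 450 IDs go out as three
# requests of 200, 200 and 50 releases under SUBMISSION_CHUNK_SIZE = 200.
def _chunking_sketch():
    ids = list(range(450))
    chunks = [ids[i:i + SUBMISSION_CHUNK_SIZE]
              for i in range(0, len(ids), SUBMISSION_CHUNK_SIZE)]
    assert [len(c) for c in chunks] == [200, 200, 50]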
class MusicBrainzCollectionPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config['musicbrainz']['pass'].redact = True
musicbrainzngs.auth(
config['musicbrainz']['user'].as_str(),
config['musicbrainz']['pass'].as_str(),
)
self.config.add({
'auto': False,
'collection': '',
'remove': False,
})
if self.config['auto']:
self.import_stages = [self.imported]
def _get_collection(self):
collections = mb_call(musicbrainzngs.get_collections)
if not collections['collection-list']:
raise ui.UserError('no collections exist for user')
# Get all collection IDs, avoiding event collections
collection_ids = [x['id'] for x in collections['collection-list']]
if not collection_ids:
raise ui.UserError('No collection found.')
# Check that the collection exists so we can present a nice error
collection = self.config['collection'].as_str()
if collection:
if collection not in collection_ids:
raise ui.UserError('invalid collection ID: {}'
.format(collection))
return collection
# No specified collection. Just return the first collection ID
return collection_ids[0]
def _get_albums_in_collection(self, id):
def _fetch(offset):
res = mb_call(
musicbrainzngs.get_releases_in_collection,
id,
limit=FETCH_CHUNK_SIZE,
offset=offset
)['collection']
return [x['id'] for x in res['release-list']], res['release-count']
offset = 0
albums_in_collection, release_count = _fetch(offset)
        # The first chunk was fetched above; continue from the next offset
        # so the first page isn't requested (and appended) twice.
        for offset in range(FETCH_CHUNK_SIZE, release_count,
                            FETCH_CHUNK_SIZE):
            albums_in_collection += _fetch(offset)[0]
return albums_in_collection
def commands(self):
mbupdate = Subcommand('mbupdate',
help='Update MusicBrainz collection')
mbupdate.parser.add_option('-r', '--remove',
action='store_true',
default=None,
dest='remove',
help='Remove albums not in beets library')
mbupdate.func = self.update_collection
return [mbupdate]
def remove_missing(self, collection_id, lib_albums):
lib_ids = {x.mb_albumid for x in lib_albums}
albums_in_collection = self._get_albums_in_collection(collection_id)
remove_me = list(set(albums_in_collection) - lib_ids)
for i in range(0, len(remove_me), FETCH_CHUNK_SIZE):
chunk = remove_me[i:i + FETCH_CHUNK_SIZE]
mb_call(
musicbrainzngs.remove_releases_from_collection,
collection_id, chunk
)
def update_collection(self, lib, opts, args):
self.config.set_args(opts)
remove_missing = self.config['remove'].get(bool)
self.update_album_list(lib, lib.albums(), remove_missing)
def imported(self, session, task):
"""Add each imported album to the collection.
"""
if task.is_album:
self.update_album_list(session.lib, [task.album])
def update_album_list(self, lib, album_list, remove_missing=False):
"""Update the MusicBrainz collection from a list of Beets albums
"""
collection_id = self._get_collection()
# Get a list of all the album IDs.
album_ids = []
for album in album_list:
aid = album.mb_albumid
if aid:
if re.match(UUID_REGEX, aid):
album_ids.append(aid)
else:
self._log.info('skipping invalid MBID: {0}', aid)
# Submit to MusicBrainz.
self._log.info(
'Updating MusicBrainz collection {0}...', collection_id
)
submit_albums(collection_id, album_ids)
if remove_missing:
self.remove_missing(collection_id, lib.albums())
self._log.info('...MusicBrainz collection updated.')
| size: 6,156 | language: Python | extension: .py | total_lines: 140 | avg_line_length: 34.221429 | max_line_length: 79 | alphanum_fraction: 0.620661 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,680 | file_name: random.py | file_path: rembo10_headphones/lib/beetsplug/random.py | content: |
# This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Get a random song or album from the library.
"""
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs, print_
from beets.random import random_objs
def random_func(lib, opts, args):
"""Select some random items or albums and print the results.
"""
# Fetch all the objects matching the query into a list.
query = decargs(args)
if opts.album:
objs = list(lib.albums(query))
else:
objs = list(lib.items(query))
# Print a random subset.
objs = random_objs(objs, opts.album, opts.number, opts.time,
opts.equal_chance)
for obj in objs:
print_(format(obj))
random_cmd = Subcommand('random',
help='choose a random track or album')
random_cmd.parser.add_option(
'-n', '--number', action='store', type="int",
help='number of objects to choose', default=1)
random_cmd.parser.add_option(
'-e', '--equal-chance', action='store_true',
help='each artist has the same chance')
random_cmd.parser.add_option(
'-t', '--time', action='store', type="float",
help='total length in minutes of objects to choose')
random_cmd.parser.add_all_common_options()
random_cmd.func = random_func
class Random(BeetsPlugin):
def commands(self):
return [random_cmd]
| size: 1,957 | language: Python | extension: .py | total_lines: 48 | avg_line_length: 36.604167 | max_line_length: 71 | alphanum_fraction: 0.711053 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,681 | file_name: embyupdate.py | file_path: rembo10_headphones/lib/beetsplug/embyupdate.py | content: |
"""Updates the Emby Library whenever the beets library is changed.
emby:
host: localhost
port: 8096
username: user
apikey: apikey
password: password
"""
import hashlib
import requests
from urllib.parse import urlencode, urljoin, parse_qs, urlsplit, urlunsplit
from beets import config
from beets.plugins import BeetsPlugin
def api_url(host, port, endpoint):
"""Returns a joined url.
Takes host, port and endpoint and generates a valid emby API url.
:param host: Hostname of the emby server
:param port: Portnumber of the emby server
:param endpoint: API endpoint
:type host: str
:type port: int
:type endpoint: str
:returns: Full API url
:rtype: str
"""
    # Prepend a scheme if the host does not already include one.
    if host.startswith(('http://', 'https://')):
        hostname = host
    else:
        hostname = 'http://' + host
joined = urljoin(
'{hostname}:{port}'.format(
hostname=hostname,
port=port
),
endpoint
)
scheme, netloc, path, query_string, fragment = urlsplit(joined)
query_params = parse_qs(query_string)
query_params['format'] = ['json']
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
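# A small sketch of the URL this builds. The host and port are the plugin's
# documented defaults; the endpoint is illustrative.
def _api_url_sketch():
    url = api_url('localhost', 8096, '/Library/Refresh')
    assert url == 'http://localhost:8096/Library/Refresh?format=json'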
def password_data(username, password):
"""Returns a dict with username and its encoded password.
:param username: Emby username
:param password: Emby password
:type username: str
:type password: str
:returns: Dictionary with username and encoded password
:rtype: dict
"""
return {
'username': username,
'password': hashlib.sha1(password.encode('utf-8')).hexdigest(),
'passwordMd5': hashlib.md5(password.encode('utf-8')).hexdigest()
}
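# Shape sketch of the auth payload (credentials are made up): the SHA-1 and
# MD5 hex digests are 40 and 32 characters long, respectively.
def _password_data_sketch():
    data = password_data('user', 'secret')
    assert set(data) == {'username', 'password', 'passwordMd5'}
    assert len(data['password']) == 40
    assert len(data['passwordMd5']) == 32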
def create_headers(user_id, token=None):
"""Return header dict that is needed to talk to the Emby API.
:param user_id: Emby user ID
:param token: Authentication token for Emby
:type user_id: str
:type token: str
:returns: Headers for requests
:rtype: dict
"""
headers = {}
authorization = (
'MediaBrowser UserId="{user_id}", '
'Client="other", '
'Device="beets", '
'DeviceId="beets", '
'Version="0.0.0"'
).format(user_id=user_id)
headers['x-emby-authorization'] = authorization
if token:
headers['x-mediabrowser-token'] = token
return headers
def get_token(host, port, headers, auth_data):
"""Return token for a user.
:param host: Emby host
:param port: Emby port
:param headers: Headers for requests
:param auth_data: Username and encoded password for authentication
:type host: str
:type port: int
:type headers: dict
:type auth_data: dict
:returns: Access Token
:rtype: str
"""
url = api_url(host, port, '/Users/AuthenticateByName')
r = requests.post(url, headers=headers, data=auth_data)
return r.json().get('AccessToken')
def get_user(host, port, username):
"""Return user dict from server or None if there is no user.
:param host: Emby host
:param port: Emby port
    :param username: Username
:type host: str
:type port: int
:type username: str
:returns: Matched Users
:rtype: list
"""
url = api_url(host, port, '/Users/Public')
r = requests.get(url)
user = [i for i in r.json() if i['Name'] == username]
return user
class EmbyUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
# Adding defaults.
config['emby'].add({
'host': 'http://localhost',
'port': 8096,
'apikey': None,
'password': None,
})
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update for the end.
"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Emby.
"""
self._log.info('Updating Emby library...')
host = config['emby']['host'].get()
port = config['emby']['port'].get()
username = config['emby']['username'].get()
password = config['emby']['password'].get()
token = config['emby']['apikey'].get()
        # Check if at least an API key or a password is given.
if not any([password, token]):
self._log.warning('Provide at least Emby password or apikey.')
return
# Get user information from the Emby API.
user = get_user(host, port, username)
if not user:
self._log.warning(f'User {username} could not be found.')
return
if not token:
# Create Authentication data and headers.
auth_data = password_data(username, password)
headers = create_headers(user[0]['Id'])
# Get authentication token.
token = get_token(host, port, headers, auth_data)
if not token:
self._log.warning(
'Could not get token for user {0}', username
)
return
# Recreate headers with a token.
headers = create_headers(user[0]['Id'], token=token)
# Trigger the Update.
url = api_url(host, port, '/Library/Refresh')
r = requests.post(url, headers=headers)
if r.status_code != 204:
self._log.warning('Update could not be triggered')
else:
self._log.info('Update triggered.')
| size: 5,827 | language: Python | extension: .py | total_lines: 163 | avg_line_length: 28.404908 | max_line_length: 76 | alphanum_fraction: 0.620242 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,682 | file_name: types.py | file_path: rembo10_headphones/lib/beetsplug/types.py | content: |
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from beets.plugins import BeetsPlugin
from beets.dbcore import types
from confuse import ConfigValueError
from beets import library
class TypesPlugin(BeetsPlugin):
@property
def item_types(self):
return self._types()
@property
def album_types(self):
return self._types()
def _types(self):
if not self.config.exists():
return {}
mytypes = {}
for key, value in self.config.items():
if value.get() == 'int':
mytypes[key] = types.INTEGER
elif value.get() == 'float':
mytypes[key] = types.FLOAT
elif value.get() == 'bool':
mytypes[key] = types.BOOLEAN
elif value.get() == 'date':
mytypes[key] = library.DateType()
else:
raise ConfigValueError(
"unknown type '{}' for the '{}' field"
.format(value, key))
return mytypes
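# A hypothetical configuration sketch for this plugin: each entry under
# `types:` in the beets config maps a flexible field name to one of the
# type names handled above (the field names here are made up).
#
#     types:
#         rating: int
#         avg_bpm: float
#         liked: bool
#         last_played: date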
| size: 1,629 | language: Python | extension: .py | total_lines: 42 | avg_line_length: 31.404762 | max_line_length: 71 | alphanum_fraction: 0.646612 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,683 | file_name: __init__.py | file_path: rembo10_headphones/lib/beetsplug/lastgenre/__init__.py | content: |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Gets genres for imported music based on Last.fm tags.
Uses a provided whitelist file to determine which tags are valid genres.
The included (default) genre list was originally produced by scraping Wikipedia
and has been edited to remove some questionable entries.
The scraper script used is available here:
https://gist.github.com/1241307
"""
import pylast
import codecs
import os
import yaml
import traceback
from beets import plugins
from beets import ui
from beets import config
from beets.util import normpath, plurality
from beets import library
LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY)
PYLAST_EXCEPTIONS = (
pylast.WSError,
pylast.MalformedResponseError,
pylast.NetworkError,
)
REPLACE = {
'\u2010': '-',
}
def deduplicate(seq):
"""Remove duplicates from sequence wile preserving order.
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
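# Order-preserving deduplication sketch (illustrative values):
def _deduplicate_sketch():
    tags = ['rock', 'pop', 'rock', 'jazz']
    assert deduplicate(tags) == ['rock', 'pop', 'jazz']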
# Canonicalization tree processing.
def flatten_tree(elem, path, branches):
"""Flatten nested lists/dictionaries into lists of strings
(branches).
"""
if not path:
path = []
if isinstance(elem, dict):
for (k, v) in elem.items():
flatten_tree(v, path + [k], branches)
elif isinstance(elem, list):
for sub in elem:
flatten_tree(sub, path, branches)
else:
branches.append(path + [str(elem)])
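# Flattening sketch: a nested mapping becomes root-to-leaf branches. The
# genres are illustrative, not entries from the shipped tree.
def _flatten_tree_sketch():
    branches = []
    flatten_tree({'rock': ['metal', 'punk']}, [], branches)
    assert branches == [['rock', 'metal'], ['rock', 'punk']]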
def find_parents(candidate, branches):
"""Find parents genre of a given genre, ordered from the closest to
the further parent.
"""
for branch in branches:
try:
idx = branch.index(candidate.lower())
return list(reversed(branch[:idx + 1]))
except ValueError:
continue
return [candidate]
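# Parent lookup sketch on a hand-made branch: the match and its ancestors
# come back closest-first; unknown genres are returned as-is.
def _find_parents_sketch():
    branches = [['rock', 'metal', 'death metal']]
    assert find_parents('metal', branches) == ['metal', 'rock']
    assert find_parents('ska', branches) == ['ska']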
# Main plugin logic.
WHITELIST = os.path.join(os.path.dirname(__file__), 'genres.txt')
C14N_TREE = os.path.join(os.path.dirname(__file__), 'genres-tree.yaml')
class LastGenrePlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'whitelist': True,
'min_weight': 10,
'count': 1,
'fallback': None,
'canonical': False,
'source': 'album',
'force': True,
'auto': True,
'separator': ', ',
'prefer_specific': False,
'title_case': True,
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
if self.config['auto']:
self.import_stages = [self.imported]
self._genre_cache = {}
# Read the whitelist file if enabled.
self.whitelist = set()
wl_filename = self.config['whitelist'].get()
if wl_filename in (True, ''): # Indicates the default whitelist.
wl_filename = WHITELIST
if wl_filename:
wl_filename = normpath(wl_filename)
with open(wl_filename, 'rb') as f:
for line in f:
line = line.decode('utf-8').strip().lower()
if line and not line.startswith('#'):
self.whitelist.add(line)
# Read the genres tree for canonicalization if enabled.
self.c14n_branches = []
c14n_filename = self.config['canonical'].get()
self.canonicalize = c14n_filename is not False
# Default tree
if c14n_filename in (True, ''):
c14n_filename = C14N_TREE
elif not self.canonicalize and self.config['prefer_specific'].get():
# prefer_specific requires a tree, load default tree
c14n_filename = C14N_TREE
# Read the tree
if c14n_filename:
self._log.debug('Loading canonicalization tree {0}', c14n_filename)
c14n_filename = normpath(c14n_filename)
with codecs.open(c14n_filename, 'r', encoding='utf-8') as f:
genres_tree = yaml.safe_load(f)
flatten_tree(genres_tree, [], self.c14n_branches)
@property
def sources(self):
"""A tuple of allowed genre sources. May contain 'track',
        'album', or 'artist'.
"""
source = self.config['source'].as_choice(('track', 'album', 'artist'))
if source == 'track':
return 'track', 'album', 'artist'
elif source == 'album':
return 'album', 'artist'
elif source == 'artist':
return 'artist',
def _get_depth(self, tag):
"""Find the depth of a tag in the genres tree.
"""
depth = None
        for branch in self.c14n_branches:
            if tag in branch:
                depth = branch.index(tag)
                break
return depth
def _sort_by_depth(self, tags):
"""Given a list of tags, sort the tags by their depths in the
genre tree.
"""
depth_tag_pairs = [(self._get_depth(t), t) for t in tags]
depth_tag_pairs = [e for e in depth_tag_pairs if e[0] is not None]
depth_tag_pairs.sort(reverse=True)
return [p[1] for p in depth_tag_pairs]
def _resolve_genres(self, tags):
"""Given a list of strings, return a genre by joining them into a
single string and (optionally) canonicalizing each.
"""
if not tags:
return None
count = self.config['count'].get(int)
if self.canonicalize:
# Extend the list to consider tags parents in the c14n tree
tags_all = []
for tag in tags:
# Add parents that are in the whitelist, or add the oldest
# ancestor if no whitelist
if self.whitelist:
parents = [x for x in find_parents(tag, self.c14n_branches)
if self._is_allowed(x)]
else:
parents = [find_parents(tag, self.c14n_branches)[-1]]
tags_all += parents
# Stop if we have enough tags already, unless we need to find
# the most specific tag (instead of the most popular).
if (not self.config['prefer_specific'] and
len(tags_all) >= count):
break
tags = tags_all
tags = deduplicate(tags)
# Sort the tags by specificity.
if self.config['prefer_specific']:
tags = self._sort_by_depth(tags)
# c14n only adds allowed genres but we may have had forbidden genres in
# the original tags list
tags = [self._format_tag(x) for x in tags if self._is_allowed(x)]
return self.config['separator'].as_str().join(
tags[:self.config['count'].get(int)]
)
def _format_tag(self, tag):
if self.config["title_case"]:
return tag.title()
return tag
def fetch_genre(self, lastfm_obj):
"""Return the genre for a pylast entity or None if no suitable genre
can be found. Ex. 'Electronic, House, Dance'
"""
min_weight = self.config['min_weight'].get(int)
return self._resolve_genres(self._tags_for(lastfm_obj, min_weight))
def _is_allowed(self, genre):
"""Determine whether the genre is present in the whitelist,
returning a boolean.
"""
if genre is None:
return False
if not self.whitelist or genre in self.whitelist:
return True
return False
# Cached entity lookups.
def _last_lookup(self, entity, method, *args):
"""Get a genre based on the named entity using the callable `method`
whose arguments are given in the sequence `args`. The genre lookup
is cached based on the entity name and the arguments. Before the
        lookup, each argument has some Unicode characters replaced with
rough ASCII equivalents in order to return better results from the
Last.fm database.
"""
# Shortcut if we're missing metadata.
if any(not s for s in args):
return None
key = '{}.{}'.format(entity,
'-'.join(str(a) for a in args))
if key in self._genre_cache:
return self._genre_cache[key]
else:
args_replaced = []
for arg in args:
for k, v in REPLACE.items():
arg = arg.replace(k, v)
args_replaced.append(arg)
genre = self.fetch_genre(method(*args_replaced))
self._genre_cache[key] = genre
return genre
def fetch_album_genre(self, obj):
"""Return the album genre for this Item or Album.
"""
return self._last_lookup(
'album', LASTFM.get_album, obj.albumartist, obj.album
)
def fetch_album_artist_genre(self, obj):
"""Return the album artist genre for this Item or Album.
"""
return self._last_lookup(
'artist', LASTFM.get_artist, obj.albumartist
)
def fetch_artist_genre(self, item):
"""Returns the track artist genre for this Item.
"""
return self._last_lookup(
'artist', LASTFM.get_artist, item.artist
)
def fetch_track_genre(self, obj):
"""Returns the track genre for this Item.
"""
return self._last_lookup(
'track', LASTFM.get_track, obj.artist, obj.title
)
def _get_genre(self, obj):
"""Get the genre string for an Album or Item object based on
self.sources. Return a `(genre, source)` pair. The
prioritization order is:
- track (for Items only)
- album
- artist
- original
- fallback
- None
"""
# Shortcut to existing genre if not forcing.
if not self.config['force'] and self._is_allowed(obj.genre):
return obj.genre, 'keep'
# Track genre (for Items only).
if isinstance(obj, library.Item):
if 'track' in self.sources:
result = self.fetch_track_genre(obj)
if result:
return result, 'track'
# Album genre.
if 'album' in self.sources:
result = self.fetch_album_genre(obj)
if result:
return result, 'album'
# Artist (or album artist) genre.
if 'artist' in self.sources:
result = None
if isinstance(obj, library.Item):
result = self.fetch_artist_genre(obj)
elif obj.albumartist != config['va_name'].as_str():
result = self.fetch_album_artist_genre(obj)
else:
# For "Various Artists", pick the most popular track genre.
item_genres = []
for item in obj.items():
item_genre = None
if 'track' in self.sources:
item_genre = self.fetch_track_genre(item)
if not item_genre:
item_genre = self.fetch_artist_genre(item)
if item_genre:
item_genres.append(item_genre)
if item_genres:
result, _ = plurality(item_genres)
if result:
return result, 'artist'
# Filter the existing genre.
if obj.genre:
result = self._resolve_genres([obj.genre])
if result:
return result, 'original'
# Fallback string.
fallback = self.config['fallback'].get()
if fallback:
return fallback, 'fallback'
return None, None
def commands(self):
lastgenre_cmd = ui.Subcommand('lastgenre', help='fetch genres')
lastgenre_cmd.parser.add_option(
'-f', '--force', dest='force',
action='store_true',
help='re-download genre when already present'
)
lastgenre_cmd.parser.add_option(
'-s', '--source', dest='source', type='string',
help='genre source: artist, album, or track'
)
lastgenre_cmd.parser.add_option(
'-A', '--items', action='store_false', dest='album',
help='match items instead of albums')
lastgenre_cmd.parser.add_option(
'-a', '--albums', action='store_true', dest='album',
help='match albums instead of items')
lastgenre_cmd.parser.set_defaults(album=True)
def lastgenre_func(lib, opts, args):
write = ui.should_write()
self.config.set_args(opts)
if opts.album:
# Fetch genres for whole albums
for album in lib.albums(ui.decargs(args)):
album.genre, src = self._get_genre(album)
self._log.info('genre for album {0} ({1}): {0.genre}',
album, src)
album.store()
for item in album.items():
# If we're using track-level sources, also look up each
# track on the album.
if 'track' in self.sources:
item.genre, src = self._get_genre(item)
item.store()
self._log.info(
'genre for track {0} ({1}): {0.genre}',
item, src)
if write:
item.try_write()
else:
# Just query singletons, i.e. items that are not part of
# an album
for item in lib.items(ui.decargs(args)):
item.genre, src = self._get_genre(item)
self._log.debug('added last.fm item genre ({0}): {1}',
src, item.genre)
item.store()
lastgenre_cmd.func = lastgenre_func
return [lastgenre_cmd]
def imported(self, session, task):
"""Event hook called when an import task finishes."""
if task.is_album:
album = task.album
album.genre, src = self._get_genre(album)
self._log.debug('added last.fm album genre ({0}): {1}',
src, album.genre)
album.store()
if 'track' in self.sources:
for item in album.items():
item.genre, src = self._get_genre(item)
self._log.debug('added last.fm item genre ({0}): {1}',
src, item.genre)
item.store()
else:
item = task.item
item.genre, src = self._get_genre(item)
self._log.debug('added last.fm item genre ({0}): {1}',
src, item.genre)
item.store()
def _tags_for(self, obj, min_weight=None):
"""Core genre identification routine.
Given a pylast entity (album or track), return a list of
tag names for that entity. Return an empty list if the entity is
not found or another error occurs.
If `min_weight` is specified, tags are filtered by weight.
"""
# Work around an inconsistency in pylast where
# Album.get_top_tags() does not return TopItem instances.
# https://github.com/pylast/pylast/issues/86
if isinstance(obj, pylast.Album):
obj = super(pylast.Album, obj)
try:
res = obj.get_top_tags()
except PYLAST_EXCEPTIONS as exc:
self._log.debug('last.fm error: {0}', exc)
return []
except Exception as exc:
# Isolate bugs in pylast.
self._log.debug('{}', traceback.format_exc())
self._log.error('error in pylast library: {0}', exc)
return []
# Filter by weight (optionally).
if min_weight:
res = [el for el in res if (int(el.weight or 0)) >= min_weight]
# Get strings from tags.
res = [el.item.get_name().lower() for el in res]
return res
| size: 16,849 | language: Python | extension: .py | total_lines: 411 | avg_line_length: 29.742092 | max_line_length: 79 | alphanum_fraction: 0.561205 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,684 | file_name: gstplayer.py | file_path: rembo10_headphones/lib/beetsplug/bpd/gstplayer.py | content: |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
import sys
import time
import _thread
import os
import copy
import urllib
from beets import ui
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
pass
class GstPlayer:
"""A music player abstracting GStreamer's Playbin element.
Create a player object, then call run() to start a thread with a
runloop. Then call play_file to play music. Use player.playing
to check whether music is currently playing.
A basic play queue is also implemented (just a Python list,
player.queue, whose last element is next to play). To use it,
just call enqueue() and then play(). When a track finishes and
another is available on the queue, it is played automatically.
"""
def __init__(self, finished_callback=None):
"""Initialize a player.
If a finished_callback is provided, it is called every time a
track started with play_file finishes.
Once the player has been created, call run() to begin the main
runloop in a separate thread.
"""
# Set up the Gstreamer player. From the pygst tutorial:
# https://pygstdocs.berlios.de/pygst-tutorial/playbin.html (gone)
# https://brettviren.github.io/pygst-tutorial-org/pygst-tutorial.html
####
# Updated to GStreamer 1.0 with:
# https://wiki.ubuntu.com/Novacut/GStreamer1.0
self.player = Gst.ElementFactory.make("playbin", "player")
if self.player is None:
raise ui.UserError("Could not create playbin")
fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
if fakesink is None:
raise ui.UserError("Could not create fakesink")
self.player.set_property("video-sink", fakesink)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect("message", self._handle_message)
# Set up our own stuff.
self.playing = False
self.finished_callback = finished_callback
self.cached_time = None
self._volume = 1.0
def _get_state(self):
"""Returns the current state flag of the playbin."""
# gst's get_state function returns a 3-tuple; we just want the
# status flag in position 1.
return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]
def _handle_message(self, bus, message):
"""Callback for status updates from GStreamer."""
if message.type == Gst.MessageType.EOS:
# file finished playing
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
if self.finished_callback:
self.finished_callback()
elif message.type == Gst.MessageType.ERROR:
# error
self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error()
print(f"Error: {err}")
self.playing = False
def _set_volume(self, volume):
"""Set the volume level to a value in the range [0, 1.5]."""
# And the volume for the playbin.
self._volume = volume
self.player.set_property("volume", volume)
def _get_volume(self):
"""Get the volume as a float in the range [0, 1.5]."""
return self._volume
volume = property(_get_volume, _set_volume)
def play_file(self, path):
"""Immediately begin playing the audio file at the given
path.
"""
self.player.set_state(Gst.State.NULL)
if isinstance(path, str):
path = path.encode('utf-8')
uri = 'file://' + urllib.parse.quote(path)
self.player.set_property("uri", uri)
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def play(self):
"""If paused, resume playback."""
if self._get_state() == Gst.State.PAUSED:
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def pause(self):
"""Pause playback."""
self.player.set_state(Gst.State.PAUSED)
def stop(self):
"""Halt playback."""
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
def run(self):
"""Start a new thread for the player.
Call this function before trying to play any music with
play_file() or play().
"""
# If we don't use the MainLoop, messages are never sent.
def start():
loop = GLib.MainLoop()
loop.run()
_thread.start_new_thread(start, ())
def time(self):
"""Returns a tuple containing (position, length) where both
values are integers in seconds. If no stream is available,
returns (0, 0).
"""
fmt = Gst.Format(Gst.Format.TIME)
try:
posq = self.player.query_position(fmt)
if not posq[0]:
raise QueryError("query_position failed")
pos = posq[1] / (10 ** 9)
lengthq = self.player.query_duration(fmt)
if not lengthq[0]:
raise QueryError("query_duration failed")
length = lengthq[1] / (10 ** 9)
self.cached_time = (pos, length)
return (pos, length)
except QueryError:
# Stream not ready. For small gaps of time, for instance
# after seeking, the time values are unavailable. For this
            # reason, we cache the most recent result.
if self.playing and self.cached_time:
return self.cached_time
else:
return (0, 0)
def seek(self, position):
"""Seeks to position (in seconds)."""
cur_pos, cur_len = self.time()
if position > cur_len:
self.stop()
return
fmt = Gst.Format(Gst.Format.TIME)
ns = position * 10 ** 9 # convert to nanoseconds
self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)
# save new cached time
self.cached_time = (position, cur_len)
def block(self):
"""Block until playing finishes."""
while self.playing:
time.sleep(1)
def get_decoders(self):
return get_decoders()
def get_decoders():
"""Get supported audio decoders from GStreamer.
Returns a dict mapping decoder element names to the associated media types
and file extensions.
"""
# We only care about audio decoder elements.
filt = (Gst.ELEMENT_FACTORY_TYPE_DEPAYLOADER |
Gst.ELEMENT_FACTORY_TYPE_DEMUXER |
Gst.ELEMENT_FACTORY_TYPE_PARSER |
Gst.ELEMENT_FACTORY_TYPE_DECODER |
Gst.ELEMENT_FACTORY_TYPE_MEDIA_AUDIO)
decoders = {}
mime_types = set()
for f in Gst.ElementFactory.list_get_elements(filt, Gst.Rank.NONE):
for pad in f.get_static_pad_templates():
if pad.direction == Gst.PadDirection.SINK:
caps = pad.static_caps.get()
mimes = set()
for i in range(caps.get_size()):
struct = caps.get_structure(i)
mime = struct.get_name()
if mime == 'unknown/unknown':
continue
mimes.add(mime)
mime_types.add(mime)
if mimes:
decoders[f.get_name()] = (mimes, set())
    # Check all the TypeFindFactory plugin features from the registry. If they
# are associated with an audio media type that we found above, get the list
# of corresponding file extensions.
mime_extensions = {mime: set() for mime in mime_types}
for feat in Gst.Registry.get().get_feature_list(Gst.TypeFindFactory):
caps = feat.get_caps()
if caps:
for i in range(caps.get_size()):
struct = caps.get_structure(i)
mime = struct.get_name()
if mime in mime_types:
mime_extensions[mime].update(feat.get_extensions())
# Fill in the slot we left for file extensions.
for name, (mimes, exts) in decoders.items():
for mime in mimes:
exts.update(mime_extensions[mime])
return decoders
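# Shape sketch of the returned mapping. The element names, media types and
# extensions below are hypothetical; the real contents depend on the
# installed GStreamer plugins:
#
#     {'flacparse': ({'audio/x-flac'}, {'flac'}),
#      'mpegaudioparse': ({'audio/mpeg'}, {'mp3'})}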
def play_simple(paths):
"""Play the files in paths in a straightforward way, without
using the player's callback function.
"""
p = GstPlayer()
p.run()
for path in paths:
p.play_file(path)
p.block()
def play_complicated(paths):
"""Play the files in the path one after the other by using the
callback function to advance to the next song.
"""
my_paths = copy.copy(paths)
def next_song():
my_paths.pop(0)
p.play_file(my_paths[0])
p = GstPlayer(next_song)
p.run()
p.play_file(my_paths[0])
while my_paths:
time.sleep(1)
if __name__ == '__main__':
# A very simple command-line player. Just give it names of audio
# files on the command line; these are all played in sequence.
paths = [os.path.abspath(os.path.expanduser(p))
for p in sys.argv[1:]]
# play_simple(paths)
play_complicated(paths)
| size: 9,905 | language: Python | extension: .py | total_lines: 245 | avg_line_length: 31.804082 | max_line_length: 79 | alphanum_fraction: 0.620769 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,685 | file_name: __init__.py | file_path: rembo10_headphones/lib/beetsplug/bpd/__init__.py | content: |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A clone of the Music Player Daemon (MPD) that plays music from a
Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""
import re
import sys
from string import Template
import traceback
import random
import time
import math
import inspect
import socket
import beets
from beets.plugins import BeetsPlugin
import beets.ui
from beets import vfs
from beets.util import bluelet
from beets.library import Item
from beets import dbcore
from mediafile import MediaFile
PROTOCOL_VERSION = '0.16.0'
BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = 'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin'
CLIST_END = 'command_list_end'
RESP_OK = 'OK'
RESP_CLIST_VERBOSE = 'list_OK'
RESP_ERR = 'ACK'
NEWLINE = "\n"
ERROR_NOT_LIST = 1
ERROR_ARG = 2
ERROR_PASSWORD = 3
ERROR_PERMISSION = 4
ERROR_UNKNOWN = 5
ERROR_NO_EXIST = 50
ERROR_PLAYLIST_MAX = 51
ERROR_SYSTEM = 52
ERROR_PLAYLIST_LOAD = 53
ERROR_UPDATE_ALREADY = 54
ERROR_PLAYER_SYNC = 55
ERROR_EXIST = 56
VOLUME_MIN = 0
VOLUME_MAX = 100
SAFE_COMMANDS = (
# Commands that are available when unauthenticated.
'close', 'commands', 'notcommands', 'password', 'ping',
)
# List of subsystems/events used by the `idle` command.
SUBSYSTEMS = [
'update', 'player', 'mixer', 'options', 'playlist', 'database',
# Related to unsupported commands:
'stored_playlist', 'output', 'subscription', 'sticker', 'message',
'partition',
]
ITEM_KEYS_WRITABLE = set(MediaFile.fields()).intersection(Item._fields.keys())
# Gstreamer import error.
class NoGstreamerError(Exception):
pass
# Error-handling, exceptions, parameter parsing.
class BPDError(Exception):
"""An error that should be exposed to the client to the BPD
server.
"""
def __init__(self, code, message, cmd_name='', index=0):
self.code = code
self.message = message
self.cmd_name = cmd_name
self.index = index
template = Template('$resp [$code@$index] {$cmd_name} $message')
def response(self):
"""Returns a string to be used as the response code for the
erring command.
"""
return self.template.substitute({
'resp': RESP_ERR,
'code': self.code,
'index': self.index,
'cmd_name': self.cmd_name,
'message': self.message,
})
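# Response-format sketch for the template above; the command name and
# message are illustrative.
def _bpd_error_sketch():
    err = BPDError(ERROR_ARG, 'invalid type for argument', 'play')
    assert err.response() == 'ACK [2@0] {play} invalid type for argument'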
def make_bpd_error(s_code, s_message):
"""Create a BPDError subclass for a static code and message.
"""
class NewBPDError(BPDError):
code = s_code
message = s_message
cmd_name = ''
index = 0
def __init__(self):
pass
return NewBPDError
ArgumentTypeError = make_bpd_error(ERROR_ARG, 'invalid type for argument')
ArgumentIndexError = make_bpd_error(ERROR_ARG, 'argument out of range')
ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, 'argument not found')
def cast_arg(t, val):
"""Attempts to call t on val, raising a ArgumentTypeError
on ValueError.
If 't' is the special string 'intbool', attempts to cast first
to an int and then to a bool (i.e., 1=True, 0=False).
"""
if t == 'intbool':
return cast_arg(bool, cast_arg(int, val))
else:
try:
return t(val)
except ValueError:
raise ArgumentTypeError()
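# Casting sketch, including the special 'intbool' mode ('1' -> True):
def _cast_arg_sketch():
    assert cast_arg(int, '42') == 42
    assert cast_arg('intbool', '1') is True
    assert cast_arg('intbool', '0') is False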
class BPDClose(Exception):
"""Raised by a command invocation to indicate that the connection
should be closed.
"""
class BPDIdle(Exception):
"""Raised by a command to indicate the client wants to enter the idle state
and should be notified when a relevant event happens.
"""
def __init__(self, subsystems):
super().__init__()
self.subsystems = set(subsystems)
# Generic server infrastructure, implementing the basic protocol.
class BaseServer:
"""A MPD-compatible music player server.
The functions with the `cmd_` prefix are invoked in response to
client commands. For instance, if the client says `status`,
`cmd_status` will be invoked. The arguments to the client's commands
are used as function arguments following the connection issuing the
command. The functions may send data on the connection. They may
also raise BPDError exceptions to report errors.
This is a generic superclass and doesn't support many commands.
"""
def __init__(self, host, port, password, ctrl_port, log, ctrl_host=None):
"""Create a new server bound to address `host` and listening
on port `port`. If `password` is given, it is required to do
anything significant on the server.
A separate control socket is established listening to `ctrl_host` on
port `ctrl_port` which is used to forward notifications from the player
and can be sent debug commands (e.g. using netcat).
"""
self.host, self.port, self.password = host, port, password
self.ctrl_host, self.ctrl_port = ctrl_host or host, ctrl_port
self.ctrl_sock = None
self._log = log
# Default server values.
self.random = False
self.repeat = False
self.consume = False
self.single = False
self.volume = VOLUME_MAX
self.crossfade = 0
self.mixrampdb = 0.0
self.mixrampdelay = float('nan')
self.replay_gain_mode = 'off'
self.playlist = []
self.playlist_version = 0
self.current_index = -1
self.paused = False
self.error = None
# Current connections
self.connections = set()
# Object for random numbers generation
self.random_obj = random.Random()
def connect(self, conn):
"""A new client has connected.
"""
self.connections.add(conn)
def disconnect(self, conn):
"""Client has disconnected; clean up residual state.
"""
self.connections.remove(conn)
def run(self):
"""Block and start listening for connections from clients. An
interrupt (^C) closes the server.
"""
self.startup_time = time.time()
def start():
yield bluelet.spawn(
bluelet.server(self.ctrl_host, self.ctrl_port,
ControlConnection.handler(self)))
yield bluelet.server(self.host, self.port,
MPDConnection.handler(self))
bluelet.run(start())
def dispatch_events(self):
"""If any clients have idle events ready, send them.
"""
# We need a copy of `self.connections` here since clients might
# disconnect once we try and send to them, changing `self.connections`.
for conn in list(self.connections):
yield bluelet.spawn(conn.send_notifications())
def _ctrl_send(self, message):
"""Send some data over the control socket.
If it's our first time, open the socket. The message should be a
string without a terminal newline.
"""
if not self.ctrl_sock:
self.ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ctrl_sock.connect((self.ctrl_host, self.ctrl_port))
self.ctrl_sock.sendall((message + '\n').encode('utf-8'))
def _send_event(self, event):
"""Notify subscribed connections of an event."""
for conn in self.connections:
conn.notify(event)
def _item_info(self, item):
"""An abstract method that should response lines containing a
single song's metadata.
"""
raise NotImplementedError
def _item_id(self, item):
"""An abstract method returning the integer id for an item.
"""
raise NotImplementedError
def _id_to_index(self, track_id):
"""Searches the playlist for a song with the given id and
returns its index in the playlist.
"""
track_id = cast_arg(int, track_id)
for index, track in enumerate(self.playlist):
if self._item_id(track) == track_id:
return index
# Loop finished with no track found.
raise ArgumentNotFoundError()
def _random_idx(self):
"""Returns a random index different from the current one.
If there are no songs in the playlist it returns -1.
If there is only one song in the playlist it returns 0.
"""
if len(self.playlist) < 2:
return len(self.playlist) - 1
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
while new_index == self.current_index:
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
return new_index
def _succ_idx(self):
"""Returns the index for the next song to play.
It also considers random, single and repeat flags.
No boundaries are checked.
"""
if self.repeat and self.single:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index + 1
def _prev_idx(self):
"""Returns the index for the previous song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat and self.single:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index - 1
def cmd_ping(self, conn):
"""Succeeds."""
pass
def cmd_idle(self, conn, *subsystems):
subsystems = subsystems or SUBSYSTEMS
for system in subsystems:
if system not in SUBSYSTEMS:
raise BPDError(ERROR_ARG,
f'Unrecognised idle event: {system}')
raise BPDIdle(subsystems) # put the connection into idle mode
def cmd_kill(self, conn):
"""Exits the server process."""
sys.exit(0)
def cmd_close(self, conn):
"""Closes the connection."""
raise BPDClose()
def cmd_password(self, conn, password):
"""Attempts password authentication."""
if password == self.password:
conn.authenticated = True
else:
conn.authenticated = False
raise BPDError(ERROR_PASSWORD, 'incorrect password')
def cmd_commands(self, conn):
"""Lists the commands available to the user."""
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
yield 'command: ' + cmd
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith('cmd_'):
yield 'command: ' + func[4:]
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
if self.password and not conn.authenticated:
# Not authenticated. Show privileged commands.
for func in dir(self):
if func.startswith('cmd_'):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
yield 'command: ' + cmd
else:
# Authenticated. No commands are unavailable.
pass
def cmd_status(self, conn):
"""Returns some status information for use with an
implementation of cmd_status.
Gives a list of response-lines for: volume, repeat, random,
playlist, playlistlength, and xfade.
"""
yield (
'repeat: ' + str(int(self.repeat)),
'random: ' + str(int(self.random)),
'consume: ' + str(int(self.consume)),
'single: ' + str(int(self.single)),
'playlist: ' + str(self.playlist_version),
'playlistlength: ' + str(len(self.playlist)),
'mixrampdb: ' + str(self.mixrampdb),
)
if self.volume > 0:
yield 'volume: ' + str(self.volume)
if not math.isnan(self.mixrampdelay):
yield 'mixrampdelay: ' + str(self.mixrampdelay)
if self.crossfade > 0:
yield 'xfade: ' + str(self.crossfade)
if self.current_index == -1:
state = 'stop'
elif self.paused:
state = 'pause'
else:
state = 'play'
yield 'state: ' + state
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield 'song: ' + str(self.current_index)
yield 'songid: ' + str(current_id)
if len(self.playlist) > self.current_index + 1:
# If there's a next song, report its index too.
next_id = self._item_id(self.playlist[self.current_index + 1])
yield 'nextsong: ' + str(self.current_index + 1)
yield 'nextsongid: ' + str(next_id)
if self.error:
yield 'error: ' + self.error
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
error is set when a problem arises not in response to a
command (for instance, when playing a file).
"""
self.error = None
def cmd_random(self, conn, state):
"""Set or unset random (shuffle) mode."""
self.random = cast_arg('intbool', state)
self._send_event('options')
def cmd_repeat(self, conn, state):
"""Set or unset repeat mode."""
self.repeat = cast_arg('intbool', state)
self._send_event('options')
def cmd_consume(self, conn, state):
"""Set or unset consume mode."""
self.consume = cast_arg('intbool', state)
self._send_event('options')
def cmd_single(self, conn, state):
"""Set or unset single mode."""
# TODO support oneshot in addition to 0 and 1 [MPD 0.20]
self.single = cast_arg('intbool', state)
self._send_event('options')
def cmd_setvol(self, conn, vol):
"""Set the player's volume level (0-100)."""
vol = cast_arg(int, vol)
if vol < VOLUME_MIN or vol > VOLUME_MAX:
raise BPDError(ERROR_ARG, 'volume out of range')
self.volume = vol
self._send_event('mixer')
def cmd_volume(self, conn, vol_delta):
"""Deprecated command to change the volume by a relative amount."""
vol_delta = cast_arg(int, vol_delta)
return self.cmd_setvol(conn, self.volume + vol_delta)
def cmd_crossfade(self, conn, crossfade):
"""Set the number of seconds of crossfading."""
crossfade = cast_arg(int, crossfade)
if crossfade < 0:
raise BPDError(ERROR_ARG, 'crossfade time must be nonnegative')
self._log.warning('crossfade is not implemented in bpd')
self.crossfade = crossfade
self._send_event('options')
def cmd_mixrampdb(self, conn, db):
"""Set the mixramp normalised max volume in dB."""
db = cast_arg(float, db)
if db > 0:
raise BPDError(ERROR_ARG, 'mixrampdb time must be negative')
self._log.warning('mixramp is not implemented in bpd')
self.mixrampdb = db
self._send_event('options')
def cmd_mixrampdelay(self, conn, delay):
"""Set the mixramp delay in seconds."""
delay = cast_arg(float, delay)
if delay < 0:
raise BPDError(ERROR_ARG, 'mixrampdelay time must be nonnegative')
self._log.warning('mixramp is not implemented in bpd')
self.mixrampdelay = delay
self._send_event('options')
def cmd_replay_gain_mode(self, conn, mode):
"""Set the replay gain mode."""
if mode not in ['off', 'track', 'album', 'auto']:
raise BPDError(ERROR_ARG, 'Unrecognised replay gain mode')
self._log.warning('replay gain is not implemented in bpd')
self.replay_gain_mode = mode
self._send_event('options')
def cmd_replay_gain_status(self, conn):
"""Get the replaygain mode."""
yield 'replay_gain_mode: ' + str(self.replay_gain_mode)
def cmd_clear(self, conn):
"""Clear the playlist."""
self.playlist = []
self.playlist_version += 1
self.cmd_stop(conn)
self._send_event('playlist')
def cmd_delete(self, conn, index):
"""Remove the song at index from the playlist."""
index = cast_arg(int, index)
try:
            del self.playlist[index]
except IndexError:
raise ArgumentIndexError()
self.playlist_version += 1
if self.current_index == index: # Deleted playing song.
self.cmd_stop(conn)
elif index < self.current_index: # Deleted before playing.
# Shift playing index down.
self.current_index -= 1
self._send_event('playlist')
def cmd_deleteid(self, conn, track_id):
self.cmd_delete(conn, self._id_to_index(track_id))
def cmd_move(self, conn, idx_from, idx_to):
"""Move a track in the playlist."""
idx_from = cast_arg(int, idx_from)
idx_to = cast_arg(int, idx_to)
try:
track = self.playlist.pop(idx_from)
self.playlist.insert(idx_to, track)
except IndexError:
raise ArgumentIndexError()
# Update currently-playing song.
if idx_from == self.current_index:
self.current_index = idx_to
elif idx_from < self.current_index <= idx_to:
self.current_index -= 1
elif idx_from > self.current_index >= idx_to:
self.current_index += 1
self.playlist_version += 1
self._send_event('playlist')
def cmd_moveid(self, conn, idx_from, idx_to):
idx_from = self._id_to_index(idx_from)
return self.cmd_move(conn, idx_from, idx_to)
def cmd_swap(self, conn, i, j):
"""Swaps two tracks in the playlist."""
i = cast_arg(int, i)
j = cast_arg(int, j)
try:
track_i = self.playlist[i]
track_j = self.playlist[j]
except IndexError:
raise ArgumentIndexError()
self.playlist[j] = track_i
self.playlist[i] = track_j
# Update currently-playing song.
if self.current_index == i:
self.current_index = j
elif self.current_index == j:
self.current_index = i
self.playlist_version += 1
self._send_event('playlist')
def cmd_swapid(self, conn, i_id, j_id):
i = self._id_to_index(i_id)
j = self._id_to_index(j_id)
return self.cmd_swap(conn, i, j)
def cmd_urlhandlers(self, conn):
"""Indicates supported URL schemes. None by default."""
pass
def cmd_playlistinfo(self, conn, index=None):
"""Gives metadata information about the entire playlist or a
single track, given by its index.
"""
if index is None:
for track in self.playlist:
yield self._item_info(track)
else:
indices = self._parse_range(index, accept_single_number=True)
try:
tracks = [self.playlist[i] for i in indices]
except IndexError:
raise ArgumentIndexError()
for track in tracks:
yield self._item_info(track)
def cmd_playlistid(self, conn, track_id=None):
if track_id is not None:
track_id = cast_arg(int, track_id)
track_id = self._id_to_index(track_id)
return self.cmd_playlistinfo(conn, track_id)
def cmd_plchanges(self, conn, version):
"""Sends playlist changes since the given version.
This is a "fake" implementation that ignores the version and
just returns the entire playlist (rather like version=0). This
seems to satisfy many clients.
"""
return self.cmd_playlistinfo(conn)
def cmd_plchangesposid(self, conn, version):
"""Like plchanges, but only sends position and id.
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield 'cpos: ' + str(idx)
yield 'Id: ' + str(track.id)
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song.
"""
if self.current_index != -1: # -1 means stopped.
track = self.playlist[self.current_index]
yield self._item_info(track)
def cmd_next(self, conn):
"""Advance to the next song in the playlist."""
old_index = self.current_index
self.current_index = self._succ_idx()
if self.consume:
# TODO how does consume interact with single+repeat?
self.playlist.pop(old_index)
if self.current_index > old_index:
self.current_index -= 1
self.playlist_version += 1
self._send_event("playlist")
if self.current_index >= len(self.playlist):
# Fallen off the end. Move to stopped state or loop.
if self.repeat:
self.current_index = -1
return self.cmd_play(conn)
return self.cmd_stop(conn)
elif self.single and not self.repeat:
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_previous(self, conn):
"""Step back to the last song."""
old_index = self.current_index
self.current_index = self._prev_idx()
if self.consume:
self.playlist.pop(old_index)
if self.current_index < 0:
if self.repeat:
self.current_index = len(self.playlist) - 1
else:
self.current_index = 0
return self.cmd_play(conn)
def cmd_pause(self, conn, state=None):
"""Set the pause state playback."""
if state is None:
self.paused = not self.paused # Toggle.
else:
self.paused = cast_arg('intbool', state)
self._send_event('player')
def cmd_play(self, conn, index=-1):
"""Begin playback, possibly at a specified playlist index."""
index = cast_arg(int, index)
if index < -1 or index >= len(self.playlist):
raise ArgumentIndexError()
if index == -1: # No index specified: start where we are.
if not self.playlist: # Empty playlist: stop immediately.
return self.cmd_stop(conn)
if self.current_index == -1: # No current song.
self.current_index = 0 # Start at the beginning.
# If we have a current song, just stay there.
else: # Start with the specified index.
self.current_index = index
self.paused = False
self._send_event('player')
def cmd_playid(self, conn, track_id=0):
track_id = cast_arg(int, track_id)
if track_id == -1:
index = -1
else:
index = self._id_to_index(track_id)
return self.cmd_play(conn, index)
def cmd_stop(self, conn):
"""Stop playback."""
self.current_index = -1
self.paused = False
self._send_event('player')
def cmd_seek(self, conn, index, pos):
"""Seek to a specified point in a specified song."""
index = cast_arg(int, index)
if index < 0 or index >= len(self.playlist):
raise ArgumentIndexError()
self.current_index = index
self._send_event('player')
def cmd_seekid(self, conn, track_id, pos):
index = self._id_to_index(track_id)
return self.cmd_seek(conn, index, pos)
# Additions to the MPD protocol.
def cmd_crash_TypeError(self, conn): # noqa: N802
"""Deliberately trigger a TypeError for testing purposes.
We want to test that the server properly responds with ERROR_SYSTEM
without crashing, and that this is not treated as ERROR_ARG (since it
is caused by a programming error, not a protocol error).
"""
'a' + 2
class Connection:
"""A connection between a client and the server.
"""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`.
"""
self.server = server
self.sock = sock
self.address = '{}:{}'.format(*sock.sock.getpeername())
def debug(self, message, kind=' '):
"""Log a debug message about this connection.
"""
self.server._log.debug('{}[{}]: {}', kind, self.address, message)
def run(self):
pass
def send(self, lines):
"""Send lines, which which is either a single string or an
iterable consisting of strings, to the client. A newline is
added after every string. Returns a Bluelet event that sends
the data.
"""
if isinstance(lines, str):
lines = [lines]
out = NEWLINE.join(lines) + NEWLINE
for l in out.split(NEWLINE)[:-1]:
self.debug(l, kind='>')
if isinstance(out, str):
out = out.encode('utf-8')
return self.sock.sendall(out)
@classmethod
def handler(cls, server):
def _handle(sock):
"""Creates a new `Connection` and runs it.
"""
return cls(server, sock).run()
return _handle
class MPDConnection(Connection):
"""A connection that receives commands from an MPD-compatible client.
"""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`.
"""
super().__init__(server, sock)
self.authenticated = False
self.notifications = set()
self.idle_subscriptions = set()
def do_command(self, command):
"""A coroutine that runs the given command and sends an
appropriate response."""
try:
yield bluelet.call(command.run(self))
except BPDError as e:
# Send the error.
yield self.send(e.response())
else:
# Send success code.
yield self.send(RESP_OK)
def disconnect(self):
"""The connection has closed for any reason.
"""
self.server.disconnect(self)
self.debug('disconnected', kind='*')
def notify(self, event):
"""Queue up an event for sending to this client.
"""
self.notifications.add(event)
def send_notifications(self, force_close_idle=False):
"""Send the client any queued events now.
"""
pending = self.notifications.intersection(self.idle_subscriptions)
try:
for event in pending:
yield self.send(f'changed: {event}')
if pending or force_close_idle:
self.idle_subscriptions = set()
self.notifications = self.notifications.difference(pending)
yield self.send(RESP_OK)
except bluelet.SocketClosedError:
self.disconnect() # Client disappeared.
def run(self):
"""Send a greeting to the client and begin processing commands
as they arrive.
"""
self.debug('connected', kind='*')
self.server.connect(self)
yield self.send(HELLO)
clist = None # Initially, no command list is being constructed.
while True:
line = yield self.sock.readline()
if not line:
self.disconnect() # Client disappeared.
break
line = line.strip()
if not line:
err = BPDError(ERROR_UNKNOWN, 'No command given')
yield self.send(err.response())
self.disconnect() # Client sent a blank line.
break
line = line.decode('utf8') # MPD protocol uses UTF-8.
for l in line.split(NEWLINE):
self.debug(l, kind='<')
if self.idle_subscriptions:
# The connection is in idle mode.
if line == 'noidle':
yield bluelet.call(self.send_notifications(True))
else:
err = BPDError(ERROR_UNKNOWN,
f'Got command while idle: {line}')
yield self.send(err.response())
break
continue
if line == 'noidle':
# When not in idle, this command sends no response.
continue
if clist is not None:
# Command list already opened.
if line == CLIST_END:
yield bluelet.call(self.do_command(clist))
clist = None # Clear the command list.
yield bluelet.call(self.server.dispatch_events())
else:
clist.append(Command(line))
elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN:
# Begin a command list.
clist = CommandList([], line == CLIST_VERBOSE_BEGIN)
else:
# Ordinary command.
try:
yield bluelet.call(self.do_command(Command(line)))
except BPDClose:
# Command indicates that the conn should close.
self.sock.close()
self.disconnect() # Client explicitly closed.
return
except BPDIdle as e:
self.idle_subscriptions = e.subsystems
self.debug('awaiting: {}'.format(' '.join(e.subsystems)),
kind='z')
yield bluelet.call(self.server.dispatch_events())
class ControlConnection(Connection):
"""A connection used to control BPD for debugging and internal events.
"""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`.
"""
super().__init__(server, sock)
def debug(self, message, kind=' '):
self.server._log.debug('CTRL {}[{}]: {}', kind, self.address, message)
def run(self):
"""Listen for control commands and delegate to `ctrl_*` methods.
"""
self.debug('connected', kind='*')
while True:
line = yield self.sock.readline()
if not line:
break # Client disappeared.
line = line.strip()
if not line:
break # Client sent a blank line.
line = line.decode('utf8') # Protocol uses UTF-8.
for l in line.split(NEWLINE):
self.debug(l, kind='<')
command = Command(line)
try:
func = command.delegate('ctrl_', self)
yield bluelet.call(func(*command.args))
except (AttributeError, TypeError) as e:
yield self.send('ERROR: {}'.format(e.args[0]))
except Exception:
yield self.send(['ERROR: server error',
traceback.format_exc().rstrip()])
def ctrl_play_finished(self):
"""Callback from the player signalling a song finished playing.
"""
yield bluelet.call(self.server.dispatch_events())
def ctrl_profile(self):
"""Memory profiling for debugging.
"""
from guppy import hpy
heap = hpy().heap()
yield self.send(heap)
def ctrl_nickname(self, oldlabel, newlabel):
"""Rename a client in the log messages.
"""
for c in self.server.connections:
if c.address == oldlabel:
c.address = newlabel
break
else:
yield self.send(f'ERROR: no such client: {oldlabel}')
class Command:
"""A command issued by the client for processing by the server.
"""
command_re = re.compile(r'^([^ \t]+)[ \t]*')
arg_re = re.compile(r'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing
the string for command name and arguments.
"""
command_match = self.command_re.match(s)
self.name = command_match.group(1)
self.args = []
arg_matches = self.arg_re.findall(s[command_match.end():])
for match in arg_matches:
if match[0]:
# Quoted argument.
arg = match[0]
arg = arg.replace('\\"', '"').replace('\\\\', '\\')
else:
# Unquoted argument.
arg = match[1]
self.args.append(arg)
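    # Illustrative sketch (added comment, not in the original source):
    # with command_re/arg_re above, the line
    #     find Artist "Foo \"Bar\""
    # parses to name == 'find' and args == ['Artist', 'Foo "Bar"'];
    # quoted arguments are unescaped, unquoted ones are taken verbatim.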
def delegate(self, prefix, target, extra_args=0):
"""Get the target method that corresponds to this command.
The `prefix` is prepended to the command name and then the resulting
name is used to search `target` for a method with a compatible number
of arguments.
"""
# Attempt to get correct command function.
func_name = prefix + self.name
if not hasattr(target, func_name):
raise AttributeError(f'unknown command "{self.name}"')
func = getattr(target, func_name)
argspec = inspect.getfullargspec(func)
# Check that `func` is able to handle the number of arguments sent
# by the client (so we can raise ERROR_ARG instead of ERROR_SYSTEM).
# Maximum accepted arguments: argspec includes "self".
max_args = len(argspec.args) - 1 - extra_args
# Minimum accepted arguments: some arguments might be optional.
min_args = max_args
if argspec.defaults:
min_args -= len(argspec.defaults)
wrong_num = (len(self.args) > max_args) or (len(self.args) < min_args)
# If the command accepts a variable number of arguments skip the check.
if wrong_num and not argspec.varargs:
raise TypeError('wrong number of arguments for "{}"'
.format(self.name), self.name)
return func
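    # Example (added comment, not in the original source): run() calls this
    # with prefix 'cmd_' and target conn.server, so Command('status')
    # resolves cmd_status. A line like 'play 1 2' raises TypeError here,
    # since cmd_play accepts at most one argument besides conn, and run()
    # maps that TypeError to ERROR_ARG.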
def run(self, conn):
"""A coroutine that executes the command on the given
connection.
"""
try:
# `conn` is an extra argument to all cmd handlers.
func = self.delegate('cmd_', conn.server, extra_args=1)
except AttributeError as e:
raise BPDError(ERROR_UNKNOWN, e.args[0])
except TypeError as e:
raise BPDError(ERROR_ARG, e.args[0], self.name)
# Ensure we have permission for this command.
if conn.server.password and \
not conn.authenticated and \
self.name not in SAFE_COMMANDS:
raise BPDError(ERROR_PERMISSION, 'insufficient privileges')
try:
args = [conn] + self.args
results = func(*args)
if results:
for data in results:
yield conn.send(data)
except BPDError as e:
# An exposed error. Set the command name and then let
# the Connection handle it.
e.cmd_name = self.name
raise e
except BPDClose:
# An indication that the connection should close. Send
# it on the Connection.
raise
except BPDIdle:
raise
except Exception:
# An "unintentional" error. Hide it from the client.
conn.server._log.error('{}', traceback.format_exc())
raise BPDError(ERROR_SYSTEM, 'server error', self.name)
class CommandList(list):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.
"""
def __init__(self, sequence=None, verbose=False):
"""Create a new `CommandList` from the given sequence of
`Command`s. If `verbose`, this is a verbose command list.
"""
if sequence:
for item in sequence:
self.append(item)
self.verbose = verbose
def run(self, conn):
"""Coroutine executing all the commands in this list.
"""
for i, command in enumerate(self):
try:
yield bluelet.call(command.run(conn))
except BPDError as e:
# If the command failed, stop executing.
e.index = i # Give the error the correct index.
raise e
# Otherwise, possibly send the output delimiter if we're in a
# verbose ("OK") command list.
if self.verbose:
yield conn.send(RESP_CLIST_VERBOSE)
# A subclass of the basic, protocol-handling server that actually plays
# music.
class Server(BaseServer):
"""An MPD-compatible server using GStreamer to play audio and beets
to store its library.
"""
def __init__(self, library, host, port, password, ctrl_port, log):
try:
from beetsplug.bpd import gstplayer
except ImportError as e:
# This is a little hacky, but it's the best I know for now.
if e.args[0].endswith(' gst'):
raise NoGstreamerError()
else:
raise
log.info('Starting server...')
super().__init__(host, port, password, ctrl_port, log)
self.lib = library
self.player = gstplayer.GstPlayer(self.play_finished)
self.cmd_update(None)
log.info('Server ready and listening on {}:{}'.format(
host, port))
log.debug('Listening for control signals on {}:{}'.format(
host, ctrl_port))
def run(self):
self.player.run()
super().run()
def play_finished(self):
"""A callback invoked every time our player finishes a track.
"""
self.cmd_next(None)
self._ctrl_send('play_finished')
# Metadata helper functions.
def _item_info(self, item):
info_lines = [
'file: ' + item.destination(fragment=True),
'Time: ' + str(int(item.length)),
'duration: ' + f'{item.length:.3f}',
'Id: ' + str(item.id),
]
try:
pos = self._id_to_index(item.id)
info_lines.append('Pos: ' + str(pos))
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
for tagtype, field in self.tagtype_map.items():
info_lines.append('{}: {}'.format(
tagtype, str(getattr(item, field))))
return info_lines
def _parse_range(self, items, accept_single_number=False):
"""Convert a range of positions to a list of item info.
MPD specifies ranges as START:STOP (endpoint excluded) for some
commands. Sometimes a single number can be provided instead.
"""
try:
start, stop = str(items).split(':', 1)
except ValueError:
if accept_single_number:
return [cast_arg(int, items)]
raise BPDError(ERROR_ARG, 'bad range syntax')
start = cast_arg(int, start)
stop = cast_arg(int, stop)
return range(start, stop)
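    # Example (added comment, not in the original source):
    # _parse_range('2:5') yields range(2, 5), i.e. positions 2-4 (the stop
    # endpoint is excluded, as in MPD), while _parse_range('7',
    # accept_single_number=True) yields [7].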
def _item_id(self, item):
return item.id
# Database updating.
def cmd_update(self, conn, path='/'):
"""Updates the catalog to reflect the current database state.
"""
# Path is ignored. Also, the real MPD does this asynchronously;
# this is done inline.
self._log.debug('Building directory tree...')
self.tree = vfs.libtree(self.lib)
self._log.debug('Finished building directory tree.')
self.updated_time = time.time()
self._send_event('update')
self._send_event('database')
# Path (directory tree) browsing.
def _resolve_path(self, path):
"""Returns a VFS node or an item ID located at the path given.
        If the path does not exist, raises an ArgumentNotFoundError.
"""
components = path.split('/')
node = self.tree
for component in components:
if not component:
continue
if isinstance(node, int):
# We're trying to descend into a file node.
raise ArgumentNotFoundError()
if component in node.files:
node = node.files[component]
elif component in node.dirs:
node = node.dirs[component]
else:
raise ArgumentNotFoundError()
return node
def _path_join(self, p1, p2):
"""Smashes together two BPD paths."""
out = p1 + '/' + p2
return out.replace('//', '/').replace('//', '/')
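    # Example (added comment, not in the original source):
    # _path_join('/music', '/artist') gives '/music/artist'; duplicate
    # slashes from either operand are collapsed.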
def cmd_lsinfo(self, conn, path="/"):
"""Sends info on all the items in the path."""
node = self._resolve_path(path)
if isinstance(node, int):
# Trying to list a track.
raise BPDError(ERROR_ARG, 'this is not a directory')
else:
for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid)
yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.items())):
dirpath = self._path_join(path, name)
if dirpath.startswith("/"):
# Strip leading slash (libmpc rejects this).
dirpath = dirpath[1:]
yield 'directory: %s' % dirpath
def _listall(self, basepath, node, info=False):
"""Helper function for recursive listing. If info, show
tracks' complete info; otherwise, just show items' paths.
"""
if isinstance(node, int):
# List a single file.
if info:
item = self.lib.get_item(node)
yield self._item_info(item)
else:
yield 'file: ' + basepath
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.items()):
newpath = self._path_join(basepath, name)
# "yield from"
yield from self._listall(newpath, itemid, info)
for name, subdir in sorted(node.dirs.items()):
newpath = self._path_join(basepath, name)
yield 'directory: ' + newpath
yield from self._listall(newpath, subdir, info)
def cmd_listall(self, conn, path="/"):
"""Send the paths all items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), False)
def cmd_listallinfo(self, conn, path="/"):
"""Send info on all the items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), True)
# Playlist manipulation.
def _all_items(self, node):
"""Generator yielding all items under a VFS node.
"""
if isinstance(node, int):
# Could be more efficient if we built up all the IDs and
# then issued a single SELECT.
yield self.lib.get_item(node)
else:
# Recurse into a directory.
for name, itemid in sorted(node.files.items()):
# "yield from"
yield from self._all_items(itemid)
for name, subdir in sorted(node.dirs.items()):
yield from self._all_items(subdir)
def _add(self, path, send_id=False):
"""Adds a track or directory to the playlist, specified by the
path. If `send_id`, write each item's id to the client.
"""
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
yield 'Id: ' + str(item.id)
self.playlist_version += 1
self._send_event('playlist')
def cmd_add(self, conn, path):
"""Adds a track or directory to the playlist, specified by a
path.
"""
return self._add(path, False)
def cmd_addid(self, conn, path):
"""Same as `cmd_add` but sends an id back to the client."""
return self._add(path, True)
# Server info.
def cmd_status(self, conn):
yield from super().cmd_status(conn)
if self.current_index > -1:
item = self.playlist[self.current_index]
yield (
'bitrate: ' + str(item.bitrate / 1000),
'audio: {}:{}:{}'.format(
str(item.samplerate),
str(item.bitdepth),
str(item.channels),
),
)
(pos, total) = self.player.time()
yield (
'time: {}:{}'.format(
str(int(pos)),
str(int(total)),
),
'elapsed: ' + f'{pos:.3f}',
'duration: ' + f'{total:.3f}',
)
# Also missing 'updating_db'.
def cmd_stats(self, conn):
"""Sends some statistics about the library."""
with self.lib.transaction() as tx:
statement = 'SELECT COUNT(DISTINCT artist), ' \
'COUNT(DISTINCT album), ' \
'COUNT(id), ' \
'SUM(length) ' \
'FROM items'
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
'artists: ' + str(artists),
'albums: ' + str(albums),
'songs: ' + str(songs),
'uptime: ' + str(int(time.time() - self.startup_time)),
'playtime: ' + '0', # Missing.
'db_playtime: ' + str(int(totaltime)),
'db_update: ' + str(int(self.updated_time)),
)
def cmd_decoders(self, conn):
"""Send list of supported decoders and formats."""
decoders = self.player.get_decoders()
for name, (mimes, exts) in decoders.items():
yield f'plugin: {name}'
for ext in exts:
yield f'suffix: {ext}'
for mime in mimes:
yield f'mime_type: {mime}'
# Searching.
tagtype_map = {
'Artist': 'artist',
'ArtistSort': 'artist_sort',
'Album': 'album',
'Title': 'title',
'Track': 'track',
'AlbumArtist': 'albumartist',
'AlbumArtistSort': 'albumartist_sort',
'Label': 'label',
'Genre': 'genre',
'Date': 'year',
'OriginalDate': 'original_year',
'Composer': 'composer',
'Disc': 'disc',
'Comment': 'comments',
'MUSICBRAINZ_TRACKID': 'mb_trackid',
'MUSICBRAINZ_ALBUMID': 'mb_albumid',
'MUSICBRAINZ_ARTISTID': 'mb_artistid',
'MUSICBRAINZ_ALBUMARTISTID': 'mb_albumartistid',
'MUSICBRAINZ_RELEASETRACKID': 'mb_releasetrackid',
}
def cmd_tagtypes(self, conn):
"""Returns a list of the metadata (tag) fields available for
searching.
"""
for tag in self.tagtype_map:
yield 'tagtype: ' + tag
def _tagtype_lookup(self, tag):
"""Uses `tagtype_map` to look up the beets column name for an
MPD tagtype (or throw an appropriate exception). Returns both
the canonical name of the MPD tagtype and the beets column
name.
"""
for test_tag, key in self.tagtype_map.items():
# Match case-insensitively.
if test_tag.lower() == tag.lower():
return test_tag, key
raise BPDError(ERROR_UNKNOWN, 'no such tagtype')
def _metadata_query(self, query_type, any_query_type, kv):
"""Helper function returns a query object that will find items
according to the library query type provided and the key-value
pairs specified. The any_query_type is used for queries of
type "any"; if None, then an error is thrown.
"""
if kv: # At least one key-value pair.
queries = []
# Iterate pairwise over the arguments.
it = iter(kv)
for tag, value in zip(it, it):
if tag.lower() == 'any':
if any_query_type:
queries.append(any_query_type(value,
ITEM_KEYS_WRITABLE,
query_type))
else:
raise BPDError(ERROR_UNKNOWN, 'no such tagtype')
else:
_, key = self._tagtype_lookup(tag)
queries.append(query_type(key, value))
return dbcore.query.AndQuery(queries)
else: # No key-value pairs.
return dbcore.query.TrueQuery()
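    # Example (added comment, not in the original source): with
    # kv == ('Artist', 'Foo', 'Album', 'Bar') and
    # query_type == dbcore.query.MatchQuery, this builds
    # AndQuery([MatchQuery('artist', 'Foo'), MatchQuery('album', 'Bar')]);
    # an empty kv matches everything via TrueQuery.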
def cmd_search(self, conn, *kv):
"""Perform a substring match for items."""
query = self._metadata_query(dbcore.query.SubstringQuery,
dbcore.query.AnyFieldQuery,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_find(self, conn, *kv):
"""Perform an exact match for items."""
query = self._metadata_query(dbcore.query.MatchQuery,
None,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_list(self, conn, show_tag, *kv):
"""List distinct metadata values for show_tag, possibly
filtered by matching match_tag to match_term.
"""
show_tag_canon, show_key = self._tagtype_lookup(show_tag)
if len(kv) == 1:
if show_tag_canon == 'Album':
# If no tag was given, assume artist. This is because MPD
# supports a short version of this command for fetching the
# albums belonging to a particular artist, and some clients
# rely on this behaviour (e.g. MPDroid, M.A.L.P.).
kv = ('Artist', kv[0])
else:
raise BPDError(ERROR_ARG, 'should be "Album" for 3 arguments')
elif len(kv) % 2 != 0:
raise BPDError(ERROR_ARG, 'Incorrect number of filter arguments')
query = self._metadata_query(dbcore.query.MatchQuery, None, kv)
clause, subvals = query.clause()
statement = 'SELECT DISTINCT ' + show_key + \
' FROM items WHERE ' + clause + \
' ORDER BY ' + show_key
self._log.debug(statement)
with self.lib.transaction() as tx:
rows = tx.query(statement, subvals)
for row in rows:
if not row[0]:
# Skip any empty values of the field.
continue
yield show_tag_canon + ': ' + str(row[0])
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
tag/value query.
"""
_, key = self._tagtype_lookup(tag)
songs = 0
playtime = 0.0
for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
songs += 1
playtime += item.length
yield 'songs: ' + str(songs)
yield 'playtime: ' + str(int(playtime))
# Persistent playlist manipulation. In MPD this is an optional feature so
# these dummy implementations match MPD's behaviour with the feature off.
def cmd_listplaylist(self, conn, playlist):
raise BPDError(ERROR_NO_EXIST, 'No such playlist')
def cmd_listplaylistinfo(self, conn, playlist):
raise BPDError(ERROR_NO_EXIST, 'No such playlist')
def cmd_listplaylists(self, conn):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_load(self, conn, playlist):
raise BPDError(ERROR_NO_EXIST, 'Stored playlists are disabled')
def cmd_playlistadd(self, conn, playlist, uri):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_playlistclear(self, conn, playlist):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_playlistdelete(self, conn, playlist, index):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_playlistmove(self, conn, playlist, from_index, to_index):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_rename(self, conn, playlist, new_name):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_rm(self, conn, playlist):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
def cmd_save(self, conn, playlist):
raise BPDError(ERROR_UNKNOWN, 'Stored playlists are disabled')
# "Outputs." Just a dummy implementation because we don't control
# any outputs.
def cmd_outputs(self, conn):
"""List the available outputs."""
yield (
'outputid: 0',
'outputname: gstreamer',
'outputenabled: 1',
)
def cmd_enableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id != 0:
raise ArgumentIndexError()
def cmd_disableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id == 0:
raise BPDError(ERROR_ARG, 'cannot disable this output')
else:
raise ArgumentIndexError()
# Playback control. The functions below hook into the
# half-implementations provided by the base class. Together, they're
# enough to implement all normal playback functionality.
def cmd_play(self, conn, index=-1):
new_index = index != -1 and index != self.current_index
was_paused = self.paused
super().cmd_play(conn, index)
if self.current_index > -1: # Not stopped.
if was_paused and not new_index:
# Just unpause.
self.player.play()
else:
self.player.play_file(self.playlist[self.current_index].path)
def cmd_pause(self, conn, state=None):
super().cmd_pause(conn, state)
if self.paused:
self.player.pause()
elif self.player.playing:
self.player.play()
def cmd_stop(self, conn):
super().cmd_stop(conn)
self.player.stop()
def cmd_seek(self, conn, index, pos):
"""Seeks to the specified position in the specified song."""
index = cast_arg(int, index)
pos = cast_arg(float, pos)
super().cmd_seek(conn, index, pos)
self.player.seek(pos)
# Volume control.
def cmd_setvol(self, conn, vol):
vol = cast_arg(int, vol)
super().cmd_setvol(conn, vol)
self.player.volume = float(vol) / 100
# Beets plugin hooks.
class BPDPlugin(BeetsPlugin):
"""Provides the "beet bpd" command for running a music player
server.
"""
def __init__(self):
super().__init__()
self.config.add({
'host': '',
'port': 6600,
'control_port': 6601,
'password': '',
'volume': VOLUME_MAX,
})
self.config['password'].redact = True
def start_bpd(self, lib, host, port, password, volume, ctrl_port):
"""Starts a BPD server."""
try:
server = Server(lib, host, port, password, ctrl_port, self._log)
server.cmd_setvol(None, volume)
server.run()
except NoGstreamerError:
self._log.error('Gstreamer Python bindings not found.')
            self._log.error('Install "gstreamer1.0" and "python-gi" '
'or similar package to use BPD.')
def commands(self):
cmd = beets.ui.Subcommand(
'bpd', help='run an MPD-compatible music player server'
)
def func(lib, opts, args):
host = self.config['host'].as_str()
host = args.pop(0) if args else host
port = args.pop(0) if args else self.config['port'].get(int)
if args:
ctrl_port = args.pop(0)
else:
ctrl_port = self.config['control_port'].get(int)
if args:
raise beets.ui.UserError('too many arguments')
password = self.config['password'].as_str()
volume = self.config['volume'].get(int)
self.start_bpd(lib, host, int(port), password, volume,
int(ctrl_port))
cmd.func = func
return [cmd]
| 57,378 | Python | .py | 1,380 | 31.29058 | 79 | 0.580156 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,686 | __init__.py | rembo10_headphones/lib/beetsplug/web/__init__.py |
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A Web interface to beets."""
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
import beets.library
import flask
from flask import g, jsonify
from werkzeug.routing import BaseConverter, PathConverter
import os
from unidecode import unidecode
import json
import base64
# Utilities.
def _rep(obj, expand=False):
"""Get a flat -- i.e., JSON-ish -- representation of a beets Item or
Album object. For Albums, `expand` dictates whether tracks are
included.
"""
out = dict(obj)
if isinstance(obj, beets.library.Item):
if app.config.get('INCLUDE_PATHS', False):
out['path'] = util.displayable_path(out['path'])
else:
del out['path']
# Filter all bytes attributes and convert them to strings.
for key, value in out.items():
if isinstance(out[key], bytes):
out[key] = base64.b64encode(value).decode('ascii')
# Get the size (in bytes) of the backing file. This is useful
# for the Tomahawk resolver API.
try:
out['size'] = os.path.getsize(util.syspath(obj.path))
except OSError:
out['size'] = 0
return out
elif isinstance(obj, beets.library.Album):
if app.config.get('INCLUDE_PATHS', False):
out['artpath'] = util.displayable_path(out['artpath'])
else:
del out['artpath']
if expand:
out['items'] = [_rep(item) for item in obj.items()]
return out
def json_generator(items, root, expand=False):
"""Generator that dumps list of beets Items or Albums as JSON
:param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the json
representation
:returns: generator that yields strings
"""
yield '{"%s":[' % root
first = True
for item in items:
if first:
first = False
else:
yield ','
yield json.dumps(_rep(item, expand=expand))
yield ']}'
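# Example (added comment, not in the original source): json_generator(items,
# root='items') yields the pieces of one JSON document in order --
# '{"items":[', then each item's JSON separated by ',', then ']}' -- so the
# response can be streamed without building the whole list in memory.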
def is_expand():
"""Returns whether the current request is for an expanded response."""
return flask.request.args.get('expand') is not None
def is_delete():
"""Returns whether the current delete request should remove the selected
files.
"""
return flask.request.args.get('delete') is not None
def get_method():
"""Returns the HTTP method of the current request."""
return flask.request.method
def resource(name, patchable=False):
"""Decorates a function to handle RESTful HTTP requests for a resource.
"""
def make_responder(retriever):
def responder(ids):
entities = [retriever(id) for id in ids]
entities = [entity for entity in entities if entity]
if get_method() == "DELETE":
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.remove(delete=is_delete())
return flask.make_response(jsonify({'deleted': True}), 200)
elif get_method() == "PATCH" and patchable:
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.update(flask.request.get_json())
entity.try_sync(True, False) # write, don't move
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
elif get_method() == "GET":
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
else:
return flask.abort(404)
else:
return flask.abort(405)
responder.__name__ = f'get_{name}'
return responder
return make_responder
def resource_query(name, patchable=False):
"""Decorates a function to handle RESTful HTTP queries for resources.
"""
def make_responder(query_func):
def responder(queries):
entities = query_func(queries)
if get_method() == "DELETE":
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.remove(delete=is_delete())
return flask.make_response(jsonify({'deleted': True}), 200)
elif get_method() == "PATCH" and patchable:
if app.config.get('READONLY', True):
return flask.abort(405)
for entity in entities:
entity.update(flask.request.get_json())
entity.try_sync(True, False) # write, don't move
return app.response_class(
json_generator(entities, root=name),
mimetype='application/json'
)
elif get_method() == "GET":
return app.response_class(
json_generator(
entities,
root='results', expand=is_expand()
),
mimetype='application/json'
)
else:
return flask.abort(405)
responder.__name__ = f'query_{name}'
return responder
return make_responder
def resource_list(name):
"""Decorates a function to handle RESTful HTTP request for a list of
resources.
"""
def make_responder(list_all):
def responder():
return app.response_class(
json_generator(list_all(), root=name, expand=is_expand()),
mimetype='application/json'
)
responder.__name__ = f'all_{name}'
return responder
return make_responder
def _get_unique_table_field_values(model, field, sort_field):
""" retrieve all unique values belonging to a key from a model """
if field not in model.all_keys() or sort_field not in model.all_keys():
raise KeyError
with g.lib.transaction() as tx:
rows = tx.query('SELECT DISTINCT "{}" FROM "{}" ORDER BY "{}"'
.format(field, model._table, sort_field))
return [row[0] for row in rows]
class IdListConverter(BaseConverter):
"""Converts comma separated lists of ids in urls to integer lists.
"""
def to_python(self, value):
ids = []
for id in value.split(','):
try:
ids.append(int(id))
except ValueError:
pass
return ids
def to_url(self, value):
return ','.join(str(v) for v in value)
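# Example (added comment, not in the original source): IdListConverter turns
# the URL fragment '3,5,x' into [3, 5] (non-integers are silently dropped)
# and serializes [3, 5] back to '3,5' when building URLs.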
class QueryConverter(PathConverter):
"""Converts slash separated lists of queries in the url to string list.
"""
def to_python(self, value):
queries = value.split('/')
"""Do not do path substitution on regex value tests"""
return [query if '::' in query else query.replace('\\', os.sep)
for query in queries]
def to_url(self, value):
return ','.join([v.replace(os.sep, '\\') for v in value])
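# Example (added comment, not in the original source): QueryConverter splits
# 'album:foo/artist:bar' into ['album:foo', 'artist:bar']; backslashes in
# each part are replaced with os.sep unless the part contains '::' (a regex
# query), which is left untouched so the pattern is not mangled.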
class EverythingConverter(PathConverter):
regex = '.*?'
# Flask setup.
app = flask.Flask(__name__)
app.url_map.converters['idlist'] = IdListConverter
app.url_map.converters['query'] = QueryConverter
app.url_map.converters['everything'] = EverythingConverter
@app.before_request
def before_request():
g.lib = app.config['lib']
# Items.
@app.route('/item/<idlist:ids>', methods=["GET", "DELETE", "PATCH"])
@resource('items', patchable=True)
def get_item(id):
return g.lib.get_item(id)
@app.route('/item/')
@app.route('/item/query/')
@resource_list('items')
def all_items():
return g.lib.items()
@app.route('/item/<int:item_id>/file')
def item_file(item_id):
item = g.lib.get_item(item_id)
# On Windows under Python 2, Flask wants a Unicode path. On Python 3, it
# *always* wants a Unicode path.
if os.name == 'nt':
item_path = util.syspath(item.path)
else:
item_path = util.py3_path(item.path)
try:
unicode_item_path = util.text_string(item.path)
except (UnicodeDecodeError, UnicodeEncodeError):
unicode_item_path = util.displayable_path(item.path)
base_filename = os.path.basename(unicode_item_path)
try:
# Imitate http.server behaviour
base_filename.encode("latin-1", "strict")
except UnicodeEncodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
response = flask.send_file(
item_path,
as_attachment=True,
attachment_filename=safe_filename
)
response.headers['Content-Length'] = os.path.getsize(item_path)
return response
@app.route('/item/query/<query:queries>', methods=["GET", "DELETE", "PATCH"])
@resource_query('items', patchable=True)
def item_query(queries):
return g.lib.items(queries)
@app.route('/item/path/<everything:path>')
def item_at_path(path):
query = beets.library.PathQuery('path', path.encode('utf-8'))
item = g.lib.items(query).get()
if item:
return flask.jsonify(_rep(item))
else:
return flask.abort(404)
@app.route('/item/values/<string:key>')
def item_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
try:
values = _get_unique_table_field_values(beets.library.Item, key,
sort_key)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Albums.
@app.route('/album/<idlist:ids>', methods=["GET", "DELETE"])
@resource('albums')
def get_album(id):
return g.lib.get_album(id)
@app.route('/album/')
@app.route('/album/query/')
@resource_list('albums')
def all_albums():
return g.lib.albums()
@app.route('/album/query/<query:queries>', methods=["GET", "DELETE"])
@resource_query('albums')
def album_query(queries):
return g.lib.albums(queries)
@app.route('/album/<int:album_id>/art')
def album_art(album_id):
album = g.lib.get_album(album_id)
if album and album.artpath:
return flask.send_file(album.artpath.decode())
else:
return flask.abort(404)
@app.route('/album/values/<string:key>')
def album_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
try:
values = _get_unique_table_field_values(beets.library.Album, key,
sort_key)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Artists.
@app.route('/artist/')
def all_artists():
with g.lib.transaction() as tx:
rows = tx.query("SELECT DISTINCT albumartist FROM albums")
all_artists = [row[0] for row in rows]
return flask.jsonify(artist_names=all_artists)
# Library information.
@app.route('/stats')
def stats():
with g.lib.transaction() as tx:
item_rows = tx.query("SELECT COUNT(*) FROM items")
album_rows = tx.query("SELECT COUNT(*) FROM albums")
return flask.jsonify({
'items': item_rows[0][0],
'albums': album_rows[0][0],
})
# UI.
@app.route('/')
def home():
return flask.render_template('index.html')
# Plugin hook.
class WebPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({
'host': '127.0.0.1',
'port': 8337,
'cors': '',
'cors_supports_credentials': False,
'reverse_proxy': False,
'include_paths': False,
'readonly': True,
})
def commands(self):
cmd = ui.Subcommand('web', help='start a Web interface')
cmd.parser.add_option('-d', '--debug', action='store_true',
default=False, help='debug mode')
def func(lib, opts, args):
args = ui.decargs(args)
if args:
self.config['host'] = args.pop(0)
if args:
self.config['port'] = int(args.pop(0))
app.config['lib'] = lib
# Normalizes json output
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['INCLUDE_PATHS'] = self.config['include_paths']
app.config['READONLY'] = self.config['readonly']
# Enable CORS if required.
if self.config['cors']:
self._log.info('Enabling CORS with origin: {0}',
self.config['cors'])
from flask_cors import CORS
app.config['CORS_ALLOW_HEADERS'] = "Content-Type"
app.config['CORS_RESOURCES'] = {
r"/*": {"origins": self.config['cors'].get(str)}
}
CORS(
app,
supports_credentials=self.config[
'cors_supports_credentials'
].get(bool)
)
# Allow serving behind a reverse proxy
if self.config['reverse_proxy']:
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Start the web application.
app.run(host=self.config['host'].as_str(),
port=self.config['port'].get(int),
debug=opts.debug, threaded=True)
cmd.func = func
return [cmd]
class ReverseProxied:
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
From: http://flask.pocoo.org/snippets/35/
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
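# Example (added comment, not in the original source): with the nginx config
# from the docstring, a request for /myprefix/item/1 arrives with
# HTTP_X_SCRIPT_NAME == '/myprefix'; the middleware sets SCRIPT_NAME to
# '/myprefix' and trims PATH_INFO to '/item/1', so Flask routes it as if the
# app were mounted at the site root.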
| 15,799 | Python | .py | 405 | 29.809877 | 79 | 0.595954 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,687 | amarok.py | rembo10_headphones/lib/beetsplug/metasync/amarok.py |
# This file is part of beets.
# Copyright 2016, Heinz Wiesinger.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from amarok's library via dbus
"""
from os.path import basename
from datetime import datetime
from time import mktime
from xml.sax.saxutils import quoteattr
from beets.util import displayable_path
from beets.dbcore import types
from beets.library import DateType
from beetsplug.metasync import MetaSource
def import_dbus():
try:
return __import__('dbus')
except ImportError:
return None
dbus = import_dbus()
class Amarok(MetaSource):
item_types = {
'amarok_rating': types.INTEGER,
'amarok_score': types.FLOAT,
'amarok_uid': types.STRING,
'amarok_playcount': types.INTEGER,
'amarok_firstplayed': DateType(),
'amarok_lastplayed': DateType(),
}
query_xml = '<query version="1.0"> \
<filters> \
<and><include field="filename" value=%s /></and> \
</filters> \
</query>'
def __init__(self, config, log):
super().__init__(config, log)
if not dbus:
raise ImportError('failed to import dbus')
self.collection = \
dbus.SessionBus().get_object('org.kde.amarok', '/Collection')
def sync_from_source(self, item):
path = displayable_path(item.path)
        # Amarok unfortunately doesn't allow searching for the full path,
        # only for the path relative to the mount point. But the full path
        # is part of the result set, so query for the filename and then try
        # to match the correct item from the results we get back.
results = self.collection.Query(
self.query_xml % quoteattr(basename(path))
)
for result in results:
if result['xesam:url'] != path:
continue
item.amarok_rating = result['xesam:userRating']
item.amarok_score = result['xesam:autoRating']
item.amarok_playcount = result['xesam:useCount']
item.amarok_uid = \
result['xesam:id'].replace('amarok-sqltrackuid://', '')
if result['xesam:firstUsed'][0][0] != 0:
# These dates are stored as timestamps in amarok's db, but
# exposed over dbus as fixed integers in the current timezone.
first_played = datetime(
result['xesam:firstUsed'][0][0],
result['xesam:firstUsed'][0][1],
result['xesam:firstUsed'][0][2],
result['xesam:firstUsed'][1][0],
result['xesam:firstUsed'][1][1],
result['xesam:firstUsed'][1][2]
)
if result['xesam:lastUsed'][0][0] != 0:
last_played = datetime(
result['xesam:lastUsed'][0][0],
result['xesam:lastUsed'][0][1],
result['xesam:lastUsed'][0][2],
result['xesam:lastUsed'][1][0],
result['xesam:lastUsed'][1][1],
result['xesam:lastUsed'][1][2]
)
else:
last_played = first_played
item.amarok_firstplayed = mktime(first_played.timetuple())
item.amarok_lastplayed = mktime(last_played.timetuple())
| 4,017 | Python | .py | 90 | 33.944444 | 78 | 0.591246 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,688 | __init__.py | rembo10_headphones/lib/beetsplug/metasync/__init__.py |
# This file is part of beets.
# Copyright 2016, Heinz Wiesinger.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from music player libraries
"""
from abc import abstractmethod, ABCMeta
from importlib import import_module
from confuse import ConfigValueError
from beets import ui
from beets.plugins import BeetsPlugin
METASYNC_MODULE = 'beetsplug.metasync'
# Dictionary to map the MODULE and the CLASS NAME of meta sources
SOURCES = {
'amarok': 'Amarok',
'itunes': 'Itunes',
}
class MetaSource(metaclass=ABCMeta):
def __init__(self, config, log):
self.item_types = {}
self.config = config
self._log = log
@abstractmethod
def sync_from_source(self, item):
pass
def load_meta_sources():
""" Returns a dictionary of all the MetaSources
    E.g., {'itunes': Itunes} with issubclass(Itunes, MetaSource) true
"""
meta_sources = {}
for module_path, class_name in SOURCES.items():
module = import_module(METASYNC_MODULE + '.' + module_path)
meta_sources[class_name.lower()] = getattr(module, class_name)
return meta_sources
META_SOURCES = load_meta_sources()
def load_item_types():
""" Returns a dictionary containing the item_types of all the MetaSources
"""
item_types = {}
for meta_source in META_SOURCES.values():
item_types.update(meta_source.item_types)
return item_types
class MetaSyncPlugin(BeetsPlugin):
item_types = load_item_types()
def __init__(self):
super().__init__()
def commands(self):
cmd = ui.Subcommand('metasync',
help='update metadata from music player libraries')
cmd.parser.add_option('-p', '--pretend', action='store_true',
help='show all changes but do nothing')
cmd.parser.add_option('-s', '--source', default=[],
action='append', dest='sources',
help='comma-separated list of sources to sync')
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
def func(self, lib, opts, args):
"""Command handler for the metasync function.
"""
pretend = opts.pretend
query = ui.decargs(args)
sources = []
for source in opts.sources:
sources.extend(source.split(','))
sources = sources or self.config['source'].as_str_seq()
meta_source_instances = {}
items = lib.items(query)
# Avoid needlessly instantiating meta sources (can be expensive)
if not items:
self._log.info('No items found matching query')
return
# Instantiate the meta sources
for player in sources:
try:
cls = META_SOURCES[player]
            except KeyError:
                self._log.error('Unknown metadata source \'{}\''.format(
                    player))
                continue
try:
meta_source_instances[player] = cls(self.config, self._log)
except (ImportError, ConfigValueError) as e:
self._log.error('Failed to instantiate metadata source '
'\'{}\': {}'.format(player, e))
# Avoid needlessly iterating over items
if not meta_source_instances:
self._log.error('No valid metadata sources found')
return
# Sync the items with all of the meta sources
for item in items:
for meta_source in meta_source_instances.values():
meta_source.sync_from_source(item)
changed = ui.show_model_changes(item)
if changed and not pretend:
item.store()
| 4,281 | Python | .py | 104 | 32.740385 | 79 | 0.634323 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,689 | itunes.py | rembo10_headphones/lib/beetsplug/metasync/itunes.py |
# This file is part of beets.
# Copyright 2016, Tom Jaspers.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from iTunes's library
"""
from contextlib import contextmanager
import os
import shutil
import tempfile
import plistlib
from urllib.parse import urlparse, unquote
from time import mktime
from beets import util
from beets.dbcore import types
from beets.library import DateType
from confuse import ConfigValueError
from beetsplug.metasync import MetaSource
@contextmanager
def create_temporary_copy(path):
temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(temp_dir, 'temp_itunes_lib')
shutil.copyfile(path, temp_path)
try:
yield temp_path
finally:
shutil.rmtree(temp_dir)
def _norm_itunes_path(path):
# Itunes prepends the location with 'file://' on posix systems,
# and with 'file://localhost/' on Windows systems.
# The actual path to the file is always saved as posix form
# E.g., 'file://Users/Music/bar' or 'file://localhost/G:/Music/bar'
# The entire path will also be capitalized (e.g., '/Music/Alt-J')
# Note that this means the path will always have a leading separator,
# which is unwanted in the case of Windows systems.
# E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar'
return util.bytestring_path(os.path.normpath(
unquote(urlparse(path).path)).lstrip('\\')).lower()
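# Example (added comment, not in the original source; the result of
# os.path.normpath is platform-dependent): on Windows,
# 'file://localhost/G:/Music/Bar.mp3' becomes the bytestring path
# b'g:\\music\\bar.mp3' -- unquoted, lowercased, leading separator stripped.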
class Itunes(MetaSource):
item_types = {
'itunes_rating': types.INTEGER, # 0..100 scale
'itunes_playcount': types.INTEGER,
'itunes_skipcount': types.INTEGER,
'itunes_lastplayed': DateType(),
'itunes_lastskipped': DateType(),
'itunes_dateadded': DateType(),
}
def __init__(self, config, log):
super().__init__(config, log)
config.add({'itunes': {
'library': '~/Music/iTunes/iTunes Library.xml'
}})
# Load the iTunes library, which has to be the .xml one (not the .itl)
library_path = config['itunes']['library'].as_filename()
try:
self._log.debug(
f'loading iTunes library from {library_path}')
with create_temporary_copy(library_path) as library_copy:
with open(library_copy, 'rb') as library_copy_f:
raw_library = plistlib.load(library_copy_f)
except OSError as e:
raise ConfigValueError('invalid iTunes library: ' + e.strerror)
except Exception:
            # It's likely the user configured their '.itl' library (not the .xml)
if os.path.splitext(library_path)[1].lower() != '.xml':
hint = ': please ensure that the configured path' \
' points to the .XML library'
else:
hint = ''
raise ConfigValueError('invalid iTunes library' + hint)
# Make the iTunes library queryable using the path
self.collection = {_norm_itunes_path(track['Location']): track
for track in raw_library['Tracks'].values()
if 'Location' in track}
def sync_from_source(self, item):
result = self.collection.get(util.bytestring_path(item.path).lower())
if not result:
self._log.warning(f'no iTunes match found for {item}')
return
item.itunes_rating = result.get('Rating')
item.itunes_playcount = result.get('Play Count')
item.itunes_skipcount = result.get('Skip Count')
if result.get('Play Date UTC'):
item.itunes_lastplayed = mktime(
result.get('Play Date UTC').timetuple())
if result.get('Skip Date'):
item.itunes_lastskipped = mktime(
result.get('Skip Date').timetuple())
if result.get('Date Added'):
item.itunes_dateadded = mktime(
result.get('Date Added').timetuple())
| 4,479 | Python | .py | 100 | 37.01 | 78 | 0.651355 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,690 | lexer.py | rembo10_headphones/lib/mako/lexer.py |
# mako/lexer.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import codecs
import re
from mako import compat
from mako import exceptions
from mako import parsetree
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(
self,
text,
filename=None,
disable_unicode=False,
input_encoding=None,
preprocessor=None,
):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
self.matched_lineno = 1
self.matched_charpos = 0
self.lineno = 1
self.match_position = 0
self.tag = []
self.control_line = []
self.ternary_stack = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not " "support disabling Unicode"
)
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, "__iter__"):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
@property
def exception_kwargs(self):
return {
"source": self.text,
"lineno": self.matched_lineno,
"pos": self.matched_charpos,
"filename": self.filename,
}
def match(self, regexp, flags=None):
"""compile the given regexp, cache the reg, and call match_reg()."""
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
if flags:
reg = re.compile(regexp, flags)
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg)
def match_reg(self, reg):
"""match the given regular expression object to the current text
position.
if a match occurs, update the current text and line position.
"""
mp = self.match_position
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
if end == start:
self.match_position = end + 1
else:
self.match_position = end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp : self.match_position])
cp = mp - 1
while cp >= 0 and cp < self.textlength and self.text[cp] != "\n":
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
# print "MATCHED:", match.group(0), "LINE START:",
# self.matched_lineno, "LINE END:", self.lineno
# print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
# (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, watch_nesting, *text):
startpos = self.match_position
text_re = r"|".join(text)
brace_level = 0
paren_level = 0
bracket_level = 0
while True:
match = self.match(r"#.*\n")
if match:
continue
match = self.match(
r"(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1", re.S
)
if match:
continue
match = self.match(r"(%s)" % text_re)
if match and not (
watch_nesting
and (brace_level > 0 or paren_level > 0 or bracket_level > 0)
):
return (
self.text[
startpos : self.match_position - len(match.group(1))
],
match.group(1),
)
elif not match:
match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
if match:
brace_level += match.group(1).count("{")
brace_level -= match.group(1).count("}")
paren_level += match.group(1).count("(")
paren_level -= match.group(1).count(")")
bracket_level += match.group(1).count("[")
bracket_level -= match.group(1).count("]")
continue
raise exceptions.SyntaxException(
"Expected: %s" % ",".join(text), **self.exception_kwargs
)
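    # Illustrative note (added comment, not in the original source): for the
    # remaining text ' d["}"] | h }', parse_until_text(True, r'\|', r'}')
    # returns (' d["}"] ', '|'); the '}' inside the string literal and the
    # bracketed index are skipped thanks to the string-literal regex and the
    # nesting counters above.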
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault("source", self.text)
kwargs.setdefault("lineno", self.matched_lineno)
kwargs.setdefault("pos", self.matched_charpos)
kwargs["filename"] = self.filename
node = nodecls(*args, **kwargs)
if len(self.tag):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
# build a set of child nodes for the control line
# (used for loop variable detection)
# also build a set of child nodes on ternary control lines
        # (used for determining if a pass needs to be auto-inserted)
if self.control_line:
control_frame = self.control_line[-1]
control_frame.nodes.append(node)
if not (
isinstance(node, parsetree.ControlLine)
and control_frame.is_ternary(node.keyword)
):
if self.ternary_stack and self.ternary_stack[-1]:
self.ternary_stack[-1][-1].nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
self.tag.append(node)
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
self.ternary_stack.pop()
elif node.is_primary:
self.control_line.append(node)
self.ternary_stack.append([])
elif self.control_line and self.control_line[-1].is_ternary(
node.keyword
):
self.ternary_stack[-1].append(node)
elif self.control_line and not self.control_line[-1].is_ternary(
node.keyword
):
raise exceptions.SyntaxException(
"Keyword '%s' not a legal ternary for keyword '%s'"
% (node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs
)
_coding_re = re.compile(r"#.*coding[:=]\s*([-\w.]+).*\r?\n")
def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
"""given string/unicode or bytes/string, determine encoding
from magic encoding comment, return body as unicode
or raw if decode_raw=False
"""
if isinstance(text, compat.text_type):
m = self._coding_re.match(text)
encoding = m and m.group(1) or known_encoding or "utf-8"
return encoding, text
if text.startswith(codecs.BOM_UTF8):
text = text[len(codecs.BOM_UTF8) :]
parsed_encoding = "utf-8"
m = self._coding_re.match(text.decode("utf-8", "ignore"))
if m is not None and m.group(1) != "utf-8":
raise exceptions.CompileException(
"Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1),
text.decode("utf-8", "ignore"),
0,
0,
filename,
)
else:
m = self._coding_re.match(text.decode("utf-8", "ignore"))
if m:
parsed_encoding = m.group(1)
else:
parsed_encoding = known_encoding or "utf-8"
if decode_raw:
try:
text = text.decode(parsed_encoding)
except UnicodeDecodeError:
raise exceptions.CompileException(
"Unicode decode operation of encoding '%s' failed"
% parsed_encoding,
text.decode("utf-8", "ignore"),
0,
0,
filename,
)
return parsed_encoding, text
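    # Example (added comment, not in the original source): a byte stream
    # beginning with '# -*- coding: iso-8859-1 -*-\n' matches _coding_re, so
    # this returns 'iso-8859-1' and, when decode_raw is set, the text decoded
    # with that encoding.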
def parse(self):
self.encoding, self.text = self.decode_raw_stream(
self.text, not self.disable_unicode, self.encoding, self.filename
)
for preproc in self.preprocessor:
self.text = preproc(self.text)
# push the match marker past the
# encoding comment.
self.match_reg(self._coding_re)
self.textlength = len(self.text)
while True:
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
continue
if self.match_control_line():
continue
if self.match_comment():
continue
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
continue
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" % self.tag[-1].keyword,
**self.exception_kwargs
)
if len(self.control_line):
raise exceptions.SyntaxException(
"Unterminated control keyword: '%s'"
% self.control_line[-1].keyword,
self.text,
self.control_line[-1].lineno,
self.control_line[-1].pos,
self.filename,
)
return self.template
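# Minimal usage sketch (illustrative only, not from the original file): the
# Lexer is normally driven by Template/codegen, but can be run standalone:
#
#   from mako.lexer import Lexer
#   node = Lexer(u"hello ${name}").parse()
#   # ``node`` is a parsetree.TemplateNode whose .nodes contain Text and
#   # Expression children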
def match_tag_start(self):
match = self.match(
r"""
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \
# sign, string expression
\s* # more whitespace
(/)?> # closing
""",
re.I | re.S | re.X,
)
if match:
keyword, attr, isend = match.groups()
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(
r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr
):
key, val1, val2 = att
text = val1 or val2
text = text.replace("\r\n", "\n")
attributes[key] = text
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
else:
if keyword == "text":
match = self.match(r"(.*?)(?=\</%text>)", re.S)
if not match:
raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" % self.tag[-1].keyword,
**self.exception_kwargs
)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
return False
def match_tag_end(self):
match = self.match(r"\</%[\t ]*(.+?)[\t ]*>")
if match:
if not len(self.tag):
raise exceptions.SyntaxException(
"Closing tag without opening tag: </%%%s>"
% match.group(1),
**self.exception_kwargs
)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException(
"Closing tag </%%%s> does not match tag: <%%%s>"
% (match.group(1), self.tag[-1].keyword),
**self.exception_kwargs
)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r"\Z", re.S)
if match:
string = match.group()
if string:
return string
else:
return True
else:
return False
def match_text(self):
match = self.match(
r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a
# consumed newline and whitespace
|
(?=\${) # an expression
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
(\\\r?\n) # an escaped newline - throw away
|
\Z # end of string
)""",
re.X | re.S,
)
if match:
text = match.group(1)
if text:
self.append_node(parsetree.Text, text)
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(False, r"%>")
# the trailing newline helps
# compiler.parse() not complain about indentation
text = adjust_whitespace(text) + "\n"
self.append_node(
parsetree.Code,
text,
match.group(1) == "!",
lineno=line,
pos=pos,
)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(True, r"\|", r"}")
if end == "|":
escapes, end = self.parse_until_text(True, r"}")
else:
escapes = ""
text = text.replace("\r\n", "\n")
self.append_node(
parsetree.Expression,
text,
escapes.strip(),
lineno=line,
pos=pos,
)
return True
else:
return False
def match_control_line(self):
match = self.match(
r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\\r?\n)|[^\r\n])*)"
r"(?:\r?\n|\Z)",
re.M,
)
if match:
operator = match.group(1)
text = match.group(2)
if operator == "%":
m2 = re.match(r"(end)?(\w+)\s*(.*)", text)
if not m2:
raise exceptions.SyntaxException(
"Invalid control line: '%s'" % text,
**self.exception_kwargs
)
isend, keyword = m2.group(1, 2)
isend = isend is not None
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException(
"No starting keyword '%s' for '%s'"
% (keyword, text),
**self.exception_kwargs
)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException(
"Keyword '%s' doesn't match keyword '%s'"
% (text, self.control_line[-1].keyword),
**self.exception_kwargs
)
self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
self.append_node(parsetree.Comment, text)
return True
else:
return False
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False
# ==== end of record: lib/mako/lexer.py (16,927 bytes, 441 lines, Python, rembo10/headphones, GPL-3.0) ====
# ==== record 8,691: filters.py (rembo10_headphones/lib/mako/filters.py) ====
# mako/filters.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import re
from mako import compat
from mako.compat import codepoint2name
from mako.compat import name2codepoint
from mako.compat import quote_plus
from mako.compat import unquote_plus
xml_escapes = {
"&": "&amp;",
">": "&gt;",
"<": "&lt;",
'"': "&#34;",  # also &quot; in html-only
"'": "&#39;",  # also &apos; in html-only
}
# XXX: &quot; is valid in HTML and XML
# &apos; is not valid HTML, but is valid XML
def legacy_html_escape(s):
"""legacy HTML escape for non-unicode mode."""
s = s.replace("&", "&amp;")
s = s.replace(">", "&gt;")
s = s.replace("<", "&lt;")
s = s.replace('"', "&#34;")
s = s.replace("'", "&#39;")
return s
try:
import markupsafe
html_escape = markupsafe.escape
except ImportError:
html_escape = legacy_html_escape
def xml_escape(string):
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
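# Example (illustrative, not part of the original file):
#   xml_escape(u'x < "y" & z')  ->  u'x &lt; &#34;y&#34; &amp; z'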
def url_escape(string):
# convert into a list of octets
string = string.encode("utf8")
return quote_plus(string)
def legacy_url_escape(string):
# convert into a list of octets
return quote_plus(string)
def url_unescape(string):
text = unquote_plus(string)
if not is_ascii_str(text):
text = text.decode("utf8")
return text
def trim(string):
return string.strip()
class Decode(object):
def __getattr__(self, key):
def decode(x):
if isinstance(x, compat.text_type):
return x
elif not isinstance(x, compat.binary_type):
return decode(str(x))
else:
return compat.text_type(x, encoding=key)
return decode
decode = Decode()
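# Example (illustrative): the ``decode`` object turns attribute access into
# a decoding filter, so ``decode.utf8(b'caf\xc3\xa9')`` returns u'café',
# while unicode input passes through unchanged.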
_ASCII_re = re.compile(r"\A[\x00-\x7f]*\Z")
def is_ascii_str(text):
return isinstance(text, str) and _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
def __init__(self, codepoint2name, name2codepoint):
self.codepoint2entity = dict(
[
(c, compat.text_type("&%s;" % n))
for c, n in codepoint2name.items()
]
)
self.name2codepoint = name2codepoint
def escape_entities(self, text):
"""Replace characters with their character entity references.
Only characters corresponding to a named entity are replaced.
"""
return compat.text_type(text).translate(self.codepoint2entity)
def __escape(self, m):
codepoint = ord(m.group())
try:
return self.codepoint2entity[codepoint]
except (KeyError, IndexError):
return "&#x%X;" % codepoint
__escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
def escape(self, text):
"""Replace characters with their character references.
Replace characters by their named entity references.
Non-ASCII characters, if they do not have a named entity reference,
are replaced by numerical character references.
The return value is guaranteed to be ASCII.
"""
return self.__escapable.sub(
self.__escape, compat.text_type(text)
).encode("ascii")
# XXX: This regexp will not match all valid XML entity names__.
# (It punts on details involving CombiningChars and Extenders.)
#
# .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
__characterrefs = re.compile(
r"""& (?:
\#(\d+)
| \#x([\da-f]+)
| ( (?!\d) [:\w] [-.:\w]+ )
) ;""",
re.X | re.UNICODE,
)
def __unescape(self, m):
dval, hval, name = m.groups()
if dval:
codepoint = int(dval)
elif hval:
codepoint = int(hval, 16)
else:
codepoint = self.name2codepoint.get(name, 0xFFFD)
# U+FFFD = "REPLACEMENT CHARACTER"
# both branches of the original ``codepoint < 128`` check returned
# chr(codepoint), so a single return suffices
return chr(codepoint)
def unescape(self, text):
"""Unescape character references.
All character references (both entity references and numerical
character references) are unescaped.
"""
return self.__characterrefs.sub(self.__unescape, text)
_html_entities_escaper = XMLEntityEscaper(codepoint2name, name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
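# Example (illustrative): escape_entities replaces only characters that have
# a named entity; unescape handles named and numeric references alike:
#
#   html_entities_escape(u'"<&>"')            -> u'&quot;&lt;&amp;&gt;&quot;'
#   html_entities_unescape(u'&quot;hi&quot;') -> u'"hi"'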
def htmlentityreplace_errors(ex):
"""An encoding error handler.
This python codecs error handler replaces unencodable
characters with HTML entities, or, if no HTML entity exists for
the character, XML character references::
>>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
'The cost was &euro;12.'
"""
if isinstance(ex, UnicodeEncodeError):
# Handle encoding errors
bad_text = ex.object[ex.start : ex.end]
text = _html_entities_escaper.escape(bad_text)
# escape() returns ASCII bytes; decode for a py2/py3-safe text result
return (text.decode("ascii"), ex.end)
raise ex
codecs.register_error("htmlentityreplace", htmlentityreplace_errors)
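# Usage sketch (illustrative; mirrors the docstring above): once registered,
# the handler kicks in for any character the target codec cannot encode:
#
#   u'The cost was \u20ac12.'.encode('latin-1', 'htmlentityreplace')
#   # -> b'The cost was &euro;12.'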
# TODO: options to make this dynamic per-compilation will be added in a later
# release
DEFAULT_ESCAPES = {
"x": "filters.xml_escape",
"h": "filters.html_escape",
"u": "filters.url_escape",
"trim": "filters.trim",
"entity": "filters.html_entities_escape",
"unicode": "unicode",
"decode": "decode",
"str": "str",
"n": "n",
}
if compat.py3k:
DEFAULT_ESCAPES.update({"unicode": "str"})
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES["h"] = "filters.legacy_html_escape"
NON_UNICODE_ESCAPES["u"] = "filters.legacy_url_escape"
# ==== end of record: lib/mako/filters.py (6,063 bytes, 162 lines, Python, rembo10/headphones, GPL-3.0) ====
# ==== record 8,692: template.py (rembo10_headphones/lib/mako/template.py) ====
# mako/template.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Template class, a facade for parsing, generating and executing
template strings, as well as template runtime operations."""
import json
import os
import re
import shutil
import stat
import tempfile
import types
import weakref
from mako import cache
from mako import codegen
from mako import compat
from mako import exceptions
from mako import runtime
from mako import util
from mako.lexer import Lexer
class Template(object):
r"""Represents a compiled template.
:class:`.Template` includes a reference to the original
template source (via the :attr:`.source` attribute)
as well as the source code of the
generated Python module (i.e. the :attr:`.code` attribute),
as well as a reference to an actual Python module.
:class:`.Template` is constructed using either a literal string
representing the template text, or a filename representing a filesystem
path to a source file.
:param text: textual template source. This argument is mutually
exclusive versus the ``filename`` parameter.
:param filename: filename of the source template. This argument is
mutually exclusive versus the ``text`` parameter.
:param buffer_filters: string list of filters to be applied
to the output of ``%def``\ s which are buffered, cached, or otherwise
filtered, after all filters
defined with the ``%def`` itself have been applied. Allows the
creation of default expression filters that let the output
of return-valued ``%def``\ s "opt out" of that filtering via
passing special attributes or objects.
:param bytestring_passthrough: When ``True``, and ``output_encoding`` is
set to ``None``, and :meth:`.Template.render` is used to render,
the `StringIO` or `cStringIO` buffer will be used instead of the
default "fast" buffer. This allows raw bytestrings in the
output stream, such as in expressions, to pass straight
through to the buffer. This flag is forced
to ``True`` if ``disable_unicode`` is also configured.
.. versionadded:: 0.4
Added to provide the same behavior as that of the previous series.
:param cache_args: Dictionary of cache configuration arguments that
will be passed to the :class:`.CacheImpl`. See :ref:`caching_toplevel`.
:param cache_dir:
.. deprecated:: 0.6
Use the ``'dir'`` argument in the ``cache_args`` dictionary.
See :ref:`caching_toplevel`.
:param cache_enabled: Boolean flag which enables caching of this
template. See :ref:`caching_toplevel`.
:param cache_impl: String name of a :class:`.CacheImpl` caching
implementation to use. Defaults to ``'beaker'``.
:param cache_type:
.. deprecated:: 0.6
Use the ``'type'`` argument in the ``cache_args`` dictionary.
See :ref:`caching_toplevel`.
:param cache_url:
.. deprecated:: 0.6
Use the ``'url'`` argument in the ``cache_args`` dictionary.
See :ref:`caching_toplevel`.
:param default_filters: List of string filter names that will
be applied to all expressions. See :ref:`filtering_default_filters`.
:param disable_unicode: Disables all awareness of Python Unicode
objects. See :ref:`unicode_disabled`.
:param enable_loop: When ``True``, enable the ``loop`` context variable.
This can be set to ``False`` to support templates that may
be making usage of the name "``loop``". Individual templates can
re-enable the "loop" context by placing the directive
``enable_loop="True"`` inside the ``<%page>`` tag -- see
:ref:`migrating_loop`.
:param encoding_errors: Error parameter passed to ``encode()`` when
string encoding is performed. See :ref:`usage_unicode`.
:param error_handler: Python callable which is called whenever
compile or runtime exceptions occur. The callable is passed
the current context as well as the exception. If the
callable returns ``True``, the exception is considered to
be handled, else it is re-raised after the function
completes. Is used to provide custom error-rendering
functions.
.. seealso::
:paramref:`.Template.include_error_handler` - include-specific
error handler function
:param format_exceptions: if ``True``, exceptions which occur during
the render phase of this template will be caught and
formatted into an HTML error page, which then becomes the
rendered result of the :meth:`.render` call. Otherwise,
runtime exceptions are propagated outwards.
:param imports: String list of Python statements, typically individual
"import" lines, which will be placed into the module level
preamble of all generated Python modules. See the example
in :ref:`filtering_default_filters`.
:param future_imports: String list of names to import from `__future__`.
These will be concatenated into a comma-separated string and inserted
into the beginning of the template, e.g. ``future_imports=['FOO',
'BAR']`` results in ``from __future__ import FOO, BAR``. If you're
interested in using features like the new division operator, you must
use future_imports to convey that to the renderer, as otherwise the
import will not appear as the first executed statement in the generated
code and will therefore not have the desired effect.
:param include_error_handler: An error handler that runs when this template
is included within another one via the ``<%include>`` tag, and raises an
error. Compare to the :paramref:`.Template.error_handler` option.
.. versionadded:: 1.0.6
.. seealso::
:paramref:`.Template.error_handler` - top-level error handler function
:param input_encoding: Encoding of the template's source code. Can
be used in lieu of the coding comment. See
:ref:`usage_unicode` as well as :ref:`unicode_toplevel` for
details on source encoding.
:param lookup: a :class:`.TemplateLookup` instance that will be used
for all file lookups via the ``<%namespace>``,
``<%include>``, and ``<%inherit>`` tags. See
:ref:`usage_templatelookup`.
:param module_directory: Filesystem location where generated
Python module files will be placed.
:param module_filename: Overrides the filename of the generated
Python module file. For advanced usage only.
:param module_writer: A callable which overrides how the Python
module is written entirely. The callable is passed the
encoded source content of the module and the destination
path to be written to. The default behavior of module writing
uses a tempfile in conjunction with a file move in order
to make the operation atomic. So a user-defined module
writing function that mimics the default behavior would be:
.. sourcecode:: python
import tempfile
import os
import shutil
def module_writer(source, outputpath):
(dest, name) = \\
tempfile.mkstemp(
dir=os.path.dirname(outputpath)
)
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
from mako.template import Template
mytemplate = Template(
filename="index.html",
module_directory="/path/to/modules",
module_writer=module_writer
)
The function is provided for unusual configurations where
certain platform-specific permissions or other special
steps are needed.
:param output_encoding: The encoding to use when :meth:`.render`
is called.
See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`.
:param preprocessor: Python callable which will be passed
the full template source before it is parsed. The return
result of the callable will be used as the template source
code.
:param lexer_cls: A :class:`.Lexer` class used to parse
the template. The :class:`.Lexer` class is used by
default.
.. versionadded:: 0.7.4
:param strict_undefined: Replaces the automatic usage of
``UNDEFINED`` for any undeclared variables not located in
the :class:`.Context` with an immediate raise of
``NameError``. The advantage is immediate reporting of
missing variables which include the name.
.. versionadded:: 0.3.6
:param uri: string URI or other identifier for this template.
If not provided, the ``uri`` is generated from the filesystem
path, or from the in-memory identity of a non-file-based
template. The primary usage of the ``uri`` is to provide a key
within :class:`.TemplateLookup`, as well as to generate the
file path of the generated Python module file, if
``module_directory`` is specified.
"""
lexer_cls = Lexer
def __init__(
self,
text=None,
filename=None,
uri=None,
format_exceptions=False,
error_handler=None,
lookup=None,
output_encoding=None,
encoding_errors="strict",
module_directory=None,
cache_args=None,
cache_impl="beaker",
cache_enabled=True,
cache_type=None,
cache_dir=None,
cache_url=None,
module_filename=None,
input_encoding=None,
disable_unicode=False,
module_writer=None,
bytestring_passthrough=False,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
future_imports=None,
enable_loop=True,
preprocessor=None,
lexer_cls=None,
include_error_handler=None,
):
if uri:
self.module_id = re.sub(r"\W", "_", uri)
self.uri = uri
elif filename:
self.module_id = re.sub(r"\W", "_", filename)
drive, path = os.path.splitdrive(filename)
path = os.path.normpath(path).replace(os.path.sep, "/")
self.uri = path
else:
self.module_id = "memory:" + hex(id(self))
self.uri = self.module_id
u_norm = self.uri
if u_norm.startswith("/"):
u_norm = u_norm[1:]
u_norm = os.path.normpath(u_norm)
if u_norm.startswith(".."):
raise exceptions.TemplateLookupException(
'Template uri "%s" is invalid - '
"it cannot be relative outside "
"of the root path." % self.uri
)
self.input_encoding = input_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
self.bytestring_passthrough = bytestring_passthrough or disable_unicode
self.enable_loop = enable_loop
self.strict_undefined = strict_undefined
self.module_writer = module_writer
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not " "support disabling Unicode"
)
elif output_encoding and disable_unicode:
raise exceptions.UnsupportedError(
"output_encoding must be set to "
"None when disable_unicode is used."
)
if default_filters is None:
if compat.py3k or self.disable_unicode:
self.default_filters = ["str"]
else:
self.default_filters = ["unicode"]
else:
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.future_imports = future_imports
self.preprocessor = preprocessor
if lexer_cls is not None:
self.lexer_cls = lexer_cls
# if plain text, compile code in memory only
if text is not None:
(code, module) = _compile_text(self, text, filename)
self._code = code
self._source = text
ModuleInfo(module, None, self, filename, code, text, uri)
elif filename is not None:
# if template filename and a module directory, load
# a filesystem-based module file, generating if needed
if module_filename is not None:
path = module_filename
elif module_directory is not None:
path = os.path.abspath(
os.path.join(
os.path.normpath(module_directory), u_norm + ".py"
)
)
else:
path = None
module = self._compile_from_file(path, filename)
else:
raise exceptions.RuntimeException(
"Template requires text or filename"
)
self.module = module
self.filename = filename
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.include_error_handler = include_error_handler
self.lookup = lookup
self.module_directory = module_directory
self._setup_cache_args(
cache_impl,
cache_enabled,
cache_args,
cache_type,
cache_dir,
cache_url,
)
@util.memoized_property
def reserved_names(self):
if self.enable_loop:
return codegen.RESERVED_NAMES
else:
return codegen.RESERVED_NAMES.difference(["loop"])
def _setup_cache_args(
self,
cache_impl,
cache_enabled,
cache_args,
cache_type,
cache_dir,
cache_url,
):
self.cache_impl = cache_impl
self.cache_enabled = cache_enabled
if cache_args:
self.cache_args = cache_args
else:
self.cache_args = {}
# transfer deprecated cache_* args
if cache_type:
self.cache_args["type"] = cache_type
if cache_dir:
self.cache_args["dir"] = cache_dir
if cache_url:
self.cache_args["url"] = cache_url
def _compile_from_file(self, path, filename):
if path is not None:
util.verify_directory(os.path.dirname(path))
filemtime = os.stat(filename)[stat.ST_MTIME]
if (
not os.path.exists(path)
or os.stat(path)[stat.ST_MTIME] < filemtime
):
data = util.read_file(filename)
_compile_module_file(
self, data, filename, path, self.module_writer
)
module = compat.load_module(self.module_id, path)
if module._magic_number != codegen.MAGIC_NUMBER:
data = util.read_file(filename)
_compile_module_file(
self, data, filename, path, self.module_writer
)
module = compat.load_module(self.module_id, path)
ModuleInfo(module, path, self, filename, None, None, None)
else:
# template filename and no module directory, compile code
# in memory
data = util.read_file(filename)
code, module = _compile_text(self, data, filename)
self._source = None
self._code = code
ModuleInfo(module, None, self, filename, code, None, None)
return module
@property
def source(self):
"""Return the template source code for this :class:`.Template`."""
return _get_module_info_from_callable(self.callable_).source
@property
def code(self):
"""Return the module source code for this :class:`.Template`."""
return _get_module_info_from_callable(self.callable_).code
@util.memoized_property
def cache(self):
return cache.Cache(self)
@property
def cache_dir(self):
return self.cache_args["dir"]
@property
def cache_url(self):
return self.cache_args["url"]
@property
def cache_type(self):
return self.cache_args["type"]
def render(self, *args, **data):
"""Render the output of this template as a string.
If the template specifies an output encoding, the string
will be encoded accordingly, else the output is raw (raw
output uses `cStringIO` and can't handle multibyte
characters). A :class:`.Context` object is created corresponding
to the given data. Arguments that are explicitly declared
by this template's internal rendering method are also
pulled from the given ``*args``, ``**data`` members.
"""
return runtime._render(self, self.callable_, args, data)
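# Minimal usage sketch (illustrative, not part of the original module):
#
#   from mako.template import Template
#   Template(u"hello, ${name}!").render(name=u"world")
#   # -> u'hello, world!' (or bytes, when output_encoding is set)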
def render_unicode(self, *args, **data):
"""Render the output of this template as a unicode object."""
return runtime._render(
self, self.callable_, args, data, as_unicode=True
)
def render_context(self, context, *args, **kwargs):
"""Render this :class:`.Template` with the given context.
The data is written to the context's buffer.
"""
if getattr(context, "_with_template", None) is None:
context._set_with_template(self)
runtime._render_context(self, self.callable_, context, *args, **kwargs)
def has_def(self, name):
return hasattr(self.module, "render_%s" % name)
def get_def(self, name):
"""Return a def of this template as a :class:`.DefTemplate`."""
return DefTemplate(self, getattr(self.module, "render_%s" % name))
def list_defs(self):
"""return a list of defs in the template.
.. versionadded:: 1.0.4
"""
return [i[7:] for i in dir(self.module) if i[:7] == "render_"]
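# Example (illustrative): for a template containing
# ``<%def name="title()">...</%def>``, ``t.list_defs()`` includes 'title'
# (along with 'body'), and ``t.get_def('title').render()`` renders just
# that def.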
def _get_def_callable(self, name):
return getattr(self.module, "render_%s" % name)
@property
def last_modified(self):
return self.module._modified_time
class ModuleTemplate(Template):
"""A Template which is constructed given an existing Python module.
e.g.::
t = Template("this is a template")
f = file("mymodule.py", "w")
f.write(t.code)
f.close()
import mymodule
t = ModuleTemplate(mymodule)
print(t.render())
"""
def __init__(
self,
module,
module_filename=None,
template=None,
template_filename=None,
module_source=None,
template_source=None,
output_encoding=None,
encoding_errors="strict",
disable_unicode=False,
bytestring_passthrough=False,
format_exceptions=False,
error_handler=None,
lookup=None,
cache_args=None,
cache_impl="beaker",
cache_enabled=True,
cache_type=None,
cache_dir=None,
cache_url=None,
include_error_handler=None,
):
self.module_id = re.sub(r"\W", "_", module._template_uri)
self.uri = module._template_uri
self.input_encoding = module._source_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
self.bytestring_passthrough = bytestring_passthrough or disable_unicode
self.enable_loop = module._enable_loop
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not " "support disabling Unicode"
)
elif output_encoding and disable_unicode:
raise exceptions.UnsupportedError(
"output_encoding must be set to "
"None when disable_unicode is used."
)
self.module = module
self.filename = template_filename
ModuleInfo(
module,
module_filename,
self,
template_filename,
module_source,
template_source,
module._template_uri,
)
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.include_error_handler = include_error_handler
self.lookup = lookup
self._setup_cache_args(
cache_impl,
cache_enabled,
cache_args,
cache_type,
cache_dir,
cache_url,
)
class DefTemplate(Template):
"""A :class:`.Template` which represents a callable def in a parent
template."""
def __init__(self, parent, callable_):
self.parent = parent
self.callable_ = callable_
self.output_encoding = parent.output_encoding
self.module = parent.module
self.encoding_errors = parent.encoding_errors
self.format_exceptions = parent.format_exceptions
self.error_handler = parent.error_handler
self.include_error_handler = parent.include_error_handler
self.enable_loop = parent.enable_loop
self.lookup = parent.lookup
self.bytestring_passthrough = parent.bytestring_passthrough
def get_def(self, name):
return self.parent.get_def(name)
class ModuleInfo(object):
"""Stores information about a module currently loaded into
memory, provides reverse lookups of template source, module
source code based on a module's identifier.
"""
_modules = weakref.WeakValueDictionary()
def __init__(
self,
module,
module_filename,
template,
template_filename,
module_source,
template_source,
template_uri,
):
self.module = module
self.module_filename = module_filename
self.template_filename = template_filename
self.module_source = module_source
self.template_source = template_source
self.template_uri = template_uri
self._modules[module.__name__] = template._mmarker = self
if module_filename:
self._modules[module_filename] = self
@classmethod
def get_module_source_metadata(cls, module_source, full_line_map=False):
source_map = re.search(
r"__M_BEGIN_METADATA(.+?)__M_END_METADATA", module_source, re.S
).group(1)
source_map = json.loads(source_map)
source_map["line_map"] = dict(
(int(k), int(v)) for k, v in source_map["line_map"].items()
)
if full_line_map:
f_line_map = source_map["full_line_map"] = []
line_map = source_map["line_map"]
curr_templ_line = 1
for mod_line in range(1, max(line_map)):
if mod_line in line_map:
curr_templ_line = line_map[mod_line]
f_line_map.append(curr_templ_line)
return source_map
@property
def code(self):
if self.module_source is not None:
return self.module_source
else:
return util.read_python_file(self.module_filename)
@property
def source(self):
if self.template_source is not None:
if self.module._source_encoding and not isinstance(
self.template_source, compat.text_type
):
return self.template_source.decode(
self.module._source_encoding
)
else:
return self.template_source
else:
data = util.read_file(self.template_filename)
if self.module._source_encoding:
return data.decode(self.module._source_encoding)
else:
return data
def _compile(template, text, filename, generate_magic_comment):
lexer = template.lexer_cls(
text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor,
)
node = lexer.parse()
source = codegen.compile(
node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
future_imports=template.future_imports,
source_encoding=lexer.encoding,
generate_magic_comment=generate_magic_comment,
disable_unicode=template.disable_unicode,
strict_undefined=template.strict_undefined,
enable_loop=template.enable_loop,
reserved_names=template.reserved_names,
)
return source, lexer
def _compile_text(template, text, filename):
identifier = template.module_id
source, lexer = _compile(
template,
text,
filename,
generate_magic_comment=template.disable_unicode,
)
cid = identifier
if not compat.py3k and isinstance(cid, compat.text_type):
cid = cid.encode()
module = types.ModuleType(cid)
code = compile(source, cid, "exec")
# this exec() works for 2.4->3.3.
exec(code, module.__dict__, module.__dict__)
return (source, module)
def _compile_module_file(template, text, filename, outputpath, module_writer):
source, lexer = _compile(
template, text, filename, generate_magic_comment=True
)
if isinstance(source, compat.text_type):
source = source.encode(lexer.encoding or "ascii")
if module_writer:
module_writer(source, outputpath)
else:
# make tempfiles in the same location as the ultimate
# location. this ensures they're on the same filesystem,
# avoiding synchronization issues.
(dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
def _get_module_info_from_callable(callable_):
if compat.py3k:
return _get_module_info(callable_.__globals__["__name__"])
else:
return _get_module_info(callable_.func_globals["__name__"])
def _get_module_info(filename):
return ModuleInfo._modules[filename]
# ==== end of record: lib/mako/template.py (26,455 bytes, 641 lines, Python, rembo10/headphones, GPL-3.0) ====
# ==== record 8,693: parsetree.py (rembo10_headphones/lib/mako/parsetree.py) ====
# mako/parsetree.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
import re
from mako import ast
from mako import compat
from mako import exceptions
from mako import filters
from mako import util
class Node(object):
"""base class for a Node in the parse tree."""
def __init__(self, source, lineno, pos, filename):
self.source = source
self.lineno = lineno
self.pos = pos
self.filename = filename
@property
def exception_kwargs(self):
return {
"source": self.source,
"lineno": self.lineno,
"pos": self.pos,
"filename": self.filename,
}
def get_children(self):
return []
def accept_visitor(self, visitor):
def traverse(node):
for n in node.get_children():
n.accept_visitor(visitor)
method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
method(self)
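# Illustrative visitor sketch (an assumed example, not part of the original
# file): accept_visitor() dispatches on "visit" + the node's class name and
# falls back to traversing children when no matching method exists, e.g.:
#
#   class ExprCollector(object):
#       def __init__(self):
#           self.exprs = []
#       def visitExpression(self, node):
#           self.exprs.append(node.text)
#
#   # tree.accept_visitor(ExprCollector()) gathers all ${...} expression
#   # texts from a parsed template tree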
class TemplateNode(Node):
"""a 'container' node that stores the overall collection of nodes."""
def __init__(self, filename):
super(TemplateNode, self).__init__("", 0, 0, filename)
self.nodes = []
self.page_attributes = {}
def get_children(self):
return self.nodes
def __repr__(self):
return "TemplateNode(%s, %r)" % (
util.sorted_dict_repr(self.page_attributes),
self.nodes,
)
class ControlLine(Node):
"""defines a control line, a line-oriented python line or end tag.
e.g.::
% if foo:
(markup)
% endif
"""
has_loop_context = False
def __init__(self, keyword, isend, text, **kwargs):
super(ControlLine, self).__init__(**kwargs)
self.text = text
self.keyword = keyword
self.isend = isend
self.is_primary = keyword in ["for", "if", "while", "try", "with"]
self.nodes = []
if self.isend:
self._declared_identifiers = []
self._undeclared_identifiers = []
else:
code = ast.PythonFragment(text, **self.exception_kwargs)
self._declared_identifiers = code.declared_identifiers
self._undeclared_identifiers = code.undeclared_identifiers
def get_children(self):
return self.nodes
def declared_identifiers(self):
return self._declared_identifiers
def undeclared_identifiers(self):
return self._undeclared_identifiers
def is_ternary(self, keyword):
"""return true if the given keyword is a ternary keyword
for this ControlLine"""
return keyword in {
"if": set(["else", "elif"]),
"try": set(["except", "finally"]),
"for": set(["else"]),
}.get(self.keyword, [])
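# Example (illustrative): for ``ControlLine('if', ...)``, is_ternary('elif')
# and is_ternary('else') are True, while for a ``for`` line only 'else'
# qualifies.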
def __repr__(self):
return "ControlLine(%r, %r, %r, %r)" % (
self.keyword,
self.text,
self.isend,
(self.lineno, self.pos),
)
class Text(Node):
"""defines plain text in the template."""
def __init__(self, content, **kwargs):
super(Text, self).__init__(**kwargs)
self.content = content
def __repr__(self):
return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node):
"""defines a Python code block, either inline or module level.
e.g.::
inline:
<%
x = 12
%>
module level:
<%!
import logger
%>
"""
def __init__(self, text, ismodule, **kwargs):
super(Code, self).__init__(**kwargs)
self.text = text
self.ismodule = ismodule
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers
def undeclared_identifiers(self):
return self.code.undeclared_identifiers
def __repr__(self):
return "Code(%r, %r, %r)" % (
self.text,
self.ismodule,
(self.lineno, self.pos),
)
class Comment(Node):
"""defines a comment line.
# this is a comment
"""
def __init__(self, text, **kwargs):
super(Comment, self).__init__(**kwargs)
self.text = text
def __repr__(self):
return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
"""defines an inline expression.
${x+y}
"""
def __init__(self, text, escapes, **kwargs):
super(Expression, self).__init__(**kwargs)
self.text = text
self.escapes = escapes
self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
self.code = ast.PythonCode(text, **self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
# TODO: make the "filter" shortcut list configurable at parse/gen time
return self.code.undeclared_identifiers.union(
self.escapes_code.undeclared_identifiers.difference(
set(filters.DEFAULT_ESCAPES.keys())
)
).difference(self.code.declared_identifiers)
def __repr__(self):
return "Expression(%r, %r, %r)" % (
self.text,
self.escapes_code.args,
(self.lineno, self.pos),
)
class _TagMeta(type):
"""metaclass to allow Tag to produce a subclass according to
its keyword"""
_classmap = {}
def __init__(cls, clsname, bases, dict_):
if getattr(cls, "__keyword__", None) is not None:
cls._classmap[cls.__keyword__] = cls
super(_TagMeta, cls).__init__(clsname, bases, dict_)
def __call__(cls, keyword, attributes, **kwargs):
if ":" in keyword:
ns, defname = keyword.split(":")
return type.__call__(
CallNamespaceTag, ns, defname, attributes, **kwargs
)
try:
cls = _TagMeta._classmap[keyword]
except KeyError:
raise exceptions.CompileException(
"No such tag: '%s'" % keyword,
source=kwargs["source"],
lineno=kwargs["lineno"],
pos=kwargs["pos"],
filename=kwargs["filename"],
)
return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(compat.with_metaclass(_TagMeta, Node)):
"""abstract base class for tags.
e.g.::
<%sometag/>
<%someothertag>
stuff
</%someothertag>
"""
__keyword__ = None
def __init__(
self,
keyword,
attributes,
expressions,
nonexpressions,
required,
**kwargs
):
r"""construct a new Tag instance.
this constructor is not called directly, and is only called
by subclasses.
:param keyword: the tag keyword
:param attributes: raw dictionary of attribute key/value pairs
:param expressions: a set of identifiers that are legal attributes,
which can also contain embedded expressions
:param nonexpressions: a set of identifiers that are legal
attributes, which cannot contain embedded expressions
:param \**kwargs:
other arguments passed to the Node superclass (lineno, pos)
"""
super(Tag, self).__init__(**kwargs)
self.keyword = keyword
self.attributes = attributes
self._parse_attributes(expressions, nonexpressions)
missing = [r for r in required if r not in self.parsed_attributes]
if len(missing):
raise exceptions.CompileException(
"Missing attribute(s): %s"
% ",".join([repr(m) for m in missing]),
**self.exception_kwargs
)
self.parent = None
self.nodes = []
def is_root(self):
return self.parent is None
def get_children(self):
return self.nodes
def _parse_attributes(self, expressions, nonexpressions):
undeclared_identifiers = set()
self.parsed_attributes = {}
for key in self.attributes:
if key in expressions:
expr = []
for x in re.compile(r"(\${.+?})", re.S).split(
self.attributes[key]
):
m = re.compile(r"^\${(.+?)}$", re.S).match(x)
if m:
code = ast.PythonCode(
m.group(1).rstrip(), **self.exception_kwargs
)
# we aren't discarding "declared_identifiers" here,
# which we do so that list comprehension-declared
# variables aren't counted. As yet can't find a
# condition that requires it here.
undeclared_identifiers = undeclared_identifiers.union(
code.undeclared_identifiers
)
expr.append("(%s)" % m.group(1))
else:
if x:
expr.append(repr(x))
self.parsed_attributes[key] = " + ".join(expr) or repr("")
elif key in nonexpressions:
if re.search(r"\${.+?}", self.attributes[key]):
raise exceptions.CompileException(
"Attibute '%s' in tag '%s' does not allow embedded "
"expressions" % (key, self.keyword),
**self.exception_kwargs
)
self.parsed_attributes[key] = repr(self.attributes[key])
else:
raise exceptions.CompileException(
"Invalid attribute for tag '%s': '%s'"
% (self.keyword, key),
**self.exception_kwargs
)
self.expression_undeclared_identifiers = undeclared_identifiers
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
return self.expression_undeclared_identifiers
def __repr__(self):
return "%s(%r, %s, %r, %r)" % (
self.__class__.__name__,
self.keyword,
util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos),
self.nodes,
)
class IncludeTag(Tag):
__keyword__ = "include"
def __init__(self, keyword, attributes, **kwargs):
super(IncludeTag, self).__init__(
keyword,
attributes,
("file", "import", "args"),
(),
("file",),
**kwargs
)
self.page_args = ast.PythonCode(
"__DUMMY(%s)" % attributes.get("args", ""), **self.exception_kwargs
)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
identifiers = self.page_args.undeclared_identifiers.difference(
set(["__DUMMY"])
).difference(self.page_args.declared_identifiers)
return identifiers.union(
super(IncludeTag, self).undeclared_identifiers()
)
class NamespaceTag(Tag):
__keyword__ = "namespace"
def __init__(self, keyword, attributes, **kwargs):
super(NamespaceTag, self).__init__(
keyword,
attributes,
("file",),
("name", "inheritable", "import", "module"),
(),
**kwargs
)
self.name = attributes.get("name", "__anon_%s" % hex(abs(id(self))))
if "name" not in attributes and "import" not in attributes:
raise exceptions.CompileException(
"'name' and/or 'import' attributes are required "
"for <%namespace>",
**self.exception_kwargs
)
if "file" in attributes and "module" in attributes:
raise exceptions.CompileException(
"<%namespace> may only have one of 'file' or 'module'",
**self.exception_kwargs
)
def declared_identifiers(self):
return []
class TextTag(Tag):
__keyword__ = "text"
def __init__(self, keyword, attributes, **kwargs):
super(TextTag, self).__init__(
keyword, attributes, (), ("filter"), (), **kwargs
)
self.filter_args = ast.ArgumentList(
attributes.get("filter", ""), **self.exception_kwargs
)
def undeclared_identifiers(self):
return self.filter_args.undeclared_identifiers.difference(
filters.DEFAULT_ESCAPES.keys()
).union(self.expression_undeclared_identifiers)
class DefTag(Tag):
__keyword__ = "def"
def __init__(self, keyword, attributes, **kwargs):
expressions = ["buffered", "cached"] + [
c for c in attributes if c.startswith("cache_")
]
super(DefTag, self).__init__(
keyword,
attributes,
expressions,
("name", "filter", "decorator"),
("name",),
**kwargs
)
name = attributes["name"]
if re.match(r"^[\w_]+$", name):
raise exceptions.CompileException(
"Missing parenthesis in %def", **self.exception_kwargs
)
self.function_decl = ast.FunctionDecl(
"def " + name + ":pass", **self.exception_kwargs
)
self.name = self.function_decl.funcname
self.decorator = attributes.get("decorator", "")
self.filter_args = ast.ArgumentList(
attributes.get("filter", ""), **self.exception_kwargs
)
is_anonymous = False
is_block = False
@property
def funcname(self):
return self.function_decl.funcname
def get_argument_expressions(self, **kw):
return self.function_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.function_decl.allargnames
def undeclared_identifiers(self):
res = []
for c in self.function_decl.defaults:
res += list(
ast.PythonCode(
c, **self.exception_kwargs
).undeclared_identifiers
)
return (
set(res)
.union(
self.filter_args.undeclared_identifiers.difference(
filters.DEFAULT_ESCAPES.keys()
)
)
.union(self.expression_undeclared_identifiers)
.difference(self.function_decl.allargnames)
)
class BlockTag(Tag):
__keyword__ = "block"
def __init__(self, keyword, attributes, **kwargs):
expressions = ["buffered", "cached", "args"] + [
c for c in attributes if c.startswith("cache_")
]
super(BlockTag, self).__init__(
keyword,
attributes,
expressions,
("name", "filter", "decorator"),
(),
**kwargs
)
name = attributes.get("name")
if name and not re.match(r"^[\w_]+$", name):
raise exceptions.CompileException(
"%block may not specify an argument signature",
**self.exception_kwargs
)
if not name and attributes.get("args", None):
raise exceptions.CompileException(
"Only named %blocks may specify args", **self.exception_kwargs
)
self.body_decl = ast.FunctionArgs(
attributes.get("args", ""), **self.exception_kwargs
)
self.name = name
self.decorator = attributes.get("decorator", "")
self.filter_args = ast.ArgumentList(
attributes.get("filter", ""), **self.exception_kwargs
)
is_block = True
@property
def is_anonymous(self):
return self.name is None
@property
def funcname(self):
return self.name or "__M_anon_%d" % (self.lineno,)
def get_argument_expressions(self, **kw):
return self.body_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.body_decl.allargnames
def undeclared_identifiers(self):
return (
self.filter_args.undeclared_identifiers.difference(
filters.DEFAULT_ESCAPES.keys()
)
).union(self.expression_undeclared_identifiers)
class CallTag(Tag):
__keyword__ = "call"
def __init__(self, keyword, attributes, **kwargs):
super(CallTag, self).__init__(
keyword, attributes, ("args"), ("expr",), ("expr",), **kwargs
)
self.expression = attributes["expr"]
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(
attributes.get("args", ""), **self.exception_kwargs
)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.difference(
self.code.declared_identifiers
)
class CallNamespaceTag(Tag):
def __init__(self, namespace, defname, attributes, **kwargs):
super(CallNamespaceTag, self).__init__(
namespace + ":" + defname,
attributes,
tuple(attributes.keys()) + ("args",),
(),
(),
**kwargs
)
self.expression = "%s.%s(%s)" % (
namespace,
defname,
",".join(
[
"%s=%s" % (k, v)
for k, v in self.parsed_attributes.items()
if k != "args"
]
),
)
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(
attributes.get("args", ""), **self.exception_kwargs
)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.difference(
self.code.declared_identifiers
)
class InheritTag(Tag):
__keyword__ = "inherit"
def __init__(self, keyword, attributes, **kwargs):
super(InheritTag, self).__init__(
keyword, attributes, ("file",), (), ("file",), **kwargs
)
class PageTag(Tag):
__keyword__ = "page"
def __init__(self, keyword, attributes, **kwargs):
expressions = [
"cached",
"args",
"expression_filter",
"enable_loop",
] + [c for c in attributes if c.startswith("cache_")]
super(PageTag, self).__init__(
keyword, attributes, expressions, (), (), **kwargs
)
self.body_decl = ast.FunctionArgs(
attributes.get("args", ""), **self.exception_kwargs
)
self.filter_args = ast.ArgumentList(
attributes.get("expression_filter", ""), **self.exception_kwargs
)
def declared_identifiers(self):
return self.body_decl.allargnames
# ==== end of record: lib/mako/parsetree.py (19,411 bytes, 522 lines, Python, rembo10/headphones, GPL-3.0) ====
# ==== record 8,694: runtime.py (rembo10_headphones/lib/mako/runtime.py) ====
# mako/runtime.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
import functools
import sys
from mako import compat
from mako import exceptions
from mako import util
from mako.compat import compat_builtins
class Context(object):
"""Provides runtime namespace, output buffer, and various
callstacks for templates.
See :ref:`runtime_toplevel` for detail on the usage of
:class:`.Context`.
"""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._data = data
self._kwargs = data.copy()
self._with_template = None
self._outputting_as_unicode = None
self.namespaces = {}
# "capture" function which proxies to the
# generic "capture" function
self._data["capture"] = functools.partial(capture, self)
# "caller" stack used by def calls with content
self.caller_stack = self._data["caller"] = CallerStack()
def _set_with_template(self, t):
self._with_template = t
illegal_names = t.reserved_names.intersection(self._data)
if illegal_names:
raise exceptions.NameConflictError(
"Reserved words passed to render(): %s"
% ", ".join(illegal_names)
)
@property
def lookup(self):
"""Return the :class:`.TemplateLookup` associated
with this :class:`.Context`.
"""
return self._with_template.lookup
@property
def kwargs(self):
"""Return the dictionary of top level keyword arguments associated
with this :class:`.Context`.
This dictionary only includes the top-level arguments passed to
:meth:`.Template.render`. It does not include names produced within
the template execution such as local variable names or special names
such as ``self``, ``next``, etc.
The purpose of this dictionary is primarily for the case that
a :class:`.Template` accepts arguments via its ``<%page>`` tag,
which are normally expected to be passed via :meth:`.Template.render`,
except the template is being called in an inheritance context,
using the ``body()`` method. :attr:`.Context.kwargs` can then be
used to propagate these arguments to the inheriting template::
${next.body(**context.kwargs)}
"""
return self._kwargs.copy()
def push_caller(self, caller):
"""Push a ``caller`` callable onto the callstack for
this :class:`.Context`."""
self.caller_stack.append(caller)
def pop_caller(self):
"""Pop a ``caller`` callable onto the callstack for this
:class:`.Context`."""
del self.caller_stack[-1]
def keys(self):
"""Return a list of all names established in this :class:`.Context`."""
return list(self._data.keys())
def __getitem__(self, key):
if key in self._data:
return self._data[key]
else:
return compat_builtins.__dict__[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
"""Return a value from this :class:`.Context`."""
return self._data.get(key, compat_builtins.__dict__.get(key, default))
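# Example (illustrative): names fall back to Python builtins, so
# ``context.get('len')`` returns the builtin ``len`` unless the caller
# passed its own ``len`` into render().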
def write(self, string):
"""Write a string to this :class:`.Context` object's
underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""Return the current writer function."""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._kwargs = self._kwargs
c._with_template = self._with_template
c._outputting_as_unicode = self._outputting_as_unicode
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def _locals(self, d):
"""Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
"""
if not d:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this :class:`.Context`. with
tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop("self", None)
x.pop("parent", None)
x.pop("next", None)
return c
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return len(self) and self._get_caller() and True or False
def _get_caller(self):
# this method can be removed once
# codegen MAGIC_NUMBER moves past 7
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
frame = self.nextcaller or None
self.append(frame)
self.nextcaller = None
return frame
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""Represents an undefined value in a template.
All template modules have a constant value
``UNDEFINED`` present which is an instance of this
object.
"""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
UNDEFINED = Undefined()
STOP_RENDERING = ""
class LoopStack(object):
"""a stack for LoopContexts that implements the context manager protocol
to automatically pop off the top of the stack on context exit
"""
def __init__(self):
self.stack = []
def _enter(self, iterable):
self._push(iterable)
return self._top
def _exit(self):
self._pop()
return self._top
@property
def _top(self):
if self.stack:
return self.stack[-1]
else:
return self
def _pop(self):
return self.stack.pop()
def _push(self, iterable):
new = LoopContext(iterable)
if self.stack:
new.parent = self.stack[-1]
return self.stack.append(new)
def __getattr__(self, key):
raise exceptions.RuntimeException("No loop context is established")
def __iter__(self):
return iter(self._top)
class LoopContext(object):
"""A magic loop variable.
Automatically accessible in any ``% for`` block.
See the section :ref:`loop_context` for usage
notes.
:attr:`parent` -> :class:`.LoopContext` or ``None``
The parent loop, if one exists.
:attr:`index` -> `int`
The 0-based iteration count.
:attr:`reverse_index` -> `int`
The number of iterations remaining.
:attr:`first` -> `bool`
``True`` on the first iteration, ``False`` otherwise.
:attr:`last` -> `bool`
``True`` on the last iteration, ``False`` otherwise.
:attr:`even` -> `bool`
``True`` when ``index`` is even.
:attr:`odd` -> `bool`
``True`` when ``index`` is odd.
"""
def __init__(self, iterable):
self._iterable = iterable
self.index = 0
self.parent = None
def __iter__(self):
for i in self._iterable:
yield i
self.index += 1
@util.memoized_instancemethod
def __len__(self):
return len(self._iterable)
@property
def reverse_index(self):
return len(self) - self.index - 1
@property
def first(self):
return self.index == 0
@property
def last(self):
return self.index == len(self) - 1
@property
def even(self):
return not self.odd
@property
def odd(self):
return bool(self.index % 2)
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)]
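# Example (illustrative, inside a Mako template, where ``loop`` is the
# implicit LoopContext):
#
#   % for item in items:
#       <li class="${loop.cycle('odd', 'even')}">${item}</li>
#   % endfor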
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""Provides access to collections of rendering methods, which
can be local, from other templates, or from imported modules.
To access a particular rendering method referenced by a
:class:`.Namespace`, use plain attribute access:
.. sourcecode:: mako
${some_namespace.foo(x, y, z)}
:class:`.Namespace` also contains several built-in attributes
described here.
"""
def __init__(
self,
name,
context,
callables=None,
inherits=None,
populate_self=True,
calling_uri=None,
):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
callables = ()
module = None
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
template = None
"""The :class:`.Template` object referenced by this
:class:`.Namespace`, if any.
"""
context = None
"""The :class:`.Context` object for this :class:`.Namespace`.
Namespaces are often created with copies of contexts that
contain slightly different data, particularly in inheritance
scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
can traverse an entire chain of templates that inherit from
one-another.
"""
filename = None
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
If this is a pure module-based
:class:`.Namespace`, this evaluates to ``module.__file__``. If a
template-based namespace, it evaluates to the original
template file location.
"""
uri = None
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
_templateuri = None
@util.memoized_property
def attr(self):
"""Access module level attributes by name.
This accessor allows templates to supply "scalar"
attributes which are particularly handy in inheritance
relationships.
.. seealso::
:ref:`inheritance_attr`
:ref:`namespace_attr_for_includes`
"""
return _NSAttr(self)
def get_namespace(self, uri):
"""Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
In
most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
"""
key = (self, uri)
if key in self.context.namespaces:
return self.context.namespaces[key]
else:
ns = TemplateNamespace(
uri,
self.context._copy(),
templateuri=uri,
calling_uri=self._templateuri,
)
self.context.namespaces[key] = ns
return ns
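    # Sketch of runtime namespace lookup (the uri and variable names are
    # assumptions); useful when the target template is only known inside the
    # template body, where a <%namespace> tag can't be used:
    #
    #     <%
    #         widgets = local.get_namespace("widgets/%s.mako" % skin)
    #     %>
    #     ${widgets.render_button()}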
def get_template(self, uri):
"""Return a :class:`.Template` from the given ``uri``.
The ``uri`` resolution is relative to the ``uri`` of this
:class:`.Namespace` object's :class:`.Template`.
"""
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
"""Return a value from the :class:`.Cache` referenced by this
:class:`.Namespace` object's :class:`.Template`.
The advantage to this method versus direct access to the
:class:`.Cache` is that the configuration parameters
declared in ``<%page>`` take effect here, thereby calling
up the same configured backend as that configured
by ``<%page>``.
"""
return self.cache.get(key, **kwargs)
@property
def cache(self):
"""Return the :class:`.Cache` object referenced
by this :class:`.Namespace` object's
:class:`.Template`.
"""
return self.template.cache
def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == "*":
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" % (self.name, key)
)
setattr(self, key, val)
return val
class TemplateNamespace(Namespace):
"""A :class:`.Namespace` specific to a :class:`.Template` instance."""
def __init__(
self,
name,
context,
template=None,
templateuri=None,
callables=None,
inherits=None,
populate_self=True,
calling_uri=None,
):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
self.template = _lookup_template(context, templateuri, calling_uri)
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
lclcallable, lclcontext = _populate_self_namespace(
context, self.template, self_ns=self
)
@property
def module(self):
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
return self.template.module
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.template.filename
@property
def uri(self):
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
return self.template.uri
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def get(key):
callable_ = self.template._get_def_callable(key)
return functools.partial(callable_, self.context)
for k in self.template.module._exports:
yield (k, get(k))
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.template.has_def(key):
callable_ = self.template._get_def_callable(key)
val = functools.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" % (self.name, key)
)
setattr(self, key, val)
return val
class ModuleNamespace(Namespace):
"""A :class:`.Namespace` specific to a Python module instance."""
def __init__(
self,
name,
context,
module,
callables=None,
inherits=None,
populate_self=True,
calling_uri=None,
):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
mod = __import__(module)
for token in module.split(".")[1:]:
mod = getattr(mod, token)
self.module = mod
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.module.__file__
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
for key in dir(self.module):
if key[0] != "_":
callable_ = getattr(self.module, key)
if callable(callable_):
yield key, functools.partial(callable_, self.context)
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif hasattr(self.module, key):
callable_ = getattr(self.module, key)
val = functools.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" % (self.name, key)
)
setattr(self, key, val)
return val
def supports_caller(func):
"""Apply a caller_stack compatibility decorator to a plain
Python function.
See the example in :ref:`namespaces_python_modules`.
"""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
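# Applying supports_caller to a plain module-level function so templates can
# invoke it with <%call>; a minimal sketch following the documented pattern
# (the function body is illustrative):
#
#     @supports_caller
#     def my_tag(context):
#         context.write("BEFORE")
#         context["caller"].body()
#         context.write("AFTER")
#         return ""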
def capture(context, callable_, *args, **kwargs):
"""Execute the given template def, capturing the output into
a buffer.
See the example in :ref:`namespaces_python_modules`.
"""
if not callable(callable_):
raise exceptions.RuntimeException(
"capture() function expects a callable as "
"its argument (i.e. capture(func, *args, **kwargs))"
)
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
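# Usage sketch (the def name is assumed): capture a def's output as a string
# instead of letting it write to the output stream, e.g. to post-process it:
#
#     html = capture(context, somedef, "arg1", x=5)
#     context.write(html.strip())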
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in
the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
context._clean_inheritance_tokens(), template
)
kwargs = _kwargs_for_include(callable_, context._data, **kwargs)
if template.include_error_handler:
try:
callable_(ctx, **kwargs)
except Exception:
result = template.include_error_handler(ctx, compat.exception_as())
if not result:
compat.reraise(*sys.exc_info())
else:
callable_(ctx, **kwargs)
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context["self"]
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context._locals({"next": ih})
ih.inherits = TemplateNamespace(
"self:%s" % template.uri,
lclcontext,
template=template,
populate_self=False,
)
context._data["parent"] = lclcontext._data["local"] = ih.inherits
callable_ = getattr(template.module, "_mako_inherit", None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, "_mako_generate_namespaces", None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException(
"Template '%s' has no TemplateLookup associated"
% context._with_template.uri
)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except exceptions.TopLevelLookupException:
raise exceptions.TemplateLookupException(str(compat.exception_as()))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = TemplateNamespace(
"self:%s" % template.uri,
context,
template=template,
populate_self=False,
)
context._data["self"] = context._data["local"] = self_ns
if hasattr(template.module, "_mako_inherit"):
ret = template.module._mako_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string
output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(as_unicode=True)
elif template.bytestring_passthrough:
buf = compat.StringIO()
else:
buf = util.FastEncodingBuffer(
as_unicode=as_unicode,
encoding=template.output_encoding,
errors=template.encoding_errors,
)
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._set_with_template(template)
_render_context(
template,
callable_,
context,
*args,
**_kwargs_for_callable(callable_, data)
)
return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data):
argspec = compat.inspect_getargspec(callable_)
# for normal pages, **pageargs is usually present
if argspec[2]:
return data
# for rendering defs from the top level, figure out the args
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
kwargs = {}
for arg in namedargs:
if arg != "context" and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
argspec = compat.inspect_getargspec(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != "context" and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import mako.template as template
# create polymorphic 'self' namespace for this
# template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a
Context, and optional explicit arguments
the contextual Template will be located if it exists, and
the error handling options specified on that Template will
be interpreted here.
"""
template = context._with_template
if template is not None and (
template.format_exceptions or template.error_handler
):
try:
callable_(context, *args, **kwargs)
except Exception:
_render_error(template, context, compat.exception_as())
except:
e = sys.exc_info()[0]
_render_error(template, context, e)
else:
callable_(context, *args, **kwargs)
def _render_error(template, context, error):
if template.error_handler:
result = template.error_handler(context, error)
if not result:
compat.reraise(*sys.exc_info())
else:
error_template = exceptions.html_error_template()
if context._outputting_as_unicode:
context._buffer_stack[:] = [
util.FastEncodingBuffer(as_unicode=True)
]
else:
context._buffer_stack[:] = [
util.FastEncodingBuffer(
error_template.output_encoding,
error_template.encoding_errors,
)
]
context._set_with_template(error_template)
error_template.render_context(context, error=error)
---- file: rembo10_headphones/lib/mako/util.py ----
# mako/util.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ast import parse
import codecs
import collections
import operator
import os
import re
import timeit
from mako import compat
def update_wrapper(decorated, fn):
decorated.__wrapped__ = fn
decorated.__name__ = fn.__name__
return decorated
class PluginLoader(object):
def __init__(self, group):
self.group = group
self.impls = {}
def load(self, name):
if name in self.impls:
return self.impls[name]()
else:
import pkg_resources
for impl in pkg_resources.iter_entry_points(self.group, name):
self.impls[name] = impl.load
return impl.load()
else:
from mako import exceptions
raise exceptions.RuntimeException(
"Can't load plugin %s %s" % (self.group, name)
)
def register(self, name, modulepath, objname):
def load():
mod = __import__(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
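# Usage sketch (the plugin module and class names are hypothetical):
#
#     loader = PluginLoader("mako.cache")
#     loader.register("mycache", "mypkg.cache_impl", "MyCacheImpl")
#     impl_cls = loader.load("mycache")   # imported lazily on first load()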
def verify_directory(dir_):
"""create and/or verify a filesystem directory."""
tries = 0
while not os.path.exists(dir_):
try:
tries += 1
os.makedirs(dir_, compat.octal("0775"))
except:
if tries > 5:
raise
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
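# Minimal illustration (not in the original module): fget runs once; the
# computed value stored in the instance __dict__ then shadows this non-data
# descriptor on subsequent reads:
#
#     class Config(object):
#         @memoized_property
#         def settings(self):
#             print("computing once")
#             return {"debug": True}
#
#     c = Config()
#     c.settings    # prints "computing once"
#     c.settings    # served from c.__dict__; fget is not called again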
class memoized_instancemethod(object):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
def oneshot(*args, **kw):
result = self.fget(obj, *args, **kw)
def memo(*a, **kw):
return result
memo.__name__ = self.__name__
memo.__doc__ = self.__doc__
obj.__dict__[self.__name__] = memo
return result
oneshot.__name__ = self.__name__
oneshot.__doc__ = self.__doc__
return oneshot
class SetLikeDict(dict):
"""a dictionary that has some setlike methods on it"""
def union(self, other):
"""produce a 'union' of this dict and another (at the key level).
        values in the second dict take precedence over those of the first"""
x = SetLikeDict(**self)
x.update(other)
return x
class FastEncodingBuffer(object):
"""a very rudimentary buffer that is faster than StringIO,
but doesn't crash on unicode data like cStringIO."""
def __init__(self, encoding=None, errors="strict", as_unicode=False):
self.data = collections.deque()
self.encoding = encoding
if as_unicode:
self.delim = compat.u("")
else:
self.delim = ""
self.as_unicode = as_unicode
self.errors = errors
self.write = self.data.append
def truncate(self):
self.data = collections.deque()
self.write = self.data.append
def getvalue(self):
if self.encoding:
return self.delim.join(self.data).encode(
self.encoding, self.errors
)
else:
return self.delim.join(self.data)
class LRUCache(dict):
"""A dictionary-like object that stores a limited number of items,
discarding lesser used items periodically.
this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
paradigm so that synchronization is not really needed. the size management
is inexact.
"""
class _Item(object):
def __init__(self, key, value):
self.key = key
self.value = value
self.timestamp = timeit.default_timer()
def __repr__(self):
return repr(self.value)
def __init__(self, capacity, threshold=0.5):
self.capacity = capacity
self.threshold = threshold
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item.timestamp = timeit.default_timer()
return item.value
def values(self):
return [i.value for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = self._Item(key, value)
dict.__setitem__(self, key, item)
else:
item.value = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
bytime = sorted(
dict.values(self),
key=operator.attrgetter("timestamp"),
reverse=True,
)
for item in bytime[self.capacity :]:
try:
del self[item.key]
except KeyError:
# if we couldn't find a key, most likely some other thread
# broke in on us. loop around and try again
break
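# Behavior sketch (values chosen for illustration): with capacity=2 and
# threshold=0.5, pruning starts once the dict grows past 2 + 2 * 0.5 = 3
# entries, keeping only the most recently used ``capacity`` items:
#
#     cache = LRUCache(capacity=2, threshold=0.5)
#     for k in "abcd":
#         cache[k] = k.upper()
#     sorted(cache.keys())    # -> ['c', 'd']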
# Regexp to match python magic encoding line
_PYTHON_MAGIC_COMMENT_re = re.compile(
r"[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)", re.VERBOSE
)
def parse_encoding(fp):
"""Deduce the encoding of a Python source file (binary mode) from magic
comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object in binary mode.
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8) :]
m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode("ascii", "ignore"))
if not m:
try:
parse(line1.decode("ascii", "ignore"))
except (ImportError, SyntaxError):
# Either it's a real syntax error, in which case the source
# is not valid python source, or line2 is a continuation of
# line1, in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = _PYTHON_MAGIC_COMMENT_re.match(
line2.decode("ascii", "ignore")
)
if has_bom:
if m:
raise SyntaxError(
"python refuses to compile code with both a UTF8"
" byte-order-mark and a magic encoding comment"
)
return "utf_8"
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos)
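# Usage sketch (the path is hypothetical); the file position is restored
# before returning, so the caller can still read the file from the top:
#
#     with open("templates/page.py", "rb") as fp:
#         enc = parse_encoding(fp)    # e.g. "utf-8" from a coding comment
#         source = fp.read().decode(enc or "ascii")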
def sorted_dict_repr(d):
"""repr() a dictionary with the keys in order.
Used by the lexer unit test to compare parse trees based on strings.
"""
keys = list(d.keys())
keys.sort()
return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
def restore__ast(_ast):
"""Attempt to restore the required classes to the _ast module if it
appears to be missing them
"""
if hasattr(_ast, "AST"):
return
_ast.PyCF_ONLY_AST = 2 << 9
m = compile(
"""\
def foo(): pass
class Bar(object): pass
if False: pass
baz = 'mako'
1 + 2 - 3 * 4 / 5
6 // 7 % 8 << 9 >> 10
11 & 12 ^ 13 | 14
15 and 16 or 17
-baz + (not +18) - ~17
baz and 'foo' or 'bar'
(mako is baz == baz) is not baz != mako
mako > baz < mako >= baz <= mako
mako in baz not in mako""",
"<unknown>",
"exec",
_ast.PyCF_ONLY_AST,
)
_ast.Module = type(m)
for cls in _ast.Module.__mro__:
if cls.__name__ == "mod":
_ast.mod = cls
elif cls.__name__ == "AST":
_ast.AST = cls
_ast.FunctionDef = type(m.body[0])
_ast.ClassDef = type(m.body[1])
_ast.If = type(m.body[2])
_ast.Name = type(m.body[3].targets[0])
_ast.Store = type(m.body[3].targets[0].ctx)
_ast.Str = type(m.body[3].value)
_ast.Sub = type(m.body[4].value.op)
_ast.Add = type(m.body[4].value.left.op)
_ast.Div = type(m.body[4].value.right.op)
_ast.Mult = type(m.body[4].value.right.left.op)
_ast.RShift = type(m.body[5].value.op)
_ast.LShift = type(m.body[5].value.left.op)
_ast.Mod = type(m.body[5].value.left.left.op)
_ast.FloorDiv = type(m.body[5].value.left.left.left.op)
_ast.BitOr = type(m.body[6].value.op)
_ast.BitXor = type(m.body[6].value.left.op)
_ast.BitAnd = type(m.body[6].value.left.left.op)
_ast.Or = type(m.body[7].value.op)
_ast.And = type(m.body[7].value.values[0].op)
_ast.Invert = type(m.body[8].value.right.op)
_ast.Not = type(m.body[8].value.left.right.op)
_ast.UAdd = type(m.body[8].value.left.right.operand.op)
_ast.USub = type(m.body[8].value.left.left.op)
_ast.Or = type(m.body[9].value.op)
_ast.And = type(m.body[9].value.values[0].op)
_ast.IsNot = type(m.body[10].value.ops[0])
_ast.NotEq = type(m.body[10].value.ops[1])
_ast.Is = type(m.body[10].value.left.ops[0])
_ast.Eq = type(m.body[10].value.left.ops[1])
_ast.Gt = type(m.body[11].value.ops[0])
_ast.Lt = type(m.body[11].value.ops[1])
_ast.GtE = type(m.body[11].value.ops[2])
_ast.LtE = type(m.body[11].value.ops[3])
_ast.In = type(m.body[12].value.ops[0])
_ast.NotIn = type(m.body[12].value.ops[1])
def read_file(path, mode="rb"):
fp = open(path, mode)
try:
data = fp.read()
return data
finally:
fp.close()
def read_python_file(path):
fp = open(path, "rb")
try:
encoding = parse_encoding(fp)
data = fp.read()
if encoding:
data = data.decode(encoding)
return data
finally:
fp.close()
---- file: rembo10_headphones/lib/mako/pygen.py ----
# mako/pygen.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re
from mako import exceptions
class PythonPrinter(object):
def __init__(self, stream):
# indentation counter
self.indent = 0
# a stack storing information about why we incremented
# the indentation counter, to help us determine if we
# should decrement it
self.indent_detail = []
# the string of whitespace multiplied by the indent
# counter to produce a line
self.indentstring = " "
# the stream we are writing to
self.stream = stream
# current line number
self.lineno = 1
# a list of lines that represents a buffered "block" of code,
# which can be later printed relative to an indent level
self.line_buffer = []
self.in_indent_lines = False
self._reset_multi_line_flags()
# mapping of generated python lines to template
# source lines
self.source_map = {}
def _update_lineno(self, num):
self.lineno += num
def start_source(self, lineno):
if self.lineno not in self.source_map:
self.source_map[self.lineno] = lineno
def write_blanks(self, num):
self.stream.write("\n" * num)
self._update_lineno(num)
def write_indented_block(self, block, starting_lineno=None):
"""print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of
the current indent level."""
self.in_indent_lines = False
for i, l in enumerate(re.split(r"\r?\n", block)):
self.line_buffer.append(l)
if starting_lineno is not None:
self.start_source(starting_lineno + i)
self._update_lineno(1)
def writelines(self, *lines):
"""print a series of lines of python."""
for line in lines:
self.writeline(line)
def writeline(self, line):
"""print a line of python, indenting it according to the current
indent level.
this also adjusts the indentation counter according to the
content of the line.
"""
if not self.in_indent_lines:
self._flush_adjusted_lines()
self.in_indent_lines = True
if (
line is None
or re.match(r"^\s*#", line)
or re.match(r"^\s*$", line)
):
hastext = False
else:
hastext = True
is_comment = line and len(line) and line[0] == "#"
# see if this line should decrease the indentation level
if not is_comment and (not hastext or self._is_unindentor(line)):
if self.indent > 0:
self.indent -= 1
# if the indent_detail stack is empty, the user
# probably put extra closures - the resulting
                # module won't compile.
if len(self.indent_detail) == 0:
raise exceptions.SyntaxException(
"Too many whitespace closures"
)
self.indent_detail.pop()
if line is None:
return
# write the line
self.stream.write(self._indent_line(line) + "\n")
self._update_lineno(len(line.split("\n")))
# see if this line should increase the indentation level.
        # note that a line can both decrease (before printing) and
# then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line):
# increment indentation count, and also
# keep track of what the keyword was that indented us,
# if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
if match:
# its a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1)
self.indent += 1
self.indent_detail.append(indentor)
else:
indentor = None
# its not a "compound" keyword. but lets also
# test for valid Python keywords that might be indenting us,
# else assume its a non-indenting line
m2 = re.match(
r"^\s*(def|class|else|elif|except|finally)", line
)
if m2:
self.indent += 1
self.indent_detail.append(indentor)
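    # Illustration (not part of the original class): flat writeline() calls
    # produce indented output, since a trailing ":" bumps the indent level
    # and dedenting keywords reduce it:
    #
    #     import sys
    #     p = PythonPrinter(sys.stdout)
    #     p.writeline("def render(context):")
    #     p.writeline("return 42")
    #     p.close()
    #     # emits:
    #     # def render(context):
    #     #     return 42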
def close(self):
"""close this printer, flushing any remaining lines."""
self._flush_adjusted_lines()
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False
        # if the current line doesn't have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True
        # should we decide that it's not good enough, here's
# more stuff to check.
# keyword = match.group(1)
# match the original indent keyword
# for crit in [
# (r'if|elif', r'else|elif'),
# (r'try', r'except|finally|else'),
# (r'while|for', r'else'),
# ]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword):
# return True
# return False
def _indent_line(self, line, stripspace=""):
"""indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the
start of the line before indenting."""
return re.sub(
r"^%s" % stripspace, self.indentstring * self.indent, line
)
def _reset_multi_line_flags(self):
"""reset the flags which would indicate we are in a backslashed
or triple-quoted section."""
self.backslashed, self.triplequoted = False, False
def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block,
via backslash or triple-quote."""
# we are only looking for explicitly joined lines here, not
# implicit ones (i.e. brackets, braces etc.). this is just to
# guard against the possibility of modifying the space inside of
# a literal multiline string with unfortunately placed
# whitespace
current_state = self.backslashed or self.triplequoted
if re.search(r"\\$", line):
self.backslashed = True
else:
self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
return current_state
def _flush_adjusted_lines(self):
stripspace = None
self._reset_multi_line_flags()
for entry in self.line_buffer:
if self._in_multi_line(entry):
self.stream.write(entry + "\n")
else:
entry = entry.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
stripspace = re.match(r"^([ \t]*)", entry).group(1)
self.stream.write(self._indent_line(entry, stripspace) + "\n")
self.line_buffer = []
self._reset_multi_line_flags()
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = state[backslashed] or state[triplequoted]
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)) :]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
if m:
state[triplequoted] = False
else:
m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
else:
m, line = match(r"#", line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace=""):
return re.sub(r"^%s" % stripspace, "", line)
lines = []
stripspace = None
for line in re.split(r"\r?\n", text):
if in_multi_line(line):
lines.append(line)
else:
line = line.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
stripspace = re.match(r"^([ \t]*)", line).group(1)
lines.append(_indent_line(line, stripspace))
return "\n".join(lines)
---- file: rembo10_headphones/lib/mako/pyparser.py ----
# mako/pyparser.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
import operator
import _ast
from mako import _ast_util
from mako import compat
from mako import exceptions
from mako import util
from mako.compat import arg_stringname
if compat.py3k:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(["True", "False", "None", "print"])
# the "id" attribute on a function node
arg_id = operator.attrgetter("arg")
else:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(["True", "False", "None"])
# the "id" attribute on a function node
arg_id = operator.attrgetter("id")
util.restore__ast(_ast)
def parse(code, mode="exec", **exception_kwargs):
"""Parse an expression into AST"""
try:
return _ast_util.parse(code, "<unknown>", mode)
except Exception:
raise exceptions.SyntaxException(
"(%s) %s (%r)"
% (
compat.exception_as().__class__.__name__,
compat.exception_as(),
code[0:50],
),
**exception_kwargs
)
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = set()
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
if compat.py3k:
# ExceptHandler is in Python 2, but this block only works in
# Python 3 (and is required there)
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.visit(node.type)
for statement in node.body:
self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _expand_tuples(self, args):
for arg in args:
if isinstance(arg, _ast.Tuple):
for n in arg.elts:
yield n
else:
yield arg
def _visit_function(self, node, islambda):
        # push function state onto stack. don't log any more
        # identifiers as "declared" until outside of the function,
        # but keep logging identifiers as "undeclared". track
        # argument names in each function header so they aren't
        # counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union(
[arg_id(arg) for arg in self._expand_tuples(node.args.args)]
)
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
            # this is equivalent to visit_AssName in
# compiler
self._add_declared(node.id)
elif (
node.id not in reserved
and node.id not in self.listener.declared_identifiers
and node.id not in self.local_ident_stack
):
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split(".")[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == "*":
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.",
**self.exception_kwargs
)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
ldi = self.listener.declared_identifiers
self.listener.declared_identifiers = ldi.union(
p.declared_identifiers
)
lui = self.listener.undeclared_identifiers
self.listener.undeclared_identifiers = lui.union(
p.undeclared_identifiers
)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(arg_stringname(node.args.vararg))
if compat.py2k:
# kw-only args don't exist in Python 2
kwargnames = []
else:
kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
if node.args.kwarg:
kwargnames.append(arg_stringname(node.args.kwarg))
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.kwargnames = kwargnames
if compat.py2k:
self.listener.kwdefaults = []
else:
self.listener.kwdefaults = node.args.kw_defaults
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(" " * 4)
self.generator.visit(astnode)
def value(self):
return "".join(self.generator.result)
---- file: rembo10_headphones/lib/mako/ast.py ----
# mako/ast.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
import re
from mako import compat
from mako import exceptions
from mako import pyparser
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in
# the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their
# assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared
# lists.
# using AST to parse instead of using code.co_varnames,
# code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if
        # it's declared later in the same block of code
# - AST is less likely to break with version changes
# (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, compat.string_types):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr)
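# Behavior sketch (illustrative): identifiers assigned anywhere in the code
# land in declared_identifiers; names read without a prior assignment land
# in undeclared_identifiers:
#
#     pc = PythonCode("x = y + 5")
#     pc.declared_identifiers      # -> {'x'}
#     pc.undeclared_identifiers    # -> {'y'}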
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = set()
self.undeclared_identifiers = set()
if isinstance(code, compat.string_types):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
                # if there's text and no trailing comma, ensure it's parsed
# as a tuple by adding a trailing comma
code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr)
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control
statements
e.g.::
for x in 5:
elif y==9:
except (MyException, e):
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S)
if not m:
raise exceptions.CompileException(
"Fragment '%s' is not a partial control statement" % code,
**exception_kwargs
)
if m.group(3):
code = code[: m.start(3)]
(keyword, expr) = m.group(1, 2)
if keyword in ["for", "if", "while"]:
code = code + "pass"
elif keyword == "try":
code = code + "pass\nexcept:pass"
elif keyword == "elif" or keyword == "else":
code = "if False:pass\n" + code + "pass"
elif keyword == "except":
code = "try:pass\n" + code + "pass"
elif keyword == "with":
code = code + "pass"
else:
raise exceptions.CompileException(
"Unsupported control keyword: '%s'" % keyword,
**exception_kwargs
)
super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr)
if not hasattr(self, "funcname"):
raise exceptions.CompileException(
"Code '%s' is not a function declaration" % code,
**exception_kwargs
)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException(
"'**%s' keyword argument not allowed here"
% self.kwargnames[-1],
**exception_kwargs
)
def get_argument_expressions(self, as_call=False):
"""Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = []
# Build in reverse order, since defaults and slurpy args come last
argnames = self.argnames[::-1]
kwargnames = self.kwargnames[::-1]
defaults = self.defaults[::-1]
kwdefaults = self.kwdefaults[::-1]
# Named arguments
if self.kwargs:
namedecls.append("**" + kwargnames.pop(0))
for name in kwargnames:
# Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append(
"%s=%s"
% (name, pyparser.ExpressionGenerator(default).value())
)
else:
namedecls.append(name)
# Positional arguments
if self.varargs:
namedecls.append("*" + argnames.pop(0))
for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else:
default = defaults.pop(0)
namedecls.append(
"%s=%s"
% (name, pyparser.ExpressionGenerator(default).value())
)
namedecls.reverse()
return namedecls
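    # Sketch (illustrative declaration): round-tripping a parsed signature
    # back into printable argument expressions:
    #
    #     decl = FunctionDecl("def foo(a, b=5, **kw):pass")
    #     decl.get_argument_expressions()
    #     # -> ['a', 'b=5', '**kw']
    #     decl.get_argument_expressions(as_call=True)
    #     # -> ['a', 'b', '**kw']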
@property
def allargnames(self):
return tuple(self.argnames) + tuple(self.kwargnames)
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__(
"def ANON(%s):pass" % code, **kwargs
)
---- file: rembo10_headphones/lib/mako/cache.py ----
# mako/cache.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import compat
from mako import util
_cache_plugins = util.PluginLoader("mako.cache")
register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class Cache(object):
"""Represents a data content cache made available to the module
space of a specific :class:`.Template` object.
.. versionadded:: 0.6
:class:`.Cache` by itself is mostly a
container for a :class:`.CacheImpl` object, which implements
a fixed API to provide caching services; specific subclasses exist to
implement different
caching strategies. Mako includes a backend that works with
the Beaker caching system. Beaker itself then supports
a number of backends (i.e. file, memory, memcached, etc.)
The construction of a :class:`.Cache` is part of the mechanics
of a :class:`.Template`, and programmatic access to this
cache is typically via the :attr:`.Template.cache` attribute.
"""
impl = None
"""Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.
This accessor allows a :class:`.CacheImpl` with additional
methods beyond that of :class:`.Cache` to be used programmatically.
"""
id = None
"""Return the 'id' that identifies this cache.
This is a value that should be globally unique to the
:class:`.Template` associated with this cache, and can
be used by a caching system to name a local container
for data specific to this template.
"""
starttime = None
"""Epochal time value for when the owning :class:`.Template` was
first compiled.
A cache implementation may wish to invalidate data earlier than
this timestamp; this has the effect of the cache for a specific
:class:`.Template` starting clean any time the :class:`.Template`
is recompiled, such as when the original template file changed on
the filesystem.
"""
def __init__(self, template, *args):
# check for a stale template calling the
# constructor
if isinstance(template, compat.string_types) and args:
return
self.template = template
self.id = template.module.__name__
self.starttime = template.module._modified_time
self._def_regions = {}
self.impl = self._load_impl(self.template.cache_impl)
def _load_impl(self, name):
return _cache_plugins.load(name)(self)
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
return self._ctx_get_or_create(key, creation_function, None, **kw)
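    # Usage sketch (names assumed): typically reached through a template's
    # ``cache`` attribute; the creation function runs only on a cache miss:
    #
    #     def _expensive():
    #         return compute_report()
    #     value = template.cache.get_or_create("report", _expensive)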
def _ctx_get_or_create(self, key, creation_function, context, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
if not self.template.cache_enabled:
return creation_function()
return self.impl.get_or_create(
key, creation_function, **self._get_cache_kw(kw, context)
)
def set(self, key, value, **kw):
r"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
self.impl.set(key, value, **self._get_cache_kw(kw, None))
put = set
"""A synonym for :meth:`.Cache.set`.
This is here for backwards compatibility.
"""
def get(self, key, **kw):
r"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None))
def invalidate(self, key, **kw):
r"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
self.impl.invalidate(key, **self._get_cache_kw(kw, None))
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this
template.
"""
self.invalidate("render_body", __M_defname="render_body")
def invalidate_def(self, name):
"""Invalidate the cached content of a particular ``<%def>`` within this
template.
"""
self.invalidate("render_%s" % name, __M_defname="render_%s" % name)
def invalidate_closure(self, name):
"""Invalidate a nested ``<%def>`` within this template.
Caching of nested defs is a blunt tool as there is no
management of scope -- nested defs that use cache tags
        need to have names unique among all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, __M_defname=name)
def _get_cache_kw(self, kw, context):
defname = kw.pop("__M_defname", None)
if not defname:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
elif defname in self._def_regions:
tmpl_kw = self._def_regions[defname]
else:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
self._def_regions[defname] = tmpl_kw
if context and self.impl.pass_context:
tmpl_kw = tmpl_kw.copy()
tmpl_kw.setdefault("context", context)
return tmpl_kw
class CacheImpl(object):
"""Provide a cache implementation for use by :class:`.Cache`."""
def __init__(self, cache):
self.cache = cache
pass_context = False
"""If ``True``, the :class:`.Context` will be passed to
:meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
"""
def get_or_create(self, key, creation_function, **kw):
r"""Retrieve a value from the cache, using the given creation function
to generate a new value.
This function *must* return a value, either from
the cache, or via the given creation function.
If the creation function is called, the newly
created value should be populated into the cache
under the given key before being returned.
:param key: the value's key.
:param creation_function: function that when called generates
a new value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def set(self, key, value, **kw):
r"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def get(self, key, **kw):
r"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def invalidate(self, key, **kw):
r"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()