_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def search(cls, *, limit=100, page=1, properties=None, return_query=False):
    """Search for issues based on the provided filters

    Args:
        limit (`int`): Number of results to return. Default: 100
        page (`int`): Pagination offset for results. Default: 1
        properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
            of strings, in which case a boolean OR search is performed on the values
        return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
            sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
            on its own

    Returns:
        `sqlalchemy.orm.Query` if `return_query` is True, otherwise a tuple of
        (`int` total number of matching issues, `list` of `Issue`)
    """
    qry = db.Issue.order_by(Issue.issue_id).filter(
        Issue.issue_type_id == IssueType.get(cls.issue_type).issue_type_id
    )

    if properties:
        for prop_name, value in properties.items():
            # A fresh alias per property lets multiple property
            # constraints be ANDed together on the same query
            alias = aliased(IssueProperty)
            qry = qry.join(alias, Issue.issue_id == alias.issue_id)
            if isinstance(value, list):
                # OR together every accepted value for this property
                where_clause = [alias.value == item for item in value]
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        or_(*where_clause)
                    ).self_group()
                )
            else:
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        alias.value == value
                    ).self_group()
                )

    if return_query:
        return qry

    total = qry.count()
    qry = qry.limit(limit)
    qry = qry.offset((page - 1) * limit if page > 1 else 0)

    return total, [cls(x) for x in qry.all()]
"resource": ""
} |
def state_name(self):
    """Get a human-readable value of the state

    Returns:
        str: Name of the current state

    Raises:
        ValueError: If ``self.state`` is not a known state id
    """
    # State ids are contiguous 1..6; a lookup table is clearer and
    # cheaper than the equivalent if/elif ladder
    names = {
        1: 'New Issue',
        2: 'Shutdown in 1 week',
        3: 'Shutdown in 1 day',
        4: 'Pending Shutdown',
        5: 'Stopped, delete in 12 weeks',
        6: 'Instance deleted',
    }
    try:
        return names[self.state]
    except KeyError:
        raise ValueError('Invalid state: {}'.format(self.state))
"resource": ""
} |
def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False):
    """Copy the ID3 tag from src to dst.

    Returns 0 on success, 1 on any error (a message is printed to stderr).

    Args:
        src: source file path the ID3 tag is read from
        dst: destination file path the tag is written to
        merge (bool): merge into an existing tag in dst instead of
            replacing it
        write_v1 (bool): also write an ID3v1 tag
        excluded_tags (list or None): frame IDs to strip before writing
        verbose (bool): print diagnostics to stderr
    """
    if excluded_tags is None:
        excluded_tags = []

    try:
        id3 = mutagen.id3.ID3(src, translate=False)
    except mutagen.id3.ID3NoHeaderError:
        print_(u"No ID3 header found in ", src, file=sys.stderr)
        return 1
    except Exception as err:
        print_(str(err), file=sys.stderr)
        return 1

    if verbose:
        print_(u"File", src, u"contains:", file=sys.stderr)
        print_(id3.pprint(), file=sys.stderr)

    # Strip every frame the caller asked to exclude
    for tag in excluded_tags:
        id3.delall(tag)

    if merge:
        try:
            target = mutagen.id3.ID3(dst, translate=False)
        except mutagen.id3.ID3NoHeaderError:
            # no need to merge
            pass
        except Exception as err:
            print_(str(err), file=sys.stderr)
            return 1
        else:
            # add src frames into the existing dst tag, then save the
            # merged result
            for frame in id3.values():
                target.add(frame)

            id3 = target

    # if the source is 2.3 save it as 2.3
    if id3.version < (2, 4, 0):
        id3.update_to_v23()
        v2_version = 3
    else:
        id3.update_to_v24()
        v2_version = 4

    try:
        id3.save(dst, v1=(2 if write_v1 else 0), v2_version=v2_version)
    except Exception as err:
        print_(u"Error saving", dst, u":\n%s" % text_type(err),
               file=sys.stderr)
        return 1
    else:
        if verbose:
            print_(u"Successfully saved", dst, file=sys.stderr)
        return 0
"resource": ""
} |
def add_tags(self, ID3=None):
    """Add an empty ID3 tag to the file.

    Args:
        ID3 (ID3): An ID3 subclass to use or `None` to use the one
            that used when loading.

    A custom tag reader may be used in instead of the default
    `ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
    """
    # Refuse to clobber an existing tag
    if self.tags is not None:
        raise error("an ID3 tag already exists")

    if ID3 is None:
        ID3 = self.ID3
    self.ID3 = ID3
    self.tags = ID3()
"resource": ""
} |
def RegisterKey(cls, key,
                getter=None, setter=None, deleter=None, lister=None):
    """Register a new key mapping.

    A key mapping is four functions, a getter, setter, deleter,
    and lister. The key may be either a string or a glob pattern.

    The getter, deleted, and lister receive an MP4Tags instance
    and the requested key name. The setter also receives the
    desired value, which will be a list of strings.

    The getter, setter, and deleter are used to implement __getitem__,
    __setitem__, and __delitem__.

    The lister is used to implement keys(). It should return a
    list of keys that are actually in the MP4 instance, provided
    by its associated getter.
    """
    key = key.lower()
    # Store each supplied handler in its matching dispatch table
    for handler, table in ((getter, cls.Get), (setter, cls.Set),
                           (deleter, cls.Delete), (lister, cls.List)):
        if handler is not None:
            table[key] = handler
"resource": ""
} |
def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
    """Register a scalar integer key.
    """

    def clamp(number):
        # Constrain the integer to [min_value, max_value]
        return int(min(max(min_value, number), max_value))

    def getter(tags, key):
        return [text_type(item) for item in tags[atomid]]

    def setter(tags, key, value):
        tags[atomid] = [clamp(int(item)) for item in value]

    def deleter(tags, key):
        del tags[atomid]

    cls.RegisterKey(key, getter, setter, deleter)
"resource": ""
} |
def pprint(self):
    """Print tag key=value pairs."""
    lines = []
    # Emit one "key=value" line per value, keys in sorted order
    for key in sorted(self.keys()):
        lines.extend("%s=%s" % (key, value) for value in self[key])
    return "\n".join(lines)
"resource": ""
} |
def has_valid_padding(value, bits=7):
    """Whether the padding bits are all zero"""
    assert bits <= 8

    # Mask selecting the (8 - bits) padding bits at the top of each byte
    mask = (((1 << (8 - bits)) - 1) << bits)

    if isinstance(value, integer_types):
        # Walk the integer one byte at a time
        while value:
            if value & mask:
                return False
            value >>= 8
        return True

    if isinstance(value, bytes):
        for byte in bytearray(value):
            if byte & mask:
                return False
        return True

    raise TypeError
"resource": ""
} |
def find_stream(cls, fileobj, max_bytes):
    """Returns a possibly valid _ADTSStream or None.

    Args:
        max_bytes (int): maximum bytes to read
    """
    r = BitReader(fileobj)
    stream = cls(r)
    if stream.sync(max_bytes):
        # sync() consumed the 12 sync bits; record the byte offset of
        # the frame start (bit position converted to bytes)
        stream.offset = (r.get_position() - 12) // 8
        return stream
"resource": ""
} |
def sync(self, max_bytes):
    """Find the next sync.

    Scans at most ``max_bytes`` for the 12-bit ADTS sync word (0xFFF).

    Returns True if found.
    """
    # at least 2 bytes for the sync
    max_bytes = max(max_bytes, 2)
    r = self._r
    r.align()
    while max_bytes > 0:
        try:
            b = r.bytes(1)
            if b == b"\xff":
                # first 8 sync bits matched; the next 4 must all be set
                if r.bits(4) == 0xf:
                    return True
                r.align()
                max_bytes -= 2
            else:
                max_bytes -= 1
        except BitReaderError:
            # ran out of data before finding a sync
            return False
    return False
"resource": ""
} |
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None):
    """Save ID3v2 data to the DSF file

    Args:
        filething: file thing whose ``fileobj`` is rewritten in place
        v2_version (int): ID3v2 minor version to write
        v23_sep (str): separator used when downgrading frames to v2.3
            -- passed through to the ID3 writer
        padding: padding strategy passed through to the ID3 writer
    """
    fileobj = filething.fileobj
    fileobj.seek(0)
    dsd_header = DSDChunk(fileobj)
    if dsd_header.offset_metdata_chunk == 0:
        # create a new ID3 chunk at the end of the file
        fileobj.seek(0, 2)

        # store reference to ID3 location
        dsd_header.offset_metdata_chunk = fileobj.tell()
        dsd_header.write()

    try:
        data = self._prepare_data(
            fileobj, dsd_header.offset_metdata_chunk, self.size,
            v2_version, v23_sep, padding)
    except ID3Error as e:
        # re-raise as this module's error type, preserving the traceback
        reraise(error, e, sys.exc_info()[2])

    fileobj.seek(dsd_header.offset_metdata_chunk)
    fileobj.write(data)
    fileobj.truncate()

    # Update total file size
    dsd_header.total_size = fileobj.tell()
    dsd_header.write()
"resource": ""
} |
def read(self, fileobj):
    """Return if all data could be read and the atom payload"""
    fileobj.seek(self._dataoffset, 0)
    payload = fileobj.read(self.datalength)
    # A short read means the atom is truncated
    complete = len(payload) == self.datalength
    return complete, payload
"resource": ""
} |
def render(name, data):
    """Render raw atom data."""
    # this raises OverflowError if Py_ssize_t can't handle the atom data
    size = len(data) + 8
    if size > 0xFFFFFFFF:
        # 64 bit atom: the 32 bit size field is set to 1 and the real
        # size follows the name as a 64 bit value (8 extra header bytes)
        return struct.pack(">I4sQ", 1, name, size + 8) + data
    return struct.pack(">I4s", size, name) + data
"resource": ""
} |
def findall(self, name, recursive=False):
    """Recursively find all child atoms by specified name."""
    # Leaf atoms have no children to search
    if self.children is None:
        return
    for child in self.children:
        if child.name == name:
            yield child
        if recursive:
            for match in child.findall(name, True):
                yield match
def path(self, *names):
    """Look up and return the complete path of an atom.

    For example, atoms.path('moov', 'udta', 'meta') will return a
    list of three atoms, corresponding to the moov, udta, and meta
    atoms.
    """
    result = []
    current = self
    for name in names:
        # Each lookup descends one level; indexing takes a tuple key
        current = current[name, ]
        result.append(current)
    return result
"resource": ""
} |
def ASFValue(value, kind, **kwargs):
    """Create a tag value of a specific kind.

    ::

        ASFValue(u"My Value", UNICODE)

    :rtype: ASFBaseAttribute
    :raises TypeError: in case a wrong type was passed
    :raises ValueError: in case the value can't be be represented as ASFValue.
    """
    try:
        # map the kind constant to its attribute class
        attr_type = ASFBaseAttribute._get_type(kind)
    except KeyError:
        raise ValueError("Unknown value type %r" % kind)
    else:
        return attr_type(value=value, **kwargs)
"resource": ""
} |
def iter_text_fixups(data, encoding):
    """Yields a series of repaired text values for decoding

    The unmodified data is yielded first, followed by variants with
    common real-world encoding mistakes fixed up, so a caller can try
    decoding each candidate in turn.
    """
    yield data
    if encoding == Encoding.UTF16BE:
        # wrong termination
        yield data + b"\x00"
    elif encoding == Encoding.UTF16:
        # wrong termination
        yield data + b"\x00"
        # utf-16 is missing BOM, content is usually utf-16-le
        yield codecs.BOM_UTF16_LE + data
        # both cases combined
        yield codecs.BOM_UTF16_LE + data + b"\x00"
"resource": ""
} |
def verify_fileobj(fileobj, writable=False):
    """Verifies that the passed fileobj is a file like object which
    we can use.

    Args:
        writable (bool): verify that the file object is writable as well

    Raises:
        ValueError: In case the object is not a file object that is readable
            (or writable if required) or is not opened in bytes mode.
    """
    try:
        data = fileobj.read(0)
    except Exception:
        # Distinguish "not a file object" from "read failed"
        if not hasattr(fileobj, "read"):
            raise ValueError("%r not a valid file object" % fileobj)
        raise ValueError("Can't read from file object %r" % fileobj)

    if not isinstance(data, bytes):
        raise ValueError(
            "file object %r not opened in binary mode" % fileobj)

    if not writable:
        return

    try:
        fileobj.write(b"")
    except Exception:
        if not hasattr(fileobj, "write"):
            raise ValueError("%r not a valid file object" % fileobj)
        raise ValueError("Can't write to file object %r" % fileobj)
"resource": ""
} |
def loadfile(method=True, writable=False, create=False):
    """A decorator for functions taking a `filething` as a first argument.

    Passes a FileThing instance as the first argument to the wrapped function.

    Args:
        method (bool): If the wrapped functions is a method
        writable (bool): If a filename is passed opens the file readwrite, if
            passed a file object verifies that it is writable.
        create (bool): If passed a filename that does not exist will create
            a new empty file.
    """

    def split_file_args(args, kwargs):
        # A filething may arrive positionally or as filename=/fileobj=
        filething = args[0] if args else None
        filename = kwargs.pop("filename", None)
        fileobj = kwargs.pop("fileobj", None)
        return filething, filename, fileobj, args[1:], kwargs

    def wrap(func):

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            filething, filename, fileobj, rest, kwargs = \
                split_file_args(args, kwargs)
            with _openfile(self, filething, filename, fileobj,
                           writable, create) as h:
                return func(self, h, *rest, **kwargs)

        @wraps(func)
        def wrapper_func(*args, **kwargs):
            filething, filename, fileobj, rest, kwargs = \
                split_file_args(args, kwargs)
            with _openfile(None, filething, filename, fileobj,
                           writable, create) as h:
                return func(h, *rest, **kwargs)

        return wrapper if method else wrapper_func

    return wrap
"resource": ""
} |
def convert_error(exc_src, exc_dest):
    """A decorator for reraising exceptions with a different type.

    Mostly useful for IOError.

    Args:
        exc_src (type): The source exception type
        exc_dest (type): The target exception type.
    """

    def decorator(func):

        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exc_dest:
                # Already the target type; propagate untouched
                raise
            except exc_src as err:
                # Re-raise as the target type, keeping the traceback
                reraise(exc_dest, err, sys.exc_info()[2])

        return wrapper

    return decorator
"resource": ""
} |
def _openfile(instance, filething, filename, fileobj, writable, create):
    """yields a FileThing

    Args:
        instance: An object the resolved filename is stored on when
            loading, or None
        filething: Either a file name, a file object or None
        filename: Either a file name or None
        fileobj: Either a file object or None
        writable (bool): if the file should be opened
        create (bool): if the file should be created if it doesn't exist.
            implies writable

    Raises:
        MutagenError: In case opening the file failed
        TypeError: in case neither a file name or a file object is passed
    """
    assert not create or writable

    # to allow stacked context managers, just pass the result through
    if isinstance(filething, FileThing):
        filename = filething.filename
        fileobj = filething.fileobj
        filething = None

    if filething is not None:
        if is_fileobj(filething):
            fileobj = filething
        elif hasattr(filething, "__fspath__"):
            # os.PathLike support
            filename = filething.__fspath__()
            if not isinstance(filename, (bytes, text_type)):
                raise TypeError("expected __fspath__() to return a filename")
        else:
            filename = filething

    if instance is not None:
        # XXX: take "not writable" as loading the file..
        if not writable:
            instance.filename = filename
        elif filename is None:
            filename = getattr(instance, "filename", None)

    if fileobj is not None:
        verify_fileobj(fileobj, writable=writable)
        yield FileThing(fileobj, filename, filename or fileobj_name(fileobj))
    elif filename is not None:
        verify_filename(filename)

        inmemory_fileobj = False
        try:
            fileobj = open(filename, "rb+" if writable else "rb")
        except IOError as e:
            if writable and e.errno == errno.EOPNOTSUPP:
                # Some file systems (gvfs over fuse) don't support opening
                # files read/write. To make things still work read the whole
                # file into an in-memory file like object and write it back
                # later.
                # https://github.com/quodlibet/mutagen/issues/300
                try:
                    with open(filename, "rb") as fileobj:
                        fileobj = BytesIO(fileobj.read())
                except IOError as e2:
                    raise MutagenError(e2)
                inmemory_fileobj = True
            elif create and e.errno == errno.ENOENT:
                assert writable
                try:
                    fileobj = open(filename, "wb+")
                except IOError as e2:
                    raise MutagenError(e2)
            else:
                raise MutagenError(e)

        with fileobj as fileobj:
            yield FileThing(fileobj, filename, filename)

            if inmemory_fileobj:
                # write the in-memory copy back to disk after the caller
                # is done with it
                assert writable
                data = fileobj.getvalue()
                try:
                    with open(filename, "wb") as fileobj:
                        fileobj.write(data)
                except IOError as e:
                    raise MutagenError(e)
    else:
        raise TypeError("Missing filename or fileobj argument")
"resource": ""
} |
def hashable(cls):
    """Makes sure the class is hashable.

    Needs a working __eq__ and __hash__ and will add a __ne__.
    """
    # py2
    assert "__hash__" in cls.__dict__
    # py3
    assert cls.__dict__["__hash__"] is not None
    assert "__eq__" in cls.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    cls.__ne__ = __ne__
    return cls
"resource": ""
} |
def enum(cls):
    """A decorator for creating an int enum class.

    Makes the values a subclass of the type and implements repr/str.

    The new class will be a subclass of int.

    Args:
        cls (type): The class to convert to an enum

    Returns:
        type: A new class

    ::

        @enum
        class Foo(object):
            FOO = 1
            BAR = 2
    """
    assert cls.__bases__ == (object,)

    d = dict(cls.__dict__)
    # Re-create the class with the same body, but as an int subclass
    new_type = type(cls.__name__, (int,), d)
    new_type.__module__ = cls.__module__

    map_ = {}
    for key, value in iteritems(d):
        # Treat ALL-UPPERCASE integer attributes as enum constants and
        # replace them with instances of the new int subclass
        if key.upper() == key and isinstance(value, integer_types):
            value_instance = new_type(value)
            setattr(new_type, key, value_instance)
            map_[value] = key

    def str_(self):
        if self in map_:
            return "%s.%s" % (type(self).__name__, map_[self])
        return "%d" % int(self)

    def repr_(self):
        if self in map_:
            return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self))
        return "%d" % int(self)

    setattr(new_type, "__repr__", repr_)
    setattr(new_type, "__str__", str_)

    return new_type
"resource": ""
} |
def flags(cls):
    """A decorator for creating an int flags class.

    Makes the values a subclass of the type and implements repr/str.

    The new class will be a subclass of int.

    Args:
        cls (type): The class to convert to an flags

    Returns:
        type: A new class

    ::

        @flags
        class Foo(object):
            FOO = 1
            BAR = 2
    """
    assert cls.__bases__ == (object,)

    d = dict(cls.__dict__)
    # Re-create the class with the same body, but as an int subclass
    new_type = type(cls.__name__, (int,), d)
    new_type.__module__ = cls.__module__

    map_ = {}
    for key, value in iteritems(d):
        # Treat ALL-UPPERCASE integer attributes as flag constants
        if key.upper() == key and isinstance(value, integer_types):
            value_instance = new_type(value)
            setattr(new_type, key, value_instance)
            map_[value] = key

    def str_(self):
        # Decompose the value into its named flag bits; any leftover
        # bits (or a zero value) are shown numerically
        value = int(self)
        matches = []
        for k, v in map_.items():
            if value & k:
                matches.append("%s.%s" % (type(self).__name__, v))
                value &= ~k
        if value != 0 or not matches:
            matches.append(text_type(value))

        return " | ".join(matches)

    def repr_(self):
        return "<%s: %d>" % (str(self), int(self))

    setattr(new_type, "__repr__", repr_)
    setattr(new_type, "__str__", str_)

    return new_type
"resource": ""
} |
def get_size(fileobj):
    """Returns the size of the file.

    The position when passed in will be preserved if no error occurs.

    Args:
        fileobj (fileobj)
    Returns:
        int: The size of the file
    Raises:
        IOError
    """
    saved_pos = fileobj.tell()
    try:
        # Size is simply the offset at end-of-file
        fileobj.seek(0, 2)
        return fileobj.tell()
    finally:
        fileobj.seek(saved_pos, 0)
"resource": ""
} |
def read_full(fileobj, size):
    """Like fileobj.read but raises IOError if not all requested data is
    returned.

    If you want to distinguish IOError and the EOS case, better handle
    the error yourself instead of using this.

    Args:
        fileobj (fileobj)
        size (int): amount of bytes to read
    Returns:
        bytes: exactly ``size`` bytes
    Raises:
        IOError: In case read fails or not enough data is read
        ValueError: If size is negative
    """
    if size < 0:
        raise ValueError("size must not be negative")

    data = fileobj.read(size)
    if len(data) != size:
        # Include the byte counts so truncated reads are diagnosable
        raise IOError("requested %d bytes, got %d" % (size, len(data)))
    return data
"resource": ""
} |
def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16):
    """Resize a file by `diff`.

    New space will be filled with zeros.

    Args:
        fobj (fileobj)
        diff (int): amount of size to change
    Raises:
        IOError
    """
    fobj.seek(0, 2)
    filesize = fobj.tell()

    if diff < 0:
        if filesize + diff < 0:
            raise ValueError
        # truncate flushes internally
        fobj.truncate(filesize + diff)
        return

    if diff == 0:
        return

    remaining = diff
    try:
        # Append zeros a buffer at a time
        while remaining:
            chunk = min(BUFFER_SIZE, remaining)
            fobj.write(b"\x00" * chunk)
            remaining -= chunk
        fobj.flush()
    except IOError as e:
        if e.errno == errno.ENOSPC:
            # To reduce the chance of corrupt files in case of missing
            # space try to revert the file expansion back. Of course
            # in reality every in-file-write can also fail due to COW etc.
            # Note: IOError gets also raised in flush() due to buffering
            fobj.truncate(filesize)
        raise
"resource": ""
} |
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
    """Insert size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent. Mutagen tries to use mmap to resize the file, but
    falls back to a significantly slower method if mmap fails.

    Args:
        fobj (fileobj)
        size (int): The amount of space to insert
        offset (int): The offset at which to insert the space
    Raises:
        IOError
    """
    if size < 0 or offset < 0:
        raise ValueError

    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset

    if movesize < 0:
        raise ValueError

    # Grow the file first, then shift the tail up by `size` bytes
    resize_file(fobj, size, BUFFER_SIZE)

    if mmap is not None:
        try:
            mmap_move(fobj, offset + size, offset, movesize)
        except mmap.error:
            # e.g. the file system doesn't support mmap
            fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
    else:
        fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
"resource": ""
} |
def resize_bytes(fobj, old_size, new_size, offset):
    """Resize an area in a file adding and deleting at the end of it.

    Does nothing if no resizing is needed.

    Args:
        fobj (fileobj)
        old_size (int): The area starting at offset
        new_size (int): The new size of the area
        offset (int): The start of the area
    Raises:
        IOError
    """
    if new_size == old_size:
        # Nothing to do
        return
    if new_size < old_size:
        # Shrink: drop bytes at the tail of the area
        delete_bytes(fobj, old_size - new_size, offset + new_size)
    else:
        # Grow: insert empty space after the current area
        insert_bytes(fobj, new_size - old_size, offset + old_size)
"resource": ""
} |
def decode_terminated(data, encoding, strict=True):
    """Returns the decoded data until the first NULL terminator
    and all data after it.

    Args:
        data (bytes): data to decode
        encoding (str): The codec to use
        strict (bool): If True will raise ValueError in case no NULL is found
            but the available data decoded successfully.
    Returns:
        Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
            remaining data after the found NULL termination.

    Raises:
        UnicodeError: In case the data can't be decoded.
        LookupError: In case the encoding is not found.
        ValueError: In case the data isn't null terminated (even if it is
            encoded correctly) except if strict is False, then the decoded
            string will be returned anyway.
    """
    codec_info = codecs.lookup(encoding)
    # normalize encoding name so we can compare by name
    encoding = codec_info.name

    # fast path for codecs where a plain NUL byte is unambiguous
    if encoding in ("utf-8", "iso8859-1"):
        index = data.find(b"\x00")
        if index == -1:
            # make sure we raise UnicodeError first, like in the slow path
            res = data.decode(encoding), b""
            if strict:
                raise ValueError("not null terminated")
            return res
        return data[:index].decode(encoding), data[index + 1:]

    # slow path: feed the incremental decoder a byte at a time until it
    # produces the NUL character
    decoder = codec_info.incrementaldecoder()
    chars = []
    for i, b in enumerate(iterbytes(data)):
        c = decoder.decode(b)
        if c == u"\x00":
            return u"".join(chars), data[i + 1:]
        chars.append(c)

    # make sure the decoder is finished
    chars.append(decoder.decode(b"", True))
    if strict:
        raise ValueError("not null terminated")
    return u"".join(chars), b""
"resource": ""
} |
def bits(self, count):
    """Reads `count` bits and returns an uint, MSB read first.

    May raise BitReaderError if not enough data could be read or
    IOError by the underlying file object.
    """
    if count < 0:
        raise ValueError

    if count > self._bits:
        # Refill the buffer with just enough whole bytes
        n_bytes = (count - self._bits + 7) // 8
        data = self._fileobj.read(n_bytes)
        if len(data) != n_bytes:
            raise BitReaderError("not enough data")
        for byte in bytearray(data):
            self._buffer = (self._buffer << 8) | byte
        self._bits += n_bytes * 8

    # Take the top `count` bits and keep the remainder buffered
    remaining = self._bits - count
    result = self._buffer >> remaining
    self._buffer &= (1 << remaining) - 1
    self._bits = remaining
    assert self._bits < 8
    return result
"resource": ""
} |
def bytes(self, count):
    """Returns a bytearray of length `count`. Works unaligned."""
    if count < 0:
        raise ValueError

    if self._bits == 0:
        # fast path: byte aligned, read straight from the file
        data = self._fileobj.read(count)
        if len(data) != count:
            raise BitReaderError("not enough data")
        return data

    # unaligned: assemble one byte at a time from the bit reader
    return bytes(bytearray(self.bits(8) for _ in xrange(count)))
"resource": ""
} |
def skip(self, count):
    """Skip `count` bits.

    Might raise BitReaderError if there wasn't enough data to skip,
    but might also fail on the next bits() instead.
    """
    if count < 0:
        raise ValueError

    if count <= self._bits:
        # Everything we need is already buffered
        self.bits(count)
        return

    # Drop buffered bits, seek over whole bytes, read the remainder
    count -= self.align()
    n_bytes, count = divmod(count, 8)
    self._fileobj.seek(n_bytes, 1)
    self.bits(count)
"resource": ""
} |
def align(self):
    """Align to the next byte, returns the amount of bits skipped"""
    # Discard whatever partial byte is buffered
    skipped = self._bits
    self._bits = 0
    self._buffer = 0
    return skipped
"resource": ""
} |
def _get_win_argv():
    """Returns a unicode argv under Windows and standard sys.argv otherwise

    Returns:
        List[`fsnative`]
    """
    assert is_win

    argc = ctypes.c_int()
    try:
        argv = winapi.CommandLineToArgvW(
            winapi.GetCommandLineW(), ctypes.byref(argc))
    except WindowsError:
        return []

    if not argv:
        return []

    # Keep only the trailing arguments that line up with sys.argv, so
    # interpreter-level options are skipped -- presumably matches how
    # CPython builds sys.argv; TODO confirm for all launch modes
    res = argv[max(0, argc.value - len(sys.argv)):argc.value]

    # Free the buffer allocated by CommandLineToArgvW
    winapi.LocalFree(argv)

    return res
"resource": ""
} |
def _get_userdir(user=None):
    """Returns the user dir or None

    Args:
        user (fsnative or None): the user to look up, or None for the
            current user
    Raises:
        TypeError: if user is neither None nor a fsnative
    """
    if user is not None and not isinstance(user, fsnative):
        raise TypeError

    if is_win:
        # Try the usual environment variables in decreasing priority
        if "HOME" in environ:
            path = environ["HOME"]
        elif "USERPROFILE" in environ:
            path = environ["USERPROFILE"]
        elif "HOMEPATH" in environ and "HOMEDRIVE" in environ:
            path = os.path.join(environ["HOMEDRIVE"], environ["HOMEPATH"])
        else:
            return

        if user is None:
            return path
        else:
            # assume other users' homes are siblings of the current one
            return os.path.join(os.path.dirname(path), user)
    else:
        import pwd

        if user is None:
            if "HOME" in environ:
                return environ["HOME"]
            else:
                # fall back to the password database
                try:
                    return path2fsn(pwd.getpwuid(os.getuid()).pw_dir)
                except KeyError:
                    return
        else:
            try:
                return path2fsn(pwd.getpwnam(user).pw_dir)
            except KeyError:
                return
"resource": ""
} |
def write(self):
    """Return a string encoding of the page header and data.

    A ValueError is raised if the data is too big to fit in a
    single page.
    """
    data = [
        struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags,
                    self.position, self.serial, self.sequence, 0)
    ]

    lacing_data = []
    for datum in self.packets:
        # lacing values: runs of 255 plus a final remainder byte
        quot, rem = divmod(len(datum), 255)
        lacing_data.append(b"\xff" * quot + chr_(rem))
    lacing_data = b"".join(lacing_data)
    if not self.complete and lacing_data.endswith(b"\x00"):
        # an unterminated final packet has no trailing remainder byte
        lacing_data = lacing_data[:-1]
    data.append(chr_(len(lacing_data)))
    data.append(lacing_data)
    data.extend(self.packets)
    data = b"".join(data)

    # Python's CRC is swapped relative to Ogg's needs.
    # crc32 returns uint prior to py2.6 on some platforms, so force uint
    crc = (~zlib.crc32(data.translate(cdata.bitswap), -1)) & 0xffffffff
    # Although we're using to_uint_be, this actually makes the CRC
    # a proper le integer, since Python's CRC is byteswapped.
    crc = cdata.to_uint_be(crc).translate(cdata.bitswap)
    # Splice the CRC into bytes 22..26 of the header
    data = data[:22] + crc + data[26:]
    return data
"resource": ""
} |
def size(self):
    """Total frame size."""
    total = 27  # Initial header size
    for datum in self.packets:
        # one lacing byte per 255-byte run plus one remainder byte
        quot, rem = divmod(len(datum), 255)
        total += quot + 1
        if not self.complete and rem == 0:
            # Packet contains a multiple of 255 bytes and is not
            # terminated, so we don't have a \x00 at the end.
            total -= 1
        total += len(datum)
    return total
"resource": ""
} |
def renumber(fileobj, serial, start):
    """Renumber pages belonging to a specified logical stream.

    fileobj must be opened with mode r+b or w+b.

    Starting at page number 'start', renumber all pages belonging
    to logical stream 'serial'. Other pages will be ignored.

    fileobj must point to the start of a valid Ogg page; any
    occuring after it and part of the specified logical stream
    will be numbered. No adjustment will be made to the data in
    the pages nor the granule position; only the page number, and
    so also the CRC.

    If an error occurs (e.g. non-Ogg data is found), fileobj will
    be left pointing to the place in the stream the error occured,
    but the invalid data will be left intact (since this function
    does not change the total file size).
    """
    number = start
    while True:
        try:
            page = OggPage(fileobj)
        except EOFError:
            break
        else:
            if page.serial != serial:
                # Wrong stream, skip this page.
                continue
            # Changing the number can't change the page size,
            # so seeking back based on the current size is safe.
            fileobj.seek(-page.size, 1)
        page.sequence = number
        fileobj.write(page.write())
        # continue after the page we just rewrote
        fileobj.seek(page.offset + page.size, 0)
        number += 1
"resource": ""
} |
def to_packets(pages, strict=False):
    """Construct a list of packet data from a list of Ogg pages.

    If strict is true, the first page must start a new packet,
    and the last page must end the last packet.
    """
    serial = pages[0].serial
    sequence = pages[0].sequence
    packets = []

    if strict:
        if pages[0].continued:
            raise ValueError("first packet is continued")
        if not pages[-1].complete:
            raise ValueError("last packet does not complete")
    elif pages and pages[0].continued:
        # tolerate a leading continuation by starting an empty packet
        packets.append([b""])

    for page in pages:
        # All pages must come from one stream, in order
        if serial != page.serial:
            raise ValueError("invalid serial number in %r" % page)
        if sequence != page.sequence:
            raise ValueError("bad sequence number in %r" % page)
        sequence += 1

        if page.continued:
            # first chunk continues the previous page's last packet
            packets[-1].append(page.packets[0])
        else:
            packets.append([page.packets[0]])
        packets.extend([p] for p in page.packets[1:])

    return [b"".join(parts) for parts in packets]
"resource": ""
} |
def from_packets(packets, sequence=0, default_size=4096,
                 wiggle_room=2048):
    """Construct a list of Ogg pages from a list of packet data.

    The algorithm will generate pages of approximately
    default_size in size (rounded down to the nearest multiple of
    255). However, it will also allow pages to increase to
    approximately default_size + wiggle_room if allowing the
    wiggle room would finish a packet (only one packet will be
    finished in this way per page; if the next packet would fit
    into the wiggle room, it still starts on a new page).

    This method reduces packet fragmentation when packet sizes are
    slightly larger than the default page size, while still
    ensuring most pages are of the average size.

    Pages are numbered started at 'sequence'; other information is
    uninitialized.
    """
    # round down to a whole number of lacing runs
    chunk_size = (default_size // 255) * 255

    pages = []

    page = OggPage()
    page.sequence = sequence

    for packet in packets:
        page.packets.append(b"")
        while packet:
            data, packet = packet[:chunk_size], packet[chunk_size:]
            if page.size < default_size and len(page.packets) < 255:
                page.packets[-1] += data
            else:
                # If we've put any packet data into this page yet,
                # we need to mark it incomplete. However, we can
                # also have just started this packet on an already
                # full page, in which case, just start the new
                # page with this packet.
                if page.packets[-1]:
                    page.complete = False
                    if len(page.packets) == 1:
                        page.position = -1
                else:
                    page.packets.pop(-1)
                pages.append(page)
                page = OggPage()
                page.continued = not pages[-1].complete
                page.sequence = pages[-1].sequence + 1
                page.packets.append(data)

        if len(packet) < wiggle_room:
            # finish the packet on this page rather than fragmenting
            page.packets[-1] += packet
            packet = b""

    if page.packets:
        pages.append(page)

    return pages
"resource": ""
} |
def replace(cls, fileobj, old_pages, new_pages):
    """Replace old_pages with new_pages within fileobj.

    old_pages must have come from reading fileobj originally.
    new_pages are assumed to have the 'same' data as old_pages,
    and so the serial and sequence numbers will be copied, as will
    the flags for the first and last pages.

    fileobj will be resized and pages renumbered as necessary. As
    such, it must be opened r+b or w+b.
    """
    if not len(old_pages) or not len(new_pages):
        raise ValueError("empty pages list not allowed")

    # Number the new pages starting from the first old page.
    first = old_pages[0].sequence
    for page, seq in izip(new_pages,
                          xrange(first, first + len(new_pages))):
        page.sequence = seq
        page.serial = old_pages[0].serial

    # carry over stream boundary flags from the pages being replaced
    new_pages[0].first = old_pages[0].first
    new_pages[0].last = old_pages[0].last
    new_pages[0].continued = old_pages[0].continued

    new_pages[-1].first = old_pages[-1].first
    new_pages[-1].last = old_pages[-1].last
    new_pages[-1].complete = old_pages[-1].complete
    if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
        new_pages[-1].position = -1

    new_data = [cls.write(p) for p in new_pages]

    # Add dummy data or merge the remaining data together so multiple
    # new pages replace an old one
    pages_diff = len(old_pages) - len(new_data)
    if pages_diff > 0:
        new_data.extend([b""] * pages_diff)
    elif pages_diff < 0:
        new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])]

    # Replace pages one by one. If the sizes match no resize happens.
    offset_adjust = 0
    new_data_end = None
    assert len(old_pages) == len(new_data)
    for old_page, data in izip(old_pages, new_data):
        offset = old_page.offset + offset_adjust
        data_size = len(data)
        resize_bytes(fileobj, old_page.size, data_size, offset)
        fileobj.seek(offset, 0)
        fileobj.write(data)
        new_data_end = offset + data_size
        offset_adjust += (data_size - old_page.size)

    # Finally, if there's any discrepency in length, we need to
    # renumber the pages for the logical stream.
    if len(old_pages) != len(new_pages):
        fileobj.seek(new_data_end, 0)
        serial = new_pages[-1].serial
        sequence = new_pages[-1].sequence + 1
        cls.renumber(fileobj, serial, sequence)
"resource": ""
} |
def find_last(fileobj, serial, finishing=False):
    """Find the last page of the stream 'serial'.

    If the file is not multiplexed this function is fast. If it is,
    it must read the whole the stream.

    This finds the last page in the actual file object, or the last
    page in the stream (with eos set), whichever comes first.

    If finishing is True it returns the last page which contains a packet
    finishing on it. If there exist pages but none with finishing packets
    returns None.

    Returns None in case no page with the serial exists.
    Raises error in case this isn't a valid ogg stream.
    Raises IOError.
    """
    # For non-muxed streams, look at the last page.
    seek_end(fileobj, 256 * 256)

    data = fileobj.read()
    try:
        # last "OggS" capture pattern in the tail of the file
        index = data.rindex(b"OggS")
    except ValueError:
        raise error("unable to find final Ogg header")
    bytesobj = cBytesIO(data[index:])

    def is_valid(page):
        return not finishing or page.position != -1

    best_page = None
    try:
        page = OggPage(bytesobj)
    except error:
        pass
    else:
        if page.serial == serial and is_valid(page):
            if page.last:
                # fast path: the physical last page is also the
                # logical last page of this stream
                return page
            else:
                best_page = page
        else:
            best_page = None

    # The stream is muxed, so use the slow way.
    fileobj.seek(0)
    try:
        page = OggPage(fileobj)
        while True:
            if page.serial == serial:
                if is_valid(page):
                    best_page = page
                if page.last:
                    break
            page = OggPage(fileobj)
        return best_page
    except error:
        return best_page
    except EOFError:
        return best_page
"resource": ""
} |
def guid2bytes(s):
    """Converts a GUID to the serialized bytes representation"""
    assert isinstance(s, str)
    assert len(s) == 36
    # "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE": the first three groups are
    # serialized little-endian, the last two big-endian (the final 6 byte
    # group is packed as a 8 byte int with the top 2 bytes cut off).
    head = struct.pack(
        "<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16))
    mid = struct.pack(">H", int(s[19:23], 16))
    tail = struct.pack(">Q", int(s[24:], 16))[2:]
    return head + mid + tail
"resource": ""
} |
def bytes2guid(s):
    """Converts a serialized GUID to a text GUID"""
    assert isinstance(s, bytes)
    # first three fields are little-endian, the last two big-endian; the
    # trailing 6 bytes are widened to 8 so they unpack as one integer
    f1, f2, f3 = struct.unpack("<IHH", s[:8])
    f4, f5 = struct.unpack(">HQ", s[8:10] + b"\x00\x00" + s[10:])
    return "%08X-%04X-%04X-%04X-%012X" % (f1, f2, f3, f4, f5)
"resource": ""
} |
def get_default_padding(self):
    """The default implementation which tries to select a reasonable
    amount of padding and which might change in future versions.
    Returns:
        int: Amount of padding after saving
    """
    # 10 KiB + 1% of trailing data
    upper_bound = 1024 * 10 + self.size // 100
    # 1 KiB + 0.1% of trailing data
    lower_bound = 1024 + self.size // 1000
    if self.padding < 0:
        # not enough padding, add some
        return lower_bound
    if self.padding > upper_bound:
        # padding too large, reduce
        return lower_bound
    # just use existing padding as is
    return self.padding
"resource": ""
} |
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
    """Update offset table in the specified atom.

    fmt is a struct format with one "%d" placeholder for the entry
    count ('>%dI' for stco, '>%dQ' for co64); delta is the number of
    bytes inserted/removed at file position 'offset'.
    """
    # the table atom itself may sit after the edit point and has moved too
    if atom.offset > offset:
        atom.offset += delta
    # skip the 8 byte atom header + 4 byte version/flags
    fileobj.seek(atom.offset + 12)
    data = fileobj.read(atom.length - 12)
    # first 32 bits: number of entries, then the chunk offsets themselves
    fmt = fmt % cdata.uint_be(data[:4])
    offsets = struct.unpack(fmt, data[4:])
    # only offsets past the edit point are shifted by delta
    offsets = [o + (0, delta)[offset < o] for o in offsets]
    fileobj.seek(atom.offset + 16)
    fileobj.write(struct.pack(fmt, *offsets))
"resource": ""
} |
def __update_offsets(self, fileobj, atoms, delta, offset):
    """Update offset tables in all 'stco' and 'co64' atoms.

    Called after 'delta' bytes were inserted/removed at 'offset' so
    the absolute sample chunk offsets stay valid.
    """
    if delta == 0:
        return
    moov = atoms[b"moov"]
    # 32 bit chunk offset tables
    for atom in moov.findall(b'stco', True):
        self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
    # 64 bit chunk offset tables
    for atom in moov.findall(b'co64', True):
        self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
    # fragmented mp4: base data offsets live in track fragment headers
    try:
        for atom in atoms[b"moof"].findall(b'tfhd', True):
            self.__update_tfhd(fileobj, atom, delta, offset)
    except KeyError:
        # no 'moof' atom -> not a fragmented file, nothing to do
        pass
"resource": ""
} |
def delete(self, filename):
    """Remove the metadata from the given filename."""
    # drop atoms we failed to parse so they don't get written back
    self._failed_atoms.clear()
    self.clear()
    # saving an empty tag with a zero padding function strips the metadata
    self.save(filename, padding=lambda x: 0)
"resource": ""
} |
def _parse_stsd(self, atom, fileobj):
    """Sets channels, bits_per_sample, sample_rate and optionally bitrate.
    Can raise MP4StreamInfoError.
    """
    assert atom.name == b"stsd"

    ok, data = atom.read(fileobj)
    if not ok:
        raise MP4StreamInfoError("Invalid stsd")

    try:
        version, flags, data = parse_full_atom(data)
    except ValueError as e:
        raise MP4StreamInfoError(e)

    if version != 0:
        raise MP4StreamInfoError("Unsupported stsd version")

    # 32 bit big endian entry count, then the sample entries
    try:
        num_entries, offset = cdata.uint32_be_from(data, 0)
    except cdata.error as e:
        raise MP4StreamInfoError(e)

    if num_entries == 0:
        return

    # look at the first entry if there is one
    entry_fileobj = cBytesIO(data[offset:])
    try:
        entry_atom = Atom(entry_fileobj)
    except AtomError as e:
        raise MP4StreamInfoError(e)

    try:
        entry = AudioSampleEntry(entry_atom, entry_fileobj)
    except ASEntryError as e:
        raise MP4StreamInfoError(e)
    else:
        # copy the decoded audio properties onto the stream info
        self.channels = entry.channels
        self.bits_per_sample = entry.sample_size
        self.sample_rate = entry.sample_rate
        self.bitrate = entry.bitrate
        self.codec = entry.codec
        self.codec_description = entry.codec_description
"resource": ""
} |
def _parse_desc_length_file(cls, fileobj):
    """May raise ValueError"""
    # MPEG-4 "expandable" length field: up to 4 bytes, 7 payload bits
    # per byte; the high bit signals that another byte follows.
    value = 0
    for i in xrange(4):
        try:
            b = cdata.uint8(fileobj.read(1))
        except cdata.error as e:
            raise ValueError(e)
        value = (value << 7) | (b & 0x7f)
        if not b >> 7:
            break
    else:
        # all four bytes had the continuation bit set -> malformed
        raise ValueError("invalid descriptor length")

    return value
"resource": ""
} |
def parse(cls, fileobj):
    """Returns a parsed instance of the called type.
    The file position is right after the descriptor after this returns.
    Raises DescriptorError
    """
    try:
        length = cls._parse_desc_length_file(fileobj)
    except ValueError as e:
        raise DescriptorError(e)
    pos = fileobj.tell()
    instance = cls(fileobj, length)
    # the subclass constructor may not consume the whole descriptor;
    # skip whatever payload is left so the position ends up after it
    left = length - (fileobj.tell() - pos)
    if left < 0:
        raise DescriptorError("descriptor parsing read too much data")
    fileobj.seek(left, 1)
    return instance
"resource": ""
} |
def codec_desc(self):
    """string or None"""
    # delegate to the decoder specific info when one was parsed
    info = self.decSpecificInfo
    return info.description if info is not None else None
"resource": ""
} |
def description(self):
    """string or None if unknown"""
    try:
        name = self._TYPE_NAMES[self.audioObjectType]
    except IndexError:
        return None
    if name is None:
        # object type slot exists but has no name assigned
        return None
    # annotate detected extensions
    if self.sbrPresentFlag == 1:
        name += "+SBR"
    if self.psPresentFlag == 1:
        name += "+PS"
    return text_type(name)
"resource": ""
} |
def channels(self):
    """channel count or 0 for unknown"""
    # from ProgramConfigElement()
    if hasattr(self, "pce_channels"):
        return self.pce_channels

    conf = getattr(
        self, "extensionChannelConfiguration", self.channelConfiguration)

    if conf == 1:
        # mono unless parametric stereo turns it into 2 channels;
        # unknown PS state means we can't tell
        ps = self.psPresentFlag
        if ps == -1:
            return 0
        return 2 if ps == 1 else 1
    if conf == 7:
        return 8
    return 0 if conf > 7 else conf
"resource": ""
} |
def _read_track(chunk):
    """Returns a list of midi events and tempo change events"""
    TEMPO, MIDI = range(2)

    # Deviations: The running status should be reset on non midi events, but
    # some files contain meta events inbetween.
    # TODO: Offset and time signature are not considered.

    tempos = []
    events = []

    chunk = bytearray(chunk)
    deltasum = 0
    status = 0  # running status: last seen midi event type
    off = 0
    while off < len(chunk):
        # delta time in ticks since the previous event
        delta, off = _var_int(chunk, off)
        deltasum += delta
        event_type = chunk[off]
        off += 1
        if event_type == 0xFF:
            # meta event: type byte + var-length payload
            meta_type = chunk[off]
            off += 1
            num, off = _var_int(chunk, off)
            # TODO: support offset/time signature
            if meta_type == 0x51:
                # Set Tempo: 24 bit big endian microseconds per quarter note
                data = chunk[off:off + num]
                if len(data) != 3:
                    raise SMFError
                tempo = struct.unpack(">I", b"\x00" + bytes(data))[0]
                tempos.append((deltasum, TEMPO, tempo))
            off += num
        elif event_type in (0xF0, 0xF7):
            # sysex event: skip its var-length payload
            val, off = _var_int(chunk, off)
            off += val
        else:
            if event_type < 0x80:
                # if < 0x80 take the type from the previous midi event
                off += 1
                event_type = status
            elif event_type < 0xF0:
                off += 2
                status = event_type
            else:
                raise SMFError("invalid event")

            # program change / channel pressure carry only one data byte
            if event_type >> 4 in (0xD, 0xC):
                off -= 1
            events.append((deltasum, MIDI, delta))

    return events, tempos
"resource": ""
} |
def _read_midi_length(fileobj):
    """Returns the duration in seconds. Can raise all kind of errors..."""

    TEMPO, MIDI = range(2)

    def read_chunk(fileobj):
        # a chunk is a 4 byte identifier + 32 bit big endian length + data
        info = fileobj.read(8)
        if len(info) != 8:
            raise SMFError("truncated")
        chunklen = struct.unpack(">I", info[4:])[0]
        data = fileobj.read(chunklen)
        if len(data) != chunklen:
            raise SMFError("truncated")
        return info[:4], data

    identifier, chunk = read_chunk(fileobj)
    if identifier != b"MThd":
        raise SMFError("Not a MIDI file")

    if len(chunk) != 6:
        raise SMFError("truncated")

    format_, ntracks, tickdiv = struct.unpack(">HHH", chunk)
    if format_ > 1:
        raise SMFError("Not supported format %d" % format_)

    if tickdiv >> 15:
        # timecode based timing (SMPTE) instead of ticks per quarter note
        # fps = (-(tickdiv >> 8)) & 0xFF
        # subres = tickdiv & 0xFF
        # never saw one of those
        raise SMFError("Not supported timing interval")

    # get a list of events and tempo changes for each track
    tracks = []
    first_tempos = None
    for tracknum in xrange(ntracks):
        identifier, chunk = read_chunk(fileobj)
        if identifier != b"MTrk":
            continue
        events, tempos = _read_track(chunk)

        # In case of format == 1, copy the first tempo list to all tracks
        first_tempos = first_tempos or tempos
        if format_ == 1:
            tempos = list(first_tempos)
        events += tempos
        events.sort()
        tracks.append(events)

    # calculate the duration of each track
    durations = []
    for events in tracks:
        # default tempo: 500000 microseconds per quarter note (120 bpm)
        tempo = 500000
        parts = []
        deltasum = 0
        for (dummy, type_, data) in events:
            if type_ == TEMPO:
                parts.append((deltasum, tempo))
                tempo = data
                deltasum = 0
            else:
                deltasum += data
        parts.append((deltasum, tempo))

        duration = 0
        for (deltasum, tempo) in parts:
            quarter, tpq = deltasum / float(tickdiv), tempo
            duration += (quarter * tpq)
        duration /= 10 ** 6  # microseconds -> seconds

        durations.append(duration)

    # return the longest one
    # NOTE(review): raises ValueError if the file contains no MTrk
    # chunks at all — confirm callers handle that.
    return max(durations)
"resource": ""
} |
q36057 | _swap_bytes | train | def _swap_bytes(data):
"""swaps bytes for 16 bit, leaves remaining trailing bytes alone"""
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
return bytes(data) | python | {
"resource": ""
} |
def _codec_can_decode_with_surrogatepass(codec, _cache={}):
    """Returns if a codec supports the surrogatepass error handler when
    decoding. Some codecs were broken in Python <3.4
    """
    # the mutable default argument is intentional: it memoizes the
    # probe result per codec across calls
    if codec not in _cache:
        try:
            # round-trip a lone surrogate through the codec
            u"\ud83d".encode(
                codec, _surrogatepass).decode(codec, _surrogatepass)
        except UnicodeDecodeError:
            _cache[codec] = False
        else:
            _cache[codec] = True
    return _cache[codec]
"resource": ""
} |
def _winpath2bytes_py3(text, codec):
    """Fallback implementation for text including surrogates"""
    # merge surrogate codepoints
    if _normalize_codec(codec).startswith("utf-16"):
        # fast path, utf-16 merges anyway
        return text.encode(codec, _surrogatepass)
    # Round-trip through utf-16-le so surrogate pairs are combined into
    # their real code points before encoding to the target codec.
    return _decode_surrogatepass(
        text.encode("utf-16-le", _surrogatepass),
        "utf-16-le").encode(codec, _surrogatepass)
"resource": ""
} |
def _get_encoding():
    """The encoding used for paths, argv, environ, stdout and stdin"""
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        # pick a sane per-platform default when Python doesn't know
        if is_darwin:
            encoding = "utf-8"
        elif is_win:
            encoding = "mbcs"
        else:
            encoding = "ascii"
    return _normalize_codec(encoding)
"resource": ""
} |
def _writeblock(cls, block, is_last=False):
    """Returns the block content + header.
    Raises error.
    """
    data = bytearray()
    # the high bit of the code byte marks the last metadata block
    code = (block.code | 128) if is_last else block.code
    datum = block.write()
    size = len(datum)
    if size > cls._MAX_SIZE:
        if block._distrust_size and block._invalid_overflow_size != -1:
            # The original size of this block was (1) wrong and (2)
            # the real size doesn't allow us to save the file
            # according to the spec (too big for 24 bit uint). Instead
            # simply write back the original wrong size.. at least
            # we don't make the file more "broken" as it is.
            size = block._invalid_overflow_size
        else:
            raise error("block is too long to write")
    assert not size > cls._MAX_SIZE
    # block length is a 24 bit big endian uint
    length = struct.pack(">I", size)[-3:]
    data.append(code)
    data += length
    data += datum
    return data
"resource": ""
} |
def _writeblocks(cls, blocks, available, cont_size, padding_func):
    """Render metadata block as a byte string.

    'available' is the space the old metadata occupied, 'cont_size' the
    size of the audio data; both feed into the padding decision.
    """
    # write everything except padding
    data = bytearray()
    for block in blocks:
        if isinstance(block, Padding):
            continue
        data += cls._writeblock(block)
    blockssize = len(data)

    # take the padding overhead into account. we always add one
    # to make things simple.
    padding_block = Padding()
    blockssize += len(cls._writeblock(padding_block))

    # finally add a padding block (clamped to the 24 bit length limit)
    info = PaddingInfo(available - blockssize, cont_size)
    padding_block.length = min(info._get_padding(padding_func),
                               cls._MAX_SIZE)
    data += cls._writeblock(padding_block, is_last=True)

    return data
"resource": ""
} |
def add_tags(self):
    """Add a Vorbis comment block to the file."""
    if self.tags is not None:
        raise FLACVorbisError("a Vorbis comment already exists")
    self.tags = VCFLACDict()
    self.metadata_blocks.append(self.tags)
"resource": ""
} |
def delete(self, filething=None):
    """Remove Vorbis comments from a file.
    If no filename is given, the one most recently loaded is used.
    """
    if self.tags is not None:
        # write the file back without any Vorbis comment block and
        # without reserving padding for one
        temp_blocks = [
            b for b in self.metadata_blocks if b.code != VCFLACDict.code]
        self._save(filething, temp_blocks, False, padding=lambda x: 0)

        # drop all comment blocks except the one we manage, then empty
        # it so self.tags stays usable afterwards
        self.metadata_blocks[:] = [
            b for b in self.metadata_blocks
            if b.code != VCFLACDict.code or b is self.tags]
        self.tags.clear()
"resource": ""
} |
def load(self, filething):
    """Load file information from a filename."""
    fileobj = filething.fileobj

    # reset any state from a previous load
    self.metadata_blocks = []
    self.tags = None
    self.cuesheet = None
    self.seektable = None
    fileobj = StrictFileObject(fileobj)
    self.__check_header(fileobj, filething.name)
    while self.__read_metadata_block(fileobj):
        pass

    # the first metadata block must be STREAMINFO (provides .length)
    try:
        self.metadata_blocks[0].length
    except (AttributeError, IndexError):
        raise FLACNoHeaderError("Stream info block not found")

    if self.info.length:
        # derive the bitrate from the remaining (audio) bytes
        start = fileobj.tell()
        fileobj.seek(0, 2)
        self.info.bitrate = int(
            float(fileobj.tell() - start) * 8 / self.info.length)
    else:
        self.info.bitrate = 0
"resource": ""
} |
def clear_pictures(self):
    """Delete all pictures from the file."""
    self.metadata_blocks = [
        block for block in self.metadata_blocks
        if block.code != Picture.code]
"resource": ""
} |
def save(self, filething=None, deleteid3=False, padding=None):
    """Save metadata blocks to a file.
    Args:
        filething (filething)
        deleteid3 (bool): delete id3 tags while at it
        padding (:obj:`mutagen.PaddingFunction`)
    If no filename is given, the one most recently loaded is used.
    """
    # public entry point; all the work happens in _save
    self._save(filething, self.metadata_blocks, deleteid3, padding)
"resource": ""
} |
def frame_from_fsnative(arg):
    """Takes item from argv and returns ascii native str
    or raises ValueError.
    """
    assert isinstance(arg, fsnative)

    text = fsn2text(arg, strict=True)
    encoded = text.encode("ascii")
    # the native str type is bytes under Python 2, unicode under Python 3
    return encoded if PY2 else encoded.decode("ascii")
"resource": ""
} |
def value_from_fsnative(arg, escape):
    """Takes an item from argv and returns a text_type value without
    surrogate escapes or raises ValueError.

    With escape=True, backslash escape sequences in the argument
    (e.g. "\\n") are interpreted first.
    """
    assert isinstance(arg, fsnative)

    if escape:
        bytes_ = fsn2bytes(arg)
        if PY2:
            bytes_ = bytes_.decode("string_escape")
        else:
            # With py3.7 this has started to warn for invalid escapes, but we
            # don't control the input so ignore it.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                bytes_ = codecs.escape_decode(bytes_)[0]
        arg = bytes2fsn(bytes_)

    text = fsn2text(arg, strict=True)
    return text
"resource": ""
} |
def skip_id3(fileobj):
    """Might raise IOError"""
    # WMP writes multiple id3s, so skip as many as we find
    while True:
        idata = fileobj.read(10)
        try:
            # "ID3", 2 version bytes + 1 flag byte (skipped), syncsafe size
            id3, insize = struct.unpack('>3sxxx4s', idata)
        except struct.error:
            id3, insize = b'', 0
        insize = BitPaddedInt(insize)
        if id3 == b'ID3' and insize > 0:
            # skip over the tag body (the 10 byte header is consumed)
            fileobj.seek(insize, 1)
        else:
            # not a tag: rewind the probe bytes and stop
            fileobj.seek(-len(idata), 1)
            break
"resource": ""
} |
def iter_sync(fileobj, max_read):
    """Iterate over a fileobj and yields on each mpeg sync.
    When yielding the fileobj offset is right before the sync and can be
    changed between iterations without affecting the iteration process.
    Might raise IOError.

    A sync is 0xFF followed by a byte whose top three bits are set.
    """
    read = 0
    size = 2  # read size doubles each round to amortize small files
    last_byte = b""
    is_second = lambda b: ord(b) & 0xe0 == 0xe0
    while read < max_read:
        data_offset = fileobj.tell()
        new_data = fileobj.read(min(max_read - read, size))
        if not new_data:
            return
        read += len(new_data)

        # handle a sync split across the previous/current chunk boundary
        if last_byte == b"\xff" and is_second(new_data[0:1]):
            fileobj.seek(data_offset - 1, 0)
            yield
        size *= 2
        last_byte = new_data[-1:]

        find_offset = 0
        while True:
            index = new_data.find(b"\xff", find_offset)
            # if not found or the last byte -> read more
            if index == -1 or index == len(new_data) - 1:
                break
            if is_second(new_data[index + 1:index + 2]):
                fileobj.seek(data_offset + index, 0)
                yield
            find_offset = index + 1
        # restore the position in case the consumer moved it
        fileobj.seek(data_offset + len(new_data), 0)
"resource": ""
} |
def _parse_vbr_header(self, fileobj, frame_offset, frame_size,
                      frame_length):
    """Does not raise

    Looks for a Xing (incl. LAME extension) or VBRI header in the
    first frame and, if found, refines length/bitrate/gain info.
    """
    # Xing
    xing_offset = XingHeader.get_offset(self)
    fileobj.seek(frame_offset + xing_offset, 0)
    try:
        xing = XingHeader(fileobj)
    except XingHeaderError:
        pass
    else:
        lame = xing.lame_header
        self.sketchy = False
        self.bitrate_mode = _guess_xing_bitrate_mode(xing)
        self.encoder_settings = xing.get_encoder_settings()
        if xing.frames != -1:
            samples = frame_size * xing.frames
            if xing.bytes != -1 and samples > 0:
                # the first frame is only included in xing.bytes but
                # not in xing.frames, skip it.
                audio_bytes = max(0, xing.bytes - frame_length)
                self.bitrate = intround((
                    audio_bytes * 8 * self.sample_rate) / float(samples))
            if lame is not None:
                # exclude the encoder delay/padding from the duration
                samples -= lame.encoder_delay_start
                samples -= lame.encoder_padding_end
                if samples < 0:
                    # older lame versions wrote bogus delay/padding for short
                    # files with low bitrate
                    samples = 0
            self.length = float(samples) / self.sample_rate
        if xing.lame_version_desc:
            self.encoder_info = u"LAME %s" % xing.lame_version_desc
        if lame is not None:
            self.track_gain = lame.track_gain_adjustment
            self.track_peak = lame.track_peak
            self.album_gain = lame.album_gain_adjustment
        return

    # VBRI (FhG encoder), only checked when no Xing header was found
    vbri_offset = VBRIHeader.get_offset(self)
    fileobj.seek(frame_offset + vbri_offset, 0)
    try:
        vbri = VBRIHeader(fileobj)
    except VBRIHeaderError:
        pass
    else:
        self.bitrate_mode = BitrateMode.VBR
        self.encoder_info = u"FhG"
        self.sketchy = False
        self.length = float(frame_size * vbri.frames) / self.sample_rate
        if self.length:
            self.bitrate = int((vbri.bytes * 8) / self.length)
"resource": ""
} |
def RegisterTXXXKey(cls, key, desc):
    """Register a user-defined text frame key.
    Some ID3 tags are stored in TXXX frames, which allow a
    freeform 'description' which acts as a subkey,
    e.g. TXXX:BARCODE.::
        EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
    """
    frameid = "TXXX:" + desc

    def getter(id3, key):
        return list(id3[frameid])

    def setter(id3, key, value):
        # encoding 0 = latin-1, 3 = utf-8
        enc = 0
        # Store 8859-1 if we can, per MusicBrainz spec.
        for v in value:
            if v and max(v) > u'\x7f':
                enc = 3
                break

        id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))

    def deleter(id3, key):
        del(id3[frameid])

    cls.RegisterKey(key, getter, setter, deleter)
"resource": ""
} |
def _get_value_type(kind):
    """Returns a _APEValue subclass or raises ValueError"""
    value_types = {
        TEXT: APETextValue,
        BINARY: APEBinaryValue,
        EXTERNAL: APEExtValue,
    }
    if kind not in value_types:
        raise ValueError("unknown kind %r" % kind)
    return value_types[kind]
"resource": ""
} |
def APEValue(value, kind):
    """APEv2 tag value factory.
    Use this if you need to specify the value's type manually. Binary
    and text data are automatically detected by APEv2.__setitem__.
    """
    try:
        value_type = _get_value_type(kind)
    except ValueError:
        raise ValueError("kind must be TEXT, BINARY, or EXTERNAL")
    return value_type(value)
"resource": ""
} |
def pprint(self):
    """Return tag key=value pairs in a human-readable format."""
    lines = [u"%s=%s" % (key, value.pprint())
             for (key, value) in sorted(self.items())]
    return u"\n".join(lines)
"resource": ""
} |
def __parse_tag(self, tag, count):
    """Raises IOError and APEBadItemError"""
    fileobj = cBytesIO(tag)

    for i in xrange(count):
        # item header: 32 bit LE value size + 32 bit LE flags
        tag_data = fileobj.read(8)
        # someone writes wrong item counts
        if not tag_data:
            break
        if len(tag_data) != 8:
            raise error
        size = cdata.uint32_le(tag_data[:4])
        flags = cdata.uint32_le(tag_data[4:8])

        # Bits 1 and 2 bits are flags, 0-3
        # Bit 0 is read/write flag, ignored
        kind = (flags & 6) >> 1
        if kind == 3:
            raise APEBadItemError("value type must be 0, 1, or 2")

        # the key is NUL terminated, read it byte by byte
        key = value = fileobj.read(1)
        if not key:
            raise APEBadItemError
        while key[-1:] != b'\x00' and value:
            value = fileobj.read(1)
            if not value:
                raise APEBadItemError
            key += value
        if key[-1:] == b"\x00":
            key = key[:-1]
        if PY3:
            try:
                key = key.decode("ascii")
            except UnicodeError as err:
                reraise(APEBadItemError, err, sys.exc_info()[2])

        value = fileobj.read(size)
        if len(value) != size:
            raise APEBadItemError

        value = _get_value_type(kind)._new(value)
        self[key] = value
"resource": ""
} |
def save(self, filething=None):
    """Save changes to a file.
    If no filename is given, the one most recently loaded is used.
    Tags are always written at the end of the file, and include
    a header and a footer.
    """
    fileobj = filething.fileobj

    data = _APEv2Data(fileobj)

    # remove any existing tag first
    if data.is_at_start:
        delete_bytes(fileobj, data.end - data.start, data.start)
    elif data.start is not None:
        fileobj.seek(data.start)
        # Delete an ID3v1 tag if present, too.
        fileobj.truncate()
    fileobj.seek(0, 2)

    tags = []
    for key, value in self.items():
        # Packed format for an item:
        # 4B: Value length
        # 4B: Value type
        # Key name
        # 1B: Null
        # Key value
        value_data = value._write()
        if not isinstance(key, bytes):
            key = key.encode("utf-8")
        tag_data = bytearray()
        tag_data += struct.pack("<2I", len(value_data), value.kind << 1)
        tag_data += key + b"\0" + value_data
        tags.append(bytes(tag_data))

    # "APE tags items should be sorted ascending by size... This is
    # not a MUST, but STRONGLY recommended. Actually the items should
    # be sorted by importance/byte, but this is not feasible."
    tags.sort(key=lambda tag: (len(tag), tag))
    num_tags = len(tags)
    tags = b"".join(tags)

    header = bytearray(b"APETAGEX")
    # version, tag size, item count, flags
    header += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                          HAS_HEADER | IS_HEADER)
    header += b"\0" * 8
    fileobj.write(header)

    fileobj.write(tags)

    # the footer is identical except for the IS_HEADER flag
    footer = bytearray(b"APETAGEX")
    footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                          HAS_HEADER)
    footer += b"\0" * 8
    fileobj.write(footer)
"resource": ""
} |
def as_dict(self):
    """Return a copy of the comment data in a real dict."""
    result = {}
    for key, value in self:
        if key not in result:
            result[key] = []
        result[key].append(value)
    return result
"resource": ""
} |
def _get_v23_frame(self, **kwargs):
    """Returns a frame copy which is suitable for writing into a v2.3 tag.
    kwargs get passed to the specs.
    """
    new_kwargs = {}
    # mandatory specs are always present on the frame
    for checker in self._framespec:
        name = checker.name
        value = getattr(self, name)
        new_kwargs[name] = checker._validate23(self, value, **kwargs)

    # optional specs are only converted when actually set
    for checker in self._optionalspec:
        name = checker.name
        if hasattr(self, name):
            value = getattr(self, name)
            new_kwargs[name] = checker._validate23(self, value, **kwargs)

    return type(self)(**new_kwargs)
"resource": ""
} |
def _readData(self, id3, data):
    """Raises ID3JunkFrameError; Returns leftover data"""
    # mandatory specs: running out of data here is an error
    for reader in self._framespec:
        if len(data) or reader.handle_nodata:
            try:
                value, data = reader.read(id3, self, data)
            except SpecError as e:
                raise ID3JunkFrameError(e)
        else:
            raise ID3JunkFrameError("no data left")
        self._setattr(reader.name, value)

    # optional specs: stop quietly at the first one without data
    for reader in self._optionalspec:
        if len(data) or reader.handle_nodata:
            try:
                value, data = reader.read(id3, self, data)
            except SpecError as e:
                raise ID3JunkFrameError(e)
        else:
            break
        self._setattr(reader.name, value)

    return data
"resource": ""
} |
def _fromData(cls, header, tflags, data):
    """Construct this ID3 frame from raw string data.
    Raises:
        ID3JunkFrameError in case parsing failed
        NotImplementedError in case parsing isn't implemented
        ID3EncryptionUnsupportedError in case the frame is encrypted.
    """
    if header.version >= header._V24:
        if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN):
            # The data length int is syncsafe in 2.4 (but not 2.3).
            # However, we don't actually need the data length int,
            # except to work around a QL 0.12 bug, and in that case
            # all we need are the raw bytes.
            datalen_bytes = data[:4]
            data = data[4:]
        if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch:
            try:
                data = unsynch.decode(data)
            except ValueError:
                # Some things write synch-unsafe data with either the frame
                # or global unsynch flag set. Try to load them as is.
                # https://github.com/quodlibet/mutagen/issues/210
                # https://github.com/quodlibet/mutagen/issues/223
                pass
        if tflags & Frame.FLAG24_ENCRYPT:
            raise ID3EncryptionUnsupportedError
        if tflags & Frame.FLAG24_COMPRESS:
            try:
                data = zlib.decompress(data)
            except zlib.error:
                # the initial mutagen that went out with QL 0.12 did not
                # write the 4 bytes of uncompressed size. Compensate.
                data = datalen_bytes + data
                try:
                    data = zlib.decompress(data)
                except zlib.error as err:
                    raise ID3JunkFrameError(
                        'zlib: %s: %r' % (err, data))
    elif header.version >= header._V23:
        if tflags & Frame.FLAG23_COMPRESS:
            # 2.3 stores a plain (non syncsafe) uncompressed size first
            usize, = unpack('>L', data[:4])
            data = data[4:]
        if tflags & Frame.FLAG23_ENCRYPT:
            raise ID3EncryptionUnsupportedError
        if tflags & Frame.FLAG23_COMPRESS:
            try:
                data = zlib.decompress(data)
            except zlib.error as err:
                raise ID3JunkFrameError('zlib: %s: %r' % (err, data))

    frame = cls()
    frame._readData(header, data)
    return frame
"resource": ""
} |
def parse_version(cls, fileobj):
    """Returns a version string and True if a LAMEHeader follows.
    The passed file object will be positioned right before the
    lame header if True.
    Raises LAMEError if there is no lame version info.
    """
    # http://wiki.hydrogenaud.io/index.php?title=LAME_version_string
    data = fileobj.read(20)
    if len(data) != 20:
        raise LAMEError("Not a lame header")
    if not data.startswith((b"LAME", b"L3.99")):
        raise LAMEError("Not a lame header")
    # strip the "LAME" magic, then split off "<major>.<minor...>"
    data = data.lstrip(b"EMAL")
    major, data = data[0:1], data[1:].lstrip(b".")
    minor = b""
    for c in iterbytes(data):
        if not c.isdigit():
            break
        minor += c
    data = data[len(minor):]
    try:
        major = int(major.decode("ascii"))
        minor = int(minor.decode("ascii"))
    except ValueError:
        raise LAMEError

    # the extended header was added sometimes in the 3.90 cycle
    # e.g. "LAME3.90 (alpha)" should still stop here.
    # (I have seen such a file)
    if (major, minor) < (3, 90) or (
            (major, minor) == (3, 90) and data[-11:-10] == b"("):
        flag = data.strip(b"\x00").rstrip().decode("ascii")
        return (major, minor), u"%d.%d%s" % (major, minor, flag), False

    # FIX: the message used to claim "too long" although this check
    # triggers when there are not enough bytes left for the flag part.
    if len(data) < 11:
        raise LAMEError("Invalid version: too short")

    # flag byte(s) precede the 11 bytes claimed by the extended header
    flag = data[:-11].rstrip(b"\x00")

    flag_string = u""
    patch = u""
    if flag == b"a":
        flag_string = u" (alpha)"
    elif flag == b"b":
        flag_string = u" (beta)"
    elif flag == b"r":
        patch = u".1+"
    elif flag == b" ":
        if (major, minor) > (3, 96):
            patch = u".0"
        else:
            patch = u".0+"
    elif flag == b"" or flag == b".":
        patch = u".0+"
    else:
        flag_string = u" (?)"

    # extended header, seek back to 9 bytes for the caller
    fileobj.seek(-11, 1)

    return (major, minor), \
        u"%d.%d%s%s" % (major, minor, patch, flag_string), True
"resource": ""
} |
def get_encoder_settings(self):
    """Returns the guessed encoder settings"""
    header = self.lame_header
    if header is None:
        return u""
    return header.guess_settings(*self.lame_version)
"resource": ""
} |
def get_offset(cls, info):
    """Calculate the offset to the Xing header from the start of the
    MPEG header including sync based on the MPEG header's content.
    """
    assert info.layer == 3

    # mode 3 is mono; the side info is smaller then
    mono = (info.mode == 3)
    if info.version == 1:
        return 21 if mono else 36
    return 13 if mono else 21
"resource": ""
} |
def get_windows_env_var(key):
    """Get an env var.
    Raises:
        WindowsError
    """
    if not isinstance(key, text_type):
        raise TypeError("%r not of type %r" % (key, text_type))

    # 32767 is the documented maximum length of an environment variable
    buf = ctypes.create_unicode_buffer(32767)
    length = winapi.GetEnvironmentVariableW(key, buf, 32767)
    if not length:
        raise ctypes.WinError()
    return buf[:length]
"resource": ""
} |
def set_windows_env_var(key, value):
    """Set an env var.
    Raises:
        WindowsError
    """
    if not isinstance(key, text_type):
        raise TypeError("%r not of type %r" % (key, text_type))
    if not isinstance(value, text_type):
        raise TypeError("%r not of type %r" % (value, text_type))

    # SetEnvironmentVariableW returns 0 on failure
    if not winapi.SetEnvironmentVariableW(key, value):
        raise ctypes.WinError()
"resource": ""
} |
def del_windows_env_var(key):
    """Delete an env var.
    Raises:
        WindowsError
    """
    if not isinstance(key, text_type):
        raise TypeError("%r not of type %r" % (key, text_type))

    # passing None as the value removes the variable; 0 means failure
    if not winapi.SetEnvironmentVariableW(key, None):
        raise ctypes.WinError()
"resource": ""
} |
def read_windows_environ():
    """Returns a unicode dict of the Windows environment.
    Raises:
        WindowsEnvironError
    """
    res = winapi.GetEnvironmentStringsW()
    if not res:
        raise ctypes.WinError()

    res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))

    # The environment block is a sequence of NUL terminated
    # "key=value" strings, terminated by an empty string.
    done = []
    current = u""
    i = 0
    while 1:
        c = res[i]
        i += 1
        if c == u"\x00":
            if not current:
                break
            done.append(current)
            current = u""
            continue
        current += c

    dict_ = {}
    for entry in done:
        try:
            key, value = entry.split(u"=", 1)
        except ValueError:
            # entries without '=' (e.g. drive-letter cwd entries) are skipped
            continue
        key = _norm_key(key)
        dict_[key] = value

    status = winapi.FreeEnvironmentStringsW(res)
    if status == 0:
        raise ctypes.WinError()

    return dict_
"resource": ""
} |
def getenv(key, value=None):
    """Like `os.getenv` but returns unicode under Windows + Python 2
    Args:
        key (pathlike): The env var to get
        value (object): The value to return if the env var does not exist
    Returns:
        `fsnative` or `object`:
            The env var or the passed value if it doesn't exist
    """
    key = path2fsn(key)
    if not (is_win and PY2):
        return os.getenv(key, value)
    # Windows + Python 2: os.environ is bytes only, use the unicode copy
    return environ.get(key, value)
"resource": ""
} |
def unsetenv(key):
    """Like `os.unsetenv` but takes unicode under Windows + Python 2
    Args:
        key (pathlike): The env var to unset
    """
    key = path2fsn(key)
    if not is_win:
        os.unsetenv(key)
        return
    # python 3 has no unsetenv under Windows -> use our ctypes one as well
    try:
        del_windows_env_var(key)
    except WindowsError:
        pass
"resource": ""
} |
def putenv(key, value):
    """Like `os.putenv` but takes unicode under Windows + Python 2
    Args:
        key (pathlike): The env var to get
        value (pathlike): The value to set
    Raises:
        ValueError
    """
    key = path2fsn(key)
    value = path2fsn(value)

    if is_win and PY2:
        try:
            set_windows_env_var(key, value)
        except WindowsError:
            # py3 + win fails here
            raise ValueError
        return

    try:
        os.putenv(key, value)
    except OSError:
        # win + py3 raise here for invalid keys which is probably a bug.
        # ValueError seems better
        raise ValueError
"resource": ""
} |
def from_fileobj(cls, fileobj):
    """A new _WavPackHeader or raises WavPackHeaderError"""
    # a WavPack block header is exactly 32 bytes, magic "wvpk"
    header = fileobj.read(32)
    if len(header) != 32 or not header.startswith(b"wvpk"):
        raise WavPackHeaderError("not a WavPack header: %r" % header)

    block_size = cdata.uint_le(header[4:8])
    version = cdata.ushort_le(header[8:10])
    track_no = ord(header[10:11])
    index_no = ord(header[11:12])
    samples = cdata.uint_le(header[12:16])
    if samples == 2 ** 32 - 1:
        # all-ones marks an unknown total sample count
        samples = -1
    block_index = cdata.uint_le(header[16:20])
    block_samples = cdata.uint_le(header[20:24])
    flags = cdata.uint_le(header[24:28])
    crc = cdata.uint_le(header[28:32])

    return _WavPackHeader(block_size, version, track_no, index_no,
                          samples, block_index, block_samples, flags, crc)
"resource": ""
} |
def determine_bpi(data, frames, EMPTY=b"\x00" * 10):
    """Takes id3v2.4 frame data and determines if ints or bitpaddedints
    should be used for parsing. Needed because iTunes used to write
    normal ints for frame sizes.

    Args:
        data (bytes): raw frame data following the ID3 header
        frames (dict): mapping of known frame IDs to frame classes
        EMPTY (bytes): a blank 10-byte frame header, marks padding

    Returns:
        The size parser to use: either ``int`` or ``BitPaddedInt``.
    """
    # count number of tags found as BitPaddedInt and how far past
    o = 0
    asbpi = 0
    while o < len(data) - 10:
        part = data[o:o + 10]
        if part == EMPTY:
            # hit padding: record distance into it (always <= 0)
            bpioff = -((len(data) - o) % 10)
            break
        name, size, flags = struct.unpack('>4sLH', part)
        size = BitPaddedInt(size)
        o += 10 + size
        if PY3:
            try:
                name = name.decode("ascii")
            except UnicodeDecodeError:
                continue
        if name in frames:
            asbpi += 1
    else:
        # walked past the end without hitting padding: positive overshoot
        bpioff = o - len(data)

    # count number of tags found as int and how far past
    o = 0
    asint = 0
    while o < len(data) - 10:
        part = data[o:o + 10]
        if part == EMPTY:
            intoff = -((len(data) - o) % 10)
            break
        name, size, flags = struct.unpack('>4sLH', part)
        o += 10 + size
        if PY3:
            try:
                name = name.decode("ascii")
            except UnicodeDecodeError:
                continue
        if name in frames:
            asint += 1
    else:
        intoff = o - len(data)

    # if more tags as int, or equal and bpi is past and int is not
    if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)):
        return int
    return BitPaddedInt
def read_frames(id3, data, frames):
    """Parse raw tag data into frame objects.

    Does not error out: unparsable or junk frames are skipped or
    collected rather than raising.

    Args:
        id3 (ID3Header): header of the tag being read (version, flags)
        data (bytes): raw frame data following the header
        frames (dict): mapping of known frame IDs to frame classes

    Returns:
        tuple: (parsed frame instances,
                raw bytes of valid-looking but unsupported frames,
                leftover data after the last parsed frame)
    """
    assert id3.version >= ID3Header._V22

    result = []
    unsupported_frames = []

    # Pre-v2.4 tags are unsynchronized as a whole; undo that globally.
    # (v2.4 handles unsynchronization per frame instead.)
    if id3.version < ID3Header._V24 and id3.f_unsynch:
        try:
            data = unsynch.decode(data)
        except ValueError:
            pass

    if id3.version >= ID3Header._V23:
        if id3.version < ID3Header._V24:
            bpi = int
        else:
            # iTunes wrote plain ints where v2.4 requires bit-padded
            # ints; guess which style this tag actually uses.
            bpi = determine_bpi(data, frames)
        # v2.3/v2.4 frames: 4-byte ID, 4-byte size, 2 flag bytes
        while data:
            header = data[:10]
            try:
                name, size, flags = struct.unpack('>4sLH', header)
            except struct.error:
                break  # not enough header
            if name.strip(b'\x00') == b'':
                break  # reached padding
            size = bpi(size)
            framedata = data[10:10 + size]
            data = data[10 + size:]
            if size == 0:
                continue  # drop empty frames
            if PY3:
                try:
                    name = name.decode('ascii')
                except UnicodeDecodeError:
                    continue
            try:
                # someone writes 2.3 frames with 2.2 names
                if name[-1] == "\x00":
                    tag = Frames_2_2[name[:-1]]
                    name = tag.__base__.__name__
                tag = frames[name]
            except KeyError:
                # unknown frame ID: keep the raw bytes if it looks valid
                if is_valid_frame_id(name):
                    unsupported_frames.append(header + framedata)
            else:
                try:
                    result.append(tag._fromData(id3, flags, framedata))
                except NotImplementedError:
                    unsupported_frames.append(header + framedata)
                except ID3JunkFrameError:
                    pass
    elif id3.version >= ID3Header._V22:
        # v2.2 frames: 3-byte ID, 3-byte (24-bit) size, no flags
        while data:
            header = data[0:6]
            try:
                name, size = struct.unpack('>3s3s', header)
            except struct.error:
                break  # not enough header
            # widen the 24-bit size to a 32-bit unsigned int
            size, = struct.unpack('>L', b'\x00' + size)
            if name.strip(b'\x00') == b'':
                break  # reached padding
            framedata = data[6:6 + size]
            data = data[6 + size:]
            if size == 0:
                continue  # drop empty frames
            if PY3:
                try:
                    name = name.decode('ascii')
                except UnicodeDecodeError:
                    continue
            try:
                tag = frames[name]
            except KeyError:
                if is_valid_frame_id(name):
                    unsupported_frames.append(header + framedata)
            else:
                try:
                    result.append(
                        tag._fromData(id3, 0, framedata))
                except (ID3EncryptionUnsupportedError,
                        NotImplementedError):
                    unsupported_frames.append(header + framedata)
                except ID3JunkFrameError:
                    pass

    return result, unsupported_frames, data
def setall(self, key, values):
    """Delete frames of the given type and add frames in 'values'.

    Args:
        key (text): key for frames to delete
        values (list[Frame]): frames to add
    """
    # Replace, don't merge: drop every existing frame of this type
    # first, then store each new frame under its own hash key.
    self.delall(key)
    for frame in values:
        self[frame.HashKey] = frame
def _add(self, frame, strict):
    """Add a frame.

    Args:
        frame (Frame): the frame to add
        strict (bool): if this should raise in case it can't be added
            and frames shouldn't be merged.

    Raises:
        TypeError: if `frame` is not a Frame instance, or (with
            ``strict``) if it cannot be upgraded to a current frame
    """
    if not isinstance(frame, Frame):
        raise TypeError("%r not a Frame instance" % frame)

    orig_frame = frame
    frame = frame._upgrade_frame()
    if frame is None:
        if not strict:
            # silently drop frames that can't be upgraded
            return
        raise TypeError(
            "Can't upgrade %r frame" % type(orig_frame).__name__)

    hash_key = frame.HashKey
    if strict or hash_key not in self:
        # no conflict (or strict mode): just store/overwrite
        self[hash_key] = frame
        return

    # Try to merge frames, or change the new one. Since changing
    # the new one can lead to new conflicts, try until everything is
    # either merged or added.
    while True:
        old_frame = self[hash_key]
        new_frame = old_frame._merge_frame(frame)
        new_hash = new_frame.HashKey
        if new_hash == hash_key:
            # merged result keeps the same key: store it and stop
            self[hash_key] = new_frame
            break
        else:
            # _merge_frame changed the incoming frame's identity
            # instead of merging; retry under its new hash key
            assert new_frame is frame
            if new_hash not in self:
                self[new_hash] = new_frame
                break
            hash_key = new_hash
def __update_common(self):
    """Fixups shared by both the v2.3 and v2.4 update paths."""

    if "TCON" in self:
        # Re-assigning genres normalizes the legacy "(xx)Foobr" format.
        self["TCON"].genres = self["TCON"].genres

    # Old tags used bare format names; rewrite them as real MIME types.
    mime_fixes = {"PNG": "image/png", "JPG": "image/jpeg"}
    for pic in self.getall("APIC"):
        full_mime = mime_fixes.get(pic.mime)
        if full_mime is None:
            continue
        self.add(APIC(encoding=pic.encoding, mime=full_mime,
                      type=pic.type, desc=pic.desc, data=pic.data))
def update_to_v24(self):
    """Convert older tags into an ID3v2.4 tag.

    This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
    TDRC). If you intend to save tags, you must call this function
    at some point; it is called by default when loading the tag.
    """
    self.__update_common()

    # TDAT, TYER, and TIME have been turned into TDRC.
    try:
        date = text_type(self.get("TYER", ""))
        if date.strip(u"\x00"):
            self.pop("TYER")
            dat = text_type(self.get("TDAT", ""))
            if dat.strip("\x00"):
                self.pop("TDAT")
                # TDAT is DDMM; TDRC wants YYYY-MM-DD
                date = "%s-%s-%s" % (date, dat[2:], dat[:2])
                time = text_type(self.get("TIME", ""))
                if time.strip("\x00"):
                    self.pop("TIME")
                    # TIME is HHMM; append as THH:MM:00
                    date += "T%s:%s:00" % (time[:2], time[2:])
            if "TDRC" not in self:
                self.add(TDRC(encoding=0, text=date))
    except UnicodeDecodeError:
        # Old ID3 tags have *lots* of Unicode problems, so if TYER
        # is bad, just chuck the frames.
        pass

    # TORY can be the first part of a TDOR.
    if "TORY" in self:
        f = self.pop("TORY")
        if "TDOR" not in self:
            try:
                self.add(TDOR(encoding=0, text=str(f)))
            except UnicodeDecodeError:
                pass

    # IPLS is now TIPL.
    if "IPLS" in self:
        f = self.pop("IPLS")
        if "TIPL" not in self:
            self.add(TIPL(encoding=f.encoding, people=f.people))

    # These can't be trivially translated to any ID3v2.4 tags, or
    # should have been removed already.
    for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]:
        if key in self:
            del(self[key])

    # Recurse into chapters
    for f in self.getall("CHAP"):
        f.sub_frames.update_to_v24()
    for f in self.getall("CTOC"):
        f.sub_frames.update_to_v24()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.