| id (int64, 0 to 458k) | file_name (string, 4 to 119 chars) | file_path (string, 14 to 227 chars) | content (string, 24 to 9.96M chars) | size (int64, 24 to 9.96M) | language (1 class) | extension (14 classes) | total_lines (int64, 1 to 219k) | avg_line_length (float64, 2.52 to 4.63M) | max_line_length (int64, 5 to 9.91M) | alphanum_fraction (float64, 0 to 1) | repo_name (string, 7 to 101 chars) | repo_stars (int64, 100 to 139k) | repo_forks (int64, 0 to 26.4k) | repo_open_issues (int64, 0 to 2.27k) | repo_license (12 classes) | repo_extraction_date (433 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 8,500 | flac.py | rembo10_headphones/lib/mutagen/flac.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write FLAC Vorbis comments and stream information.
Read more about FLAC at http://flac.sourceforge.net.
FLAC supports arbitrary metadata blocks. The two most interesting ones
are the FLAC stream information block, and the Vorbis comment block;
these are also the only ones Mutagen can currently read.
This module does not handle Ogg FLAC files.
Based off documentation available at
http://flac.sourceforge.net/format.html
"""
__all__ = ["FLAC", "Open", "delete"]
import struct
from io import BytesIO
from ._vorbis import VCommentDict
import mutagen
from mutagen._util import resize_bytes, MutagenError, get_size, loadfile, \
convert_error, bchr, endswith
from mutagen._tags import PaddingInfo
from mutagen.id3._util import BitPaddedInt
from functools import reduce
class error(MutagenError):
pass
class FLACNoHeaderError(error):
pass
class FLACVorbisError(ValueError, error):
pass
def to_int_be(data):
"""Convert an arbitrarily-long string to a long using big-endian
byte order."""
return reduce(lambda a, b: (a << 8) + b, bytearray(data), 0)
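# e.g. to_int_be(b"\x01\x02") == (1 << 8) + 2 == 258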
class StrictFileObject(object):
"""Wraps a file-like object and raises an exception if the requested
amount of data to read isn't returned."""
def __init__(self, fileobj):
self._fileobj = fileobj
for m in ["close", "tell", "seek", "write", "name", "flush",
"truncate"]:
if hasattr(fileobj, m):
setattr(self, m, getattr(fileobj, m))
def read(self, size=-1):
data = self._fileobj.read(size)
if size >= 0 and len(data) != size:
raise error("file said %d bytes, read %d bytes" % (
size, len(data)))
return data
def tryread(self, *args):
return self._fileobj.read(*args)
class MetadataBlock(object):
"""A generic block of FLAC metadata.
This class is used as an ancestor for more specific block types, and
also as a container for data blobs of unknown blocks.
Attributes:
data (`bytes`): raw binary data for this block
"""
_distrust_size = False
"""For block types setting this, we don't trust the size field and
use the size of the content instead."""
_invalid_overflow_size = -1
"""In case the real size was bigger than what is representable by the
24 bit size field, we save the wrong specified size here. This can
only be set if _distrust_size is True"""
_MAX_SIZE = 2 ** 24 - 1
def __init__(self, data):
"""Parse the given data string or file-like as a metadata block.
The metadata header should not be included."""
if data is not None:
if not isinstance(data, StrictFileObject):
if isinstance(data, bytes):
data = BytesIO(data)
elif not hasattr(data, 'read'):
raise TypeError(
"StreamInfo requires string data or a file-like")
data = StrictFileObject(data)
self.load(data)
def load(self, data):
self.data = data.read()
def write(self):
return self.data
@classmethod
def _writeblock(cls, block, is_last=False):
"""Returns the block content + header.
Raises error.
"""
data = bytearray()
code = (block.code | 128) if is_last else block.code
datum = block.write()
size = len(datum)
if size > cls._MAX_SIZE:
if block._distrust_size and block._invalid_overflow_size != -1:
# The original size of this block was (1) wrong and (2)
# the real size doesn't allow us to save the file
# according to the spec (too big for 24 bit uint). Instead
# simply write back the original wrong size; at least
# we don't make the file any more "broken" than it is.
size = block._invalid_overflow_size
else:
raise error("block is too long to write")
assert not size > cls._MAX_SIZE
length = struct.pack(">I", size)[-3:]
data.append(code)
data += length
data += datum
return data
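# Illustrative layout produced by _writeblock: a 10-byte Padding block
# marked as the last block yields b"\x81\x00\x00\x0a" + b"\x00" * 10
# (code 1 with the 0x80 "last" flag, then a 24-bit big-endian length).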
@classmethod
def _writeblocks(cls, blocks, available, cont_size, padding_func):
"""Render metadata block as a byte string."""
# write everything except padding
data = bytearray()
for block in blocks:
if isinstance(block, Padding):
continue
data += cls._writeblock(block)
blockssize = len(data)
# take the padding overhead into account. we always add one
# to make things simple.
padding_block = Padding()
blockssize += len(cls._writeblock(padding_block))
# finally add a padding block
info = PaddingInfo(available - blockssize, cont_size)
padding_block.length = min(info._get_padding(padding_func),
cls._MAX_SIZE)
data += cls._writeblock(padding_block, is_last=True)
return data
class StreamInfo(MetadataBlock, mutagen.StreamInfo):
"""StreamInfo()
FLAC stream information.
This contains information about the audio data in the FLAC file.
Unlike most stream information objects in Mutagen, changes to this
one will be rewritten to the file when it is saved. Unless you are
actually changing the audio stream itself, don't change any
attributes of this block.
Attributes:
min_blocksize (`int`): minimum audio block size
max_blocksize (`int`): maximum audio block size
sample_rate (`int`): audio sample rate in Hz
channels (`int`): audio channels (1 for mono, 2 for stereo)
bits_per_sample (`int`): bits per sample
total_samples (`int`): total samples in file
length (`float`): audio length in seconds
bitrate (`int`): bitrate in bits per second, as an int
"""
code = 0
bitrate = 0
def __eq__(self, other):
try:
return (self.min_blocksize == other.min_blocksize and
self.max_blocksize == other.max_blocksize and
self.sample_rate == other.sample_rate and
self.channels == other.channels and
self.bits_per_sample == other.bits_per_sample and
self.total_samples == other.total_samples)
except Exception:
return False
__hash__ = MetadataBlock.__hash__
def load(self, data):
self.min_blocksize = int(to_int_be(data.read(2)))
self.max_blocksize = int(to_int_be(data.read(2)))
self.min_framesize = int(to_int_be(data.read(3)))
self.max_framesize = int(to_int_be(data.read(3)))
# first 16 bits of sample rate
sample_first = to_int_be(data.read(2))
# last 4 bits of sample rate, 3 of channels, first 1 of bits/sample
sample_channels_bps = to_int_be(data.read(1))
# last 4 of bits/sample, 36 of total samples
bps_total = to_int_be(data.read(5))
sample_tail = sample_channels_bps >> 4
self.sample_rate = int((sample_first << 4) + sample_tail)
if not self.sample_rate:
raise error("A sample rate value of 0 is invalid")
self.channels = int(((sample_channels_bps >> 1) & 7) + 1)
bps_tail = bps_total >> 36
bps_head = (sample_channels_bps & 1) << 4
self.bits_per_sample = int(bps_head + bps_tail + 1)
self.total_samples = bps_total & 0xFFFFFFFFF
self.length = self.total_samples / float(self.sample_rate)
self.md5_signature = to_int_be(data.read(16))
def write(self):
f = BytesIO()
f.write(struct.pack(">I", self.min_blocksize)[-2:])
f.write(struct.pack(">I", self.max_blocksize)[-2:])
f.write(struct.pack(">I", self.min_framesize)[-3:])
f.write(struct.pack(">I", self.max_framesize)[-3:])
# first 16 bits of sample rate
f.write(struct.pack(">I", self.sample_rate >> 4)[-2:])
# 4 bits sample, 3 channel, 1 bps
byte = (self.sample_rate & 0xF) << 4
byte += ((self.channels - 1) & 7) << 1
byte += ((self.bits_per_sample - 1) >> 4) & 1
f.write(bchr(byte))
# 4 bits of bps, 4 of sample count
byte = ((self.bits_per_sample - 1) & 0xF) << 4
byte += (self.total_samples >> 32) & 0xF
f.write(bchr(byte))
# last 32 of sample count
f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF))
# MD5 signature
sig = self.md5_signature
f.write(struct.pack(
">4I", (sig >> 96) & 0xFFFFFFFF, (sig >> 64) & 0xFFFFFFFF,
(sig >> 32) & 0xFFFFFFFF, sig & 0xFFFFFFFF))
return f.getvalue()
def pprint(self):
return u"FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate)
class SeekPoint(tuple):
"""SeekPoint()
A single seek point in a FLAC file.
Placeholder seek points have first_sample of 0xFFFFFFFFFFFFFFFF,
and byte_offset and num_samples undefined. Seek points must be
sorted in ascending order by first_sample number. Seek points must
be unique by first_sample number, except for placeholder
points. Placeholder points must occur last in the table and there
may be any number of them.
Attributes:
first_sample (`int`): sample number of first sample in the target frame
byte_offset (`int`): offset from first frame to target frame
num_samples (`int`): number of samples in target frame
"""
def __new__(cls, first_sample, byte_offset, num_samples):
return super(SeekPoint, cls).__new__(
cls, (first_sample, byte_offset, num_samples))
def __getnewargs__(self):
return self.first_sample, self.byte_offset, self.num_samples
first_sample = property(lambda self: self[0])
byte_offset = property(lambda self: self[1])
num_samples = property(lambda self: self[2])
class SeekTable(MetadataBlock):
"""Read and write FLAC seek tables.
Attributes:
seekpoints: list of SeekPoint objects
"""
__SEEKPOINT_FORMAT = '>QQH'
__SEEKPOINT_SIZE = struct.calcsize(__SEEKPOINT_FORMAT)
code = 3
def __init__(self, data):
self.seekpoints = []
super(SeekTable, self).__init__(data)
def __eq__(self, other):
try:
return (self.seekpoints == other.seekpoints)
except (AttributeError, TypeError):
return False
__hash__ = MetadataBlock.__hash__
def load(self, data):
self.seekpoints = []
sp = data.tryread(self.__SEEKPOINT_SIZE)
while len(sp) == self.__SEEKPOINT_SIZE:
self.seekpoints.append(SeekPoint(
*struct.unpack(self.__SEEKPOINT_FORMAT, sp)))
sp = data.tryread(self.__SEEKPOINT_SIZE)
def write(self):
f = BytesIO()
for seekpoint in self.seekpoints:
packed = struct.pack(
self.__SEEKPOINT_FORMAT,
seekpoint.first_sample, seekpoint.byte_offset,
seekpoint.num_samples)
f.write(packed)
return f.getvalue()
def __repr__(self):
return "<%s seekpoints=%r>" % (type(self).__name__, self.seekpoints)
class VCFLACDict(VCommentDict):
"""VCFLACDict()
Read and write FLAC Vorbis comments.
FLACs don't use the framing bit at the end of the comment block.
So this extends VCommentDict to not use the framing bit.
"""
code = 4
_distrust_size = True
def load(self, data, errors='replace', framing=False):
super(VCFLACDict, self).load(data, errors=errors, framing=framing)
def write(self, framing=False):
return super(VCFLACDict, self).write(framing=framing)
class CueSheetTrackIndex(tuple):
"""CueSheetTrackIndex(index_number, index_offset)
Index for a track in a cuesheet.
For CD-DA, an index_number of 0 corresponds to the track
pre-gap. The first index in a track must have a number of 0 or 1,
and subsequently, index_numbers must increase by 1. Index_numbers
must be unique within a track. And index_offset must be evenly
divisible by 588 samples.
Attributes:
index_number (`int`): index point number
index_offset (`int`): offset in samples from track start
"""
def __new__(cls, index_number, index_offset):
return super(CueSheetTrackIndex, cls).__new__(
cls, (index_number, index_offset))
index_number = property(lambda self: self[0])
index_offset = property(lambda self: self[1])
class CueSheetTrack(object):
"""CueSheetTrack()
A track in a cuesheet.
For CD-DA, track_numbers must be 1-99, or 170 for the
lead-out. Track_numbers must be unique within a cue sheet. There
must be at least one index in every track except the lead-out track,
which must have none.
Attributes:
track_number (`int`): track number
start_offset (`int`): track offset in samples from start of FLAC stream
isrc (`mutagen.text`): ISRC code, exactly 12 characters
type (`int`): 0 for audio, 1 for digital data
pre_emphasis (`bool`): true if the track is recorded with pre-emphasis
indexes (list[CueSheetTrackIndex]):
list of CueSheetTrackIndex objects
"""
def __init__(self, track_number, start_offset, isrc='', type_=0,
pre_emphasis=False):
self.track_number = track_number
self.start_offset = start_offset
self.isrc = isrc
self.type = type_
self.pre_emphasis = pre_emphasis
self.indexes = []
def __eq__(self, other):
try:
return (self.track_number == other.track_number and
self.start_offset == other.start_offset and
self.isrc == other.isrc and
self.type == other.type and
self.pre_emphasis == other.pre_emphasis and
self.indexes == other.indexes)
except (AttributeError, TypeError):
return False
__hash__ = object.__hash__
def __repr__(self):
return (("<%s number=%r, offset=%d, isrc=%r, type=%r, "
"pre_emphasis=%r, indexes=%r)>") %
(type(self).__name__, self.track_number, self.start_offset,
self.isrc, self.type, self.pre_emphasis, self.indexes))
class CueSheet(MetadataBlock):
"""CueSheet()
Read and write FLAC embedded cue sheets.
Number of tracks should be from 1 to 100. There should always be
exactly one lead-out track and that track must be the last track
in the cue sheet.
Attributes:
media_catalog_number (`mutagen.text`): media catalog number in ASCII,
up to 128 characters
lead_in_samples (`int`): number of lead-in samples
compact_disc (`bool`): true if the cuesheet corresponds to a
compact disc
tracks (list[CueSheetTrack]):
list of CueSheetTrack objects
lead_out (`CueSheetTrack` or `None`):
lead-out as CueSheetTrack or None if lead-out was not found
"""
__CUESHEET_FORMAT = '>128sQB258xB'
__CUESHEET_SIZE = struct.calcsize(__CUESHEET_FORMAT)
__CUESHEET_TRACK_FORMAT = '>QB12sB13xB'
__CUESHEET_TRACK_SIZE = struct.calcsize(__CUESHEET_TRACK_FORMAT)
__CUESHEET_TRACKINDEX_FORMAT = '>QB3x'
__CUESHEET_TRACKINDEX_SIZE = struct.calcsize(__CUESHEET_TRACKINDEX_FORMAT)
code = 5
media_catalog_number = b''
lead_in_samples = 88200
compact_disc = True
def __init__(self, data):
self.tracks = []
super(CueSheet, self).__init__(data)
def __eq__(self, other):
try:
return (self.media_catalog_number == other.media_catalog_number and
self.lead_in_samples == other.lead_in_samples and
self.compact_disc == other.compact_disc and
self.tracks == other.tracks)
except (AttributeError, TypeError):
return False
__hash__ = MetadataBlock.__hash__
def load(self, data):
header = data.read(self.__CUESHEET_SIZE)
media_catalog_number, lead_in_samples, flags, num_tracks = \
struct.unpack(self.__CUESHEET_FORMAT, header)
self.media_catalog_number = media_catalog_number.rstrip(b'\0')
self.lead_in_samples = lead_in_samples
self.compact_disc = bool(flags & 0x80)
self.tracks = []
for i in range(num_tracks):
track = data.read(self.__CUESHEET_TRACK_SIZE)
start_offset, track_number, isrc_padded, flags, num_indexes = \
struct.unpack(self.__CUESHEET_TRACK_FORMAT, track)
isrc = isrc_padded.rstrip(b'\0')
type_ = (flags & 0x80) >> 7
pre_emphasis = bool(flags & 0x40)
val = CueSheetTrack(
track_number, start_offset, isrc, type_, pre_emphasis)
for j in range(num_indexes):
index = data.read(self.__CUESHEET_TRACKINDEX_SIZE)
index_offset, index_number = struct.unpack(
self.__CUESHEET_TRACKINDEX_FORMAT, index)
val.indexes.append(
CueSheetTrackIndex(index_number, index_offset))
self.tracks.append(val)
def write(self):
f = BytesIO()
flags = 0
if self.compact_disc:
flags |= 0x80
packed = struct.pack(
self.__CUESHEET_FORMAT, self.media_catalog_number,
self.lead_in_samples, flags, len(self.tracks))
f.write(packed)
for track in self.tracks:
track_flags = 0
track_flags |= (track.type & 1) << 7
if track.pre_emphasis:
track_flags |= 0x40
track_packed = struct.pack(
self.__CUESHEET_TRACK_FORMAT, track.start_offset,
track.track_number, track.isrc, track_flags,
len(track.indexes))
f.write(track_packed)
for index in track.indexes:
index_packed = struct.pack(
self.__CUESHEET_TRACKINDEX_FORMAT,
index.index_offset, index.index_number)
f.write(index_packed)
return f.getvalue()
def __repr__(self):
return (("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, "
"tracks=%r>") %
(type(self).__name__, self.media_catalog_number,
self.lead_in_samples, self.compact_disc, self.tracks))
class Picture(MetadataBlock):
"""Picture()
Read and write FLAC embed pictures.
.. currentmodule:: mutagen
Attributes:
type (`id3.PictureType`): picture type
(same as types for ID3 APIC frames)
mime (`text`): MIME type of the picture
desc (`text`): picture's description
width (`int`): width in pixels
height (`int`): height in pixels
depth (`int`): color depth in bits-per-pixel
colors (`int`): number of colors for indexed palettes (like GIF),
0 for non-indexed
data (`bytes`): picture data
To create a picture from file (in order to add to a FLAC file),
instantiate this object without passing anything to the constructor and
then set the properties manually::
pic = Picture()
with open("Folder.jpg", "rb") as f:
pic.data = f.read()
pic.type = id3.PictureType.COVER_FRONT
pic.mime = u"image/jpeg"
pic.width = 500
pic.height = 500
pic.depth = 16 # color depth
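To attach the picture afterwards (the file name is a placeholder)::
audio = FLAC("example.flac")
audio.add_picture(pic)
audio.save()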
"""
code = 6
_distrust_size = True
def __init__(self, data=None):
self.type = 0
self.mime = u''
self.desc = u''
self.width = 0
self.height = 0
self.depth = 0
self.colors = 0
self.data = b''
super(Picture, self).__init__(data)
def __eq__(self, other):
try:
return (self.type == other.type and
self.mime == other.mime and
self.desc == other.desc and
self.width == other.width and
self.height == other.height and
self.depth == other.depth and
self.colors == other.colors and
self.data == other.data)
except (AttributeError, TypeError):
return False
__hash__ = MetadataBlock.__hash__
def load(self, data):
self.type, length = struct.unpack('>2I', data.read(8))
self.mime = data.read(length).decode('UTF-8', 'replace')
length, = struct.unpack('>I', data.read(4))
self.desc = data.read(length).decode('UTF-8', 'replace')
(self.width, self.height, self.depth,
self.colors, length) = struct.unpack('>5I', data.read(20))
self.data = data.read(length)
def write(self):
f = BytesIO()
mime = self.mime.encode('UTF-8')
f.write(struct.pack('>2I', self.type, len(mime)))
f.write(mime)
desc = self.desc.encode('UTF-8')
f.write(struct.pack('>I', len(desc)))
f.write(desc)
f.write(struct.pack('>5I', self.width, self.height, self.depth,
self.colors, len(self.data)))
f.write(self.data)
return f.getvalue()
def __repr__(self):
return "<%s '%s' (%d bytes)>" % (type(self).__name__, self.mime,
len(self.data))
class Padding(MetadataBlock):
"""Padding()
Empty padding space for metadata blocks.
To avoid rewriting the entire FLAC file when editing comments,
metadata is often padded. Padding should occur at the end, and no
more than one padding block should be in any FLAC file.
Attributes:
length (`int`): length
"""
code = 1
def __init__(self, data=b""):
super(Padding, self).__init__(data)
def load(self, data):
self.length = len(data.read())
def write(self):
try:
return b"\x00" * self.length
# On some 64 bit platforms this won't generate a MemoryError
# or OverflowError since you might have enough RAM, but it
# still generates a ValueError. On other 64 bit platforms,
# this will still succeed for extremely large values.
# Those should never happen in the real world, and if they
# do, writeblocks will catch it.
except (OverflowError, ValueError, MemoryError):
raise error("cannot write %d bytes" % self.length)
def __eq__(self, other):
return isinstance(other, Padding) and self.length == other.length
__hash__ = MetadataBlock.__hash__
def __repr__(self):
return "<%s (%d bytes)>" % (type(self).__name__, self.length)
class FLAC(mutagen.FileType):
"""FLAC(filething)
A FLAC audio file.
Args:
filething (filething)
Attributes:
cuesheet (`CueSheet`): if any or `None`
seektable (`SeekTable`): if any or `None`
pictures (list[Picture]): list of embedded pictures
info (`StreamInfo`)
tags (`mutagen._vorbis.VCommentDict`)
"""
_mimes = ["audio/flac", "audio/x-flac", "application/x-flac"]
info = None
tags = None
METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict,
CueSheet, Picture]
"""Known metadata block types, indexed by ID."""
@staticmethod
def score(filename, fileobj, header_data):
return (header_data.startswith(b"fLaC") +
endswith(filename.lower(), ".flac") * 3)
def __read_metadata_block(self, fileobj):
byte = ord(fileobj.read(1))
size = to_int_be(fileobj.read(3))
code = byte & 0x7F
last_block = bool(byte & 0x80)
try:
block_type = self.METADATA_BLOCKS[code] or MetadataBlock
except IndexError:
block_type = MetadataBlock
if block_type._distrust_size:
# Some jackass is writing broken Metadata block length
# for Vorbis comment blocks, and the FLAC reference
# implementation can parse them (mostly by accident),
# so we have to too. Instead of parsing the size
# given, parse an actual Vorbis comment, leaving
# fileobj in the right position.
# https://github.com/quodlibet/mutagen/issues/52
# ..same for the Picture block:
# https://github.com/quodlibet/mutagen/issues/106
start = fileobj.tell()
block = block_type(fileobj)
real_size = fileobj.tell() - start
if real_size > MetadataBlock._MAX_SIZE:
block._invalid_overflow_size = size
else:
data = fileobj.read(size)
block = block_type(data)
block.code = code
if block.code == VCFLACDict.code:
if self.tags is None:
self.tags = block
else:
# https://github.com/quodlibet/mutagen/issues/377
# Something writes multiple and metaflac doesn't care
pass
elif block.code == CueSheet.code:
if self.cuesheet is None:
self.cuesheet = block
else:
raise error("> 1 CueSheet block found")
elif block.code == SeekTable.code:
if self.seektable is None:
self.seektable = block
else:
raise error("> 1 SeekTable block found")
self.metadata_blocks.append(block)
return not last_block
def add_tags(self):
"""Add a Vorbis comment block to the file."""
if self.tags is None:
self.tags = VCFLACDict()
self.metadata_blocks.append(self.tags)
else:
raise FLACVorbisError("a Vorbis comment already exists")
add_vorbiscomment = add_tags
@convert_error(IOError, error)
@loadfile(writable=True)
def delete(self, filething=None):
"""Remove Vorbis comments from a file.
If no filename is given, the one most recently loaded is used.
"""
if self.tags is not None:
temp_blocks = [
b for b in self.metadata_blocks if b.code != VCFLACDict.code]
self._save(filething, temp_blocks, False, padding=lambda x: 0)
self.metadata_blocks[:] = [
b for b in self.metadata_blocks
if b.code != VCFLACDict.code or b is self.tags]
self.tags.clear()
vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.")
@convert_error(IOError, error)
@loadfile()
def load(self, filething):
"""Load file information from a filename."""
fileobj = filething.fileobj
self.metadata_blocks = []
self.tags = None
self.cuesheet = None
self.seektable = None
fileobj = StrictFileObject(fileobj)
self.__check_header(fileobj, filething.name)
while self.__read_metadata_block(fileobj):
pass
try:
self.metadata_blocks[0].length
except (AttributeError, IndexError):
raise FLACNoHeaderError("Stream info block not found")
if self.info.length:
start = fileobj.tell()
fileobj.seek(0, 2)
self.info.bitrate = int(
float(fileobj.tell() - start) * 8 / self.info.length)
else:
self.info.bitrate = 0
@property
def info(self):
return self.metadata_blocks[0]
def add_picture(self, picture):
"""Add a new picture to the file.
Args:
picture (Picture)
"""
self.metadata_blocks.append(picture)
def clear_pictures(self):
"""Delete all pictures from the file."""
blocks = [b for b in self.metadata_blocks if b.code != Picture.code]
self.metadata_blocks = blocks
@property
def pictures(self):
return [b for b in self.metadata_blocks if b.code == Picture.code]
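# Illustrative use of the pictures property ("example.flac" is a
# placeholder):
#
#     audio = FLAC("example.flac")
#     for pic in audio.pictures:
#         print(pic.mime, len(pic.data))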
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, deleteid3=False, padding=None):
"""Save metadata blocks to a file.
Args:
filething (filething)
deleteid3 (bool): delete id3 tags while at it
padding (:obj:`mutagen.PaddingFunction`)
If no filename is given, the one most recently loaded is used.
"""
self._save(filething, self.metadata_blocks, deleteid3, padding)
def _save(self, filething, metadata_blocks, deleteid3, padding):
f = StrictFileObject(filething.fileobj)
header = self.__check_header(f, filething.name)
audio_offset = self.__find_audio_offset(f)
# "fLaC" and maybe ID3
available = audio_offset - header
# Delete ID3v2
if deleteid3 and header > 4:
available += header - 4
header = 4
content_size = get_size(f) - audio_offset
assert content_size >= 0
data = MetadataBlock._writeblocks(
metadata_blocks, available, content_size, padding)
data_size = len(data)
resize_bytes(filething.fileobj, available, data_size, header)
f.seek(header - 4)
f.write(b"fLaC")
f.write(data)
# Delete ID3v1
if deleteid3:
try:
f.seek(-128, 2)
except IOError:
pass
else:
if f.read(3) == b"TAG":
f.seek(-128, 2)
f.truncate()
def __find_audio_offset(self, fileobj):
byte = 0x00
while not (byte & 0x80):
byte = ord(fileobj.read(1))
size = to_int_be(fileobj.read(3))
try:
block_type = self.METADATA_BLOCKS[byte & 0x7F]
except IndexError:
block_type = None
if block_type and block_type._distrust_size:
# See comments in read_metadata_block; the size can't
# be trusted for Vorbis comment blocks and Picture block
block_type(fileobj)
else:
fileobj.read(size)
return fileobj.tell()
def __check_header(self, fileobj, name):
"""Returns the offset of the flac block start
(skipping id3 tags if found). The passed fileobj will be advanced to
that offset as well.
"""
size = 4
header = fileobj.read(4)
if header != b"fLaC":
size = None
if header[:3] == b"ID3":
size = 14 + BitPaddedInt(fileobj.read(6)[2:])
fileobj.seek(size - 4)
if fileobj.read(4) != b"fLaC":
size = None
if size is None:
raise FLACNoHeaderError(
"%r is not a valid FLAC file" % name)
return size
Open = FLAC
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Remove tags from a file.
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
f = FLAC(filething)
filething.fileobj.seek(0)
f.delete(filething)
| 31,711 | Python | .py | 752 | 32.582447 | 79 | 0.601001 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,501 | oggvorbis.py | rembo10_headphones/lib/mutagen/oggvorbis.py |
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Ogg Vorbis comments.
This module handles Vorbis files wrapped in an Ogg bitstream. The
first Vorbis stream found is used.
Read more about Ogg Vorbis at http://vorbis.com/. This module is based
on the specification at http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html.
"""
__all__ = ["OggVorbis", "Open", "delete"]
import struct
from mutagen import StreamInfo
from mutagen._vorbis import VCommentDict
from mutagen._util import get_size, loadfile, convert_error
from mutagen._tags import PaddingInfo
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
pass
class OggVorbisHeaderError(error):
pass
class OggVorbisInfo(StreamInfo):
"""OggVorbisInfo()
Ogg Vorbis stream information.
Attributes:
length (`float`): File length in seconds, as a float
channels (`int`): Number of channels
bitrate (`int`): Nominal ('average') bitrate in bits per second
sample_rate (`int`): Sample rate in Hz
"""
length = 0.0
channels = 0
bitrate = 0
sample_rate = 0
def __init__(self, fileobj):
"""Raises ogg.error, IOError"""
page = OggPage(fileobj)
if not page.packets:
raise OggVorbisHeaderError("page has not packets")
while not page.packets[0].startswith(b"\x01vorbis"):
page = OggPage(fileobj)
if not page.first:
raise OggVorbisHeaderError(
"page has ID header, but doesn't start a stream")
if len(page.packets[0]) < 28:
raise OggVorbisHeaderError(
"page contains a packet too short to be valid")
(self.channels, self.sample_rate, max_bitrate, nominal_bitrate,
min_bitrate) = struct.unpack("<BI3i", page.packets[0][11:28])
if self.sample_rate == 0:
raise OggVorbisHeaderError("sample rate can't be zero")
self.serial = page.serial
max_bitrate = max(0, max_bitrate)
min_bitrate = max(0, min_bitrate)
nominal_bitrate = max(0, nominal_bitrate)
if nominal_bitrate == 0:
self.bitrate = (max_bitrate + min_bitrate) // 2
elif max_bitrate and max_bitrate < nominal_bitrate:
# If the max bitrate is less than the nominal, we know
# the nominal is wrong.
self.bitrate = max_bitrate
elif min_bitrate > nominal_bitrate:
self.bitrate = min_bitrate
else:
self.bitrate = nominal_bitrate
def _post_tags(self, fileobj):
"""Raises ogg.error"""
page = OggPage.find_last(fileobj, self.serial, finishing=True)
if page is None:
raise OggVorbisHeaderError
self.length = page.position / float(self.sample_rate)
def pprint(self):
return u"Ogg Vorbis, %.2f seconds, %d bps" % (
self.length, self.bitrate)
class OggVCommentDict(VCommentDict):
"""Vorbis comments embedded in an Ogg bitstream."""
def __init__(self, fileobj, info):
pages = []
complete = False
while not complete:
page = OggPage(fileobj)
if page.serial == info.serial:
pages.append(page)
complete = page.complete or (len(page.packets) > 1)
data = OggPage.to_packets(pages)[0][7:] # Strip off "\x03vorbis".
super(OggVCommentDict, self).__init__(data)
self._padding = len(data) - self._size
def _inject(self, fileobj, padding_func):
"""Write tag data into the Vorbis comment packet/page."""
# Find the old pages in the file; we'll need to remove them,
# plus grab any stray setup packet data out of them.
fileobj.seek(0)
page = OggPage(fileobj)
while not page.packets[0].startswith(b"\x03vorbis"):
page = OggPage(fileobj)
old_pages = [page]
while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
page = OggPage(fileobj)
if page.serial == old_pages[0].serial:
old_pages.append(page)
packets = OggPage.to_packets(old_pages, strict=False)
content_size = get_size(fileobj) - len(packets[0]) # approx
vcomment_data = b"\x03vorbis" + self.write()
padding_left = len(packets[0]) - len(vcomment_data)
info = PaddingInfo(padding_left, content_size)
new_padding = info._get_padding(padding_func)
# Set the new comment packet.
packets[0] = vcomment_data + b"\x00" * new_padding
new_pages = OggPage._from_packets_try_preserve(packets, old_pages)
OggPage.replace(fileobj, old_pages, new_pages)
class OggVorbis(OggFileType):
"""OggVorbis(filething)
Arguments:
filething (filething)
An Ogg Vorbis file.
Attributes:
info (`OggVorbisInfo`)
tags (`mutagen._vorbis.VCommentDict`)
"""
_Info = OggVorbisInfo
_Tags = OggVCommentDict
_Error = OggVorbisHeaderError
_mimes = ["audio/vorbis", "audio/x-vorbis"]
info = None
tags = None
@staticmethod
def score(filename, fileobj, header):
return (header.startswith(b"OggS") * (b"\x01vorbis" in header))
Open = OggVorbis
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggVorbis(filething)
filething.fileobj.seek(0)
t.delete(filething)
| 5,852 | Python | .py | 145 | 32.862069 | 77 | 0.646341 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,502 | ac3.py | rembo10_headphones/lib/mutagen/ac3.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Pure AC3 file information.
"""
__all__ = ["AC3", "Open"]
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import (
BitReader,
BitReaderError,
MutagenError,
convert_error,
enum,
loadfile,
endswith,
)
@enum
class ChannelMode(object):
DUALMONO = 0
MONO = 1
STEREO = 2
C3F = 3
C2F1R = 4
C3F1R = 5
C2F2R = 6
C3F2R = 7
AC3_CHANNELS = {
ChannelMode.DUALMONO: 2,
ChannelMode.MONO: 1,
ChannelMode.STEREO: 2,
ChannelMode.C3F: 3,
ChannelMode.C2F1R: 3,
ChannelMode.C3F1R: 4,
ChannelMode.C2F2R: 4,
ChannelMode.C3F2R: 5
}
AC3_HEADER_SIZE = 7
AC3_SAMPLE_RATES = [48000, 44100, 32000]
AC3_BITRATES = [
32, 40, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384, 448, 512, 576, 640
]
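# Note: AC3_BITRATES values are in kbit/s; _read_header_normal below
# multiplies by 1000 to report bits per second.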
@enum
class EAC3FrameType(object):
INDEPENDENT = 0
DEPENDENT = 1
AC3_CONVERT = 2
RESERVED = 3
EAC3_BLOCKS = [1, 2, 3, 6]
class AC3Error(MutagenError):
pass
class AC3Info(StreamInfo):
"""AC3 stream information.
The length of the stream is just a guess and might not be correct.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bitrate (`int`): audio bitrate, in bits per second
codec (`str`): ac-3 or ec-3 (Enhanced AC-3)
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
codec = 'ac-3'
@convert_error(IOError, AC3Error)
def __init__(self, fileobj):
"""Raises AC3Error"""
header = bytearray(fileobj.read(6))
if len(header) < 6:
raise AC3Error("not enough data")
if not header.startswith(b"\x0b\x77"):
raise AC3Error("not a AC3 file")
bitstream_id = header[5] >> 3
if bitstream_id > 16:
raise AC3Error("invalid bitstream_id %i" % bitstream_id)
fileobj.seek(2)
self._read_header(fileobj, bitstream_id)
def _read_header(self, fileobj, bitstream_id):
bitreader = BitReader(fileobj)
try:
# This is partially based on code from
# https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/ac3_parser.c
if bitstream_id <= 10: # Normal AC-3
self._read_header_normal(bitreader, bitstream_id)
else: # Enhanced AC-3
self._read_header_enhanced(bitreader)
except BitReaderError as e:
raise AC3Error(e)
self.length = self._guess_length(fileobj)
def _read_header_normal(self, bitreader, bitstream_id):
r = bitreader
r.skip(16) # 16 bit CRC
sr_code = r.bits(2)
if sr_code == 3:
raise AC3Error("invalid sample rate code %i" % sr_code)
frame_size_code = r.bits(6)
if frame_size_code > 37:
raise AC3Error("invalid frame size code %i" % frame_size_code)
r.skip(5) # bitstream ID, already read
r.skip(3) # bitstream mode, not needed
channel_mode = ChannelMode(r.bits(3))
r.skip(2) # dolby surround mode or surround mix level
lfe_on = r.bits(1)
sr_shift = max(bitstream_id, 8) - 8
try:
self.sample_rate = AC3_SAMPLE_RATES[sr_code] >> sr_shift
self.bitrate = (AC3_BITRATES[frame_size_code >> 1] * 1000
) >> sr_shift
except KeyError as e:
raise AC3Error(e)
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_normal(r, channel_mode)
def _read_header_enhanced(self, bitreader):
r = bitreader
self.codec = "ec-3"
frame_type = r.bits(2)
if frame_type == EAC3FrameType.RESERVED:
raise AC3Error("invalid frame type %i" % frame_type)
r.skip(3) # substream ID, not needed
frame_size = (r.bits(11) + 1) << 1
if frame_size < AC3_HEADER_SIZE:
raise AC3Error("invalid frame size %i" % frame_size)
sr_code = r.bits(2)
try:
if sr_code == 3:
sr_code2 = r.bits(2)
if sr_code2 == 3:
raise AC3Error("invalid sample rate code %i" % sr_code2)
numblocks_code = 3
self.sample_rate = AC3_SAMPLE_RATES[sr_code2] // 2
else:
numblocks_code = r.bits(2)
self.sample_rate = AC3_SAMPLE_RATES[sr_code]
channel_mode = ChannelMode(r.bits(3))
lfe_on = r.bits(1)
self.bitrate = 8 * frame_size * self.sample_rate // (
EAC3_BLOCKS[numblocks_code] * 256)
except KeyError as e:
raise AC3Error(e)
r.skip(5) # bitstream ID, already read
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_enhanced(
r, frame_type, channel_mode, sr_code, numblocks_code)
@staticmethod
def _skip_unused_header_bits_normal(bitreader, channel_mode):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if r.bits(1): # Language Code Exists
r.skip(8) # Language Code
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
r.skip(7)
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if r.bits(1): # Language Code Exists, ch2
r.skip(8) # Language Code, ch2
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
r.skip(7)
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(2)
timecod1e = r.bits(1) # Time Code First Half Exists
timecod2e = r.bits(1) # Time Code Second Half Exists
if timecod1e:
r.skip(14) # Time Code First Half
if timecod2e:
r.skip(14) # Time Code Second Half
if r.bits(1): # Additional Bit Stream Information Exists
addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _skip_unused_header_bits_enhanced(bitreader, frame_type, channel_mode,
sr_code, numblocks_code):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if frame_type == EAC3FrameType.DEPENDENT:
if r.bits(1): # chanmap exists
r.skip(16) # chanmap
if r.bits(1): # mixmdate, 1 Bit
# FIXME: Handle channel dependent fields
return
if r.bits(1): # Informational Metadata Exists
# bsmod, 3 Bits
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(5)
if channel_mode == ChannelMode.STEREO:
# dsurmod, 2 Bits
# dheadphonmod, 2 Bits
r.skip(4)
elif channel_mode >= ChannelMode.C2F2R:
r.skip(2) # dsurexmod
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
# adconvtyp, 1 Bit
r.skip(8)
if channel_mode == ChannelMode.DUALMONO:
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
# adconvtyp, ch2, 1 Bit
r.skip(8)
if sr_code < 3: # if not half sample rate
r.skip(1) # sourcefscod
if frame_type == EAC3FrameType.INDEPENDENT and numblocks_code == 3:
r.skip(1) # convsync
if frame_type == EAC3FrameType.AC3_CONVERT:
if numblocks_code != 3:
if r.bits(1): # blkid
r.skip(6) # frmsizecod
if r.bits(1): # Additional Bit Stream Information Exists
addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _get_channels(channel_mode, lfe_on):
try:
return AC3_CHANNELS[channel_mode] + lfe_on
except KeyError as e:
raise AC3Error(e)
def _guess_length(self, fileobj):
# use bitrate + data size to guess length
if self.bitrate == 0:
return
start = fileobj.tell()
fileobj.seek(0, 2)
length = fileobj.tell() - start
return 8.0 * length / self.bitrate
def pprint(self):
return u"%s, %d Hz, %.2f seconds, %d channel(s), %d bps" % (
self.codec, self.sample_rate, self.length, self.channels,
self.bitrate)
class AC3(FileType):
"""AC3(filething)
Arguments:
filething (filething)
Load AC3 or EAC3 files.
Tagging is not supported.
Use the ID3/APEv2 classes directly instead.
Attributes:
info (`AC3Info`)
"""
_mimes = ["audio/ac3"]
@loadfile()
def load(self, filething):
self.info = AC3Info(filething.fileobj)
def add_tags(self):
raise AC3Error("doesn't support tags")
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"\x0b\x77") * 2 \
+ (endswith(filename, ".ac3") or endswith(filename, ".eac3"))
Open = AC3
error = AC3Error
| 10,451 | Python | .py | 274 | 28.638686 | 82 | 0.576326 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,503 | optimfrog.py | rembo10_headphones/lib/mutagen/optimfrog.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""OptimFROG audio streams with APEv2 tags.
OptimFROG is a lossless audio compression program. Its main goal is to
reduce the size of audio files as much as possible, while permitting
bit-identical restoration for all input. It is similar to ZIP
compression, but highly specialized to compress audio data.
Only versions 4.5 and higher are supported.
For more information, see http://www.losslessaudio.org/
"""
__all__ = ["OptimFROG", "Open", "delete"]
import struct
from ._util import convert_error, endswith
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
SAMPLE_TYPE_BITS = {
0: 8,
1: 8,
2: 16,
3: 16,
4: 24,
5: 24,
6: 32,
7: 32,
}
class OptimFROGHeaderError(error):
pass
class OptimFROGInfo(StreamInfo):
"""OptimFROGInfo()
OptimFROG stream information.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): the audio sample size
encoder_info (`mutagen.text`): encoder version, e.g. "5.100"
"""
@convert_error(IOError, OptimFROGHeaderError)
def __init__(self, fileobj):
"""Raises OptimFROGHeaderError"""
header = fileobj.read(76)
if len(header) != 76 or not header.startswith(b"OFR "):
raise OptimFROGHeaderError("not an OptimFROG file")
data_size = struct.unpack("<I", header[4:8])[0]
if data_size != 12 and data_size < 15:
raise OptimFROGHeaderError("not an OptimFROG file")
(total_samples, total_samples_high, sample_type, self.channels,
self.sample_rate) = struct.unpack("<IHBBI", header[8:20])
total_samples += total_samples_high << 32
self.channels += 1
self.bits_per_sample = SAMPLE_TYPE_BITS.get(sample_type)
if self.sample_rate:
self.length = float(total_samples) / (self.channels *
self.sample_rate)
else:
self.length = 0.0
if data_size >= 15:
encoder_id = struct.unpack("<H", header[20:22])[0]
version = str((encoder_id >> 4) + 4500)
self.encoder_info = "%s.%s" % (version[0], version[1:])
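# e.g. encoder_id 9600 gives (9600 >> 4) + 4500 == 5100 -> "5.100"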
else:
self.encoder_info = ""
def pprint(self):
return u"OptimFROG, %.2f seconds, %d Hz" % (self.length,
self.sample_rate)
class OptimFROG(APEv2File):
"""OptimFROG(filething)
Attributes:
info (`OptimFROGInfo`)
tags (`mutagen.apev2.APEv2`)
"""
_Info = OptimFROGInfo
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"OFR") + endswith(filename, b".ofr") +
endswith(filename, b".ofs"))
Open = OptimFROG
| 3,253 | Python | .py | 83 | 31.927711 | 73 | 0.635294 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,504 | oggopus.py | rembo10_headphones/lib/mutagen/oggopus.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Ogg Opus comments.
This module handles Opus files wrapped in an Ogg bitstream. The
first Opus stream found is used.
Based on http://tools.ietf.org/html/draft-terriberry-oggopus-01
"""
__all__ = ["OggOpus", "Open", "delete"]
import struct
from io import BytesIO
from mutagen import StreamInfo
from mutagen._util import get_size, loadfile, convert_error
from mutagen._tags import PaddingInfo
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
pass
class OggOpusHeaderError(error):
pass
class OggOpusInfo(StreamInfo):
"""OggOpusInfo()
Ogg Opus stream information.
Attributes:
length (`float`): File length in seconds, as a float
channels (`int`): Number of channels
"""
length = 0
channels = 0
def __init__(self, fileobj):
page = OggPage(fileobj)
while not page.packets[0].startswith(b"OpusHead"):
page = OggPage(fileobj)
self.serial = page.serial
if not page.first:
raise OggOpusHeaderError(
"page has ID header, but doesn't start a stream")
(version, self.channels, pre_skip, orig_sample_rate, output_gain,
channel_map) = struct.unpack("<BBHIhB", page.packets[0][8:19])
self.__pre_skip = pre_skip
# only the higher 4 bits change on incompatible changes
major = version >> 4
if major != 0:
raise OggOpusHeaderError("version %r unsupported" % major)
def _post_tags(self, fileobj):
page = OggPage.find_last(fileobj, self.serial, finishing=True)
if page is None:
raise OggOpusHeaderError
self.length = (page.position - self.__pre_skip) / float(48000)
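# Opus granule positions are always counted in 48 kHz samples
# regardless of the input rate, hence the fixed 48000 divisor above.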
def pprint(self):
return u"Ogg Opus, %.2f seconds" % (self.length)
class OggOpusVComment(VCommentDict):
"""Opus comments embedded in an Ogg bitstream."""
def __get_comment_pages(self, fileobj, info):
# find the first tags page with the right serial
page = OggPage(fileobj)
while ((info.serial != page.serial) or
not page.packets[0].startswith(b"OpusTags")):
page = OggPage(fileobj)
# get all comment pages
pages = [page]
while not (pages[-1].complete or len(pages[-1].packets) > 1):
page = OggPage(fileobj)
if page.serial == pages[0].serial:
pages.append(page)
return pages
def __init__(self, fileobj, info):
pages = self.__get_comment_pages(fileobj, info)
data = OggPage.to_packets(pages)[0][8:] # Strip OpusTags
fileobj = BytesIO(data)
super(OggOpusVComment, self).__init__(fileobj, framing=False)
self._padding = len(data) - self._size
# in case the LSB of the first byte after v-comment is 1, preserve the
# following data
padding_flag = fileobj.read(1)
if padding_flag and ord(padding_flag) & 0x1:
self._pad_data = padding_flag + fileobj.read()
self._padding = 0 # we have to preserve, so no padding
else:
self._pad_data = b""
def _inject(self, fileobj, padding_func):
fileobj.seek(0)
info = OggOpusInfo(fileobj)
old_pages = self.__get_comment_pages(fileobj, info)
packets = OggPage.to_packets(old_pages)
vcomment_data = b"OpusTags" + self.write(framing=False)
if self._pad_data:
# if we have padding data to preserve we can't add more padding
# as long as we don't know the structure of what follows
packets[0] = vcomment_data + self._pad_data
else:
content_size = get_size(fileobj) - len(packets[0]) # approx
padding_left = len(packets[0]) - len(vcomment_data)
info = PaddingInfo(padding_left, content_size)
new_padding = info._get_padding(padding_func)
packets[0] = vcomment_data + b"\x00" * new_padding
new_pages = OggPage._from_packets_try_preserve(packets, old_pages)
OggPage.replace(fileobj, old_pages, new_pages)
class OggOpus(OggFileType):
"""OggOpus(filething)
An Ogg Opus file.
Arguments:
filething (filething)
Attributes:
info (`OggOpusInfo`)
tags (`mutagen._vorbis.VCommentDict`)
"""
_Info = OggOpusInfo
_Tags = OggOpusVComment
_Error = OggOpusHeaderError
_mimes = ["audio/ogg", "audio/ogg; codecs=opus"]
info = None
tags = None
@staticmethod
def score(filename, fileobj, header):
return (header.startswith(b"OggS") * (b"OpusHead" in header))
Open = OggOpus
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggOpus(filething)
filething.fileobj.seek(0)
t.delete(filething)
| 5,358 | Python | .py | 134 | 32.69403 | 78 | 0.648434 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,505 | ogg.py | rembo10_headphones/lib/mutagen/ogg.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from io import BytesIO
from mutagen import FileType
from mutagen._util import cdata, resize_bytes, MutagenError, loadfile, \
seek_end, bchr, reraise
class error(MutagenError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
version (`int`): stream structure version (currently always 0)
position (`int`): absolute stream position (default -1)
serial (`int`): logical stream serial number (default 0)
sequence (`int`): page sequence number within logical stream
(default 0)
offset (`int` or `None`): offset this page was read from (default None)
complete (`bool`): if the last packet on this page is complete
(default True)
packets (list[bytes]): list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
"""Raises error, IOError, EOFError"""
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags,
self.position, self.serial, self.sequence,
crc, segments) = struct.unpack("<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != b"OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, b"OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in bytearray(lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = [fileobj.read(l) for l in lacings]
if [len(p) for p in self.packets] != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
__hash__ = object.__hash__
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append(b"\xff" * quot + bchr(rem))
lacing_data = b"".join(lacing_data)
if not self.complete and lacing_data.endswith(b"\x00"):
lacing_data = lacing_data[:-1]
data.append(bchr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = b"".join(data)
# Python's CRC is swapped relative to Ogg's needs.
# crc32 returns uint prior to py2.6 on some platforms, so force uint
crc = (~zlib.crc32(data.translate(cdata.bitswap), -1)) & 0xffffffff
# Although we're using to_uint_be, this actually makes the CRC
# a proper le integer, since Python's CRC is byteswapped.
crc = cdata.to_uint_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
@property
def size(self):
"""Total frame size."""
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
def __set_flag(self, bit, val):
mask = 1 << bit
if val:
self.__type_flags |= mask
else:
self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
@staticmethod
def renumber(fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
fileobj must point to the start of a valid Ogg page; any
occurring after it and part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try:
page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
@staticmethod
def to_packets(pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append([b""])
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else:
sequence += 1
if page.packets:
if page.continued:
packets[-1].append(page.packets[0])
else:
packets.append([page.packets[0]])
packets.extend([p] for p in page.packets[1:])
return [b"".join(p) for p in packets]
@classmethod
def _from_packets_try_preserve(cls, packets, old_pages):
"""Like from_packets but in case the size and number of the packets
is the same as in the given pages the layout of the pages will
be copied (the page size and number will match).
If the packets don't match this behaves like::
OggPage.from_packets(packets, sequence=old_pages[0].sequence)
"""
old_packets = cls.to_packets(old_pages)
if [len(p) for p in packets] != [len(p) for p in old_packets]:
# doesn't match, fall back
return cls.from_packets(packets, old_pages[0].sequence)
new_data = b"".join(packets)
new_pages = []
for old in old_pages:
new = OggPage()
new.sequence = old.sequence
new.complete = old.complete
new.continued = old.continued
new.position = old.position
for p in old.packets:
data, new_data = new_data[:len(p)], new_data[len(p):]
new.packets.append(data)
new_pages.append(new)
assert not new_data
return new_pages
@staticmethod
def from_packets(packets, sequence=0, default_size=4096,
wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
Pages are numbered starting at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append(b"")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = b""
if page.packets:
pages.append(page)
return pages
@classmethod
def replace(cls, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
if not len(old_pages) or not len(new_pages):
raise ValueError("empty pages list not allowed")
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages,
range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1
new_data = [cls.write(p) for p in new_pages]
# Add dummy data or merge the remaining data together so multiple
# new pages replace an old one
pages_diff = len(old_pages) - len(new_data)
if pages_diff > 0:
new_data.extend([b""] * pages_diff)
elif pages_diff < 0:
new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])]
# Replace pages one by one. If the sizes match no resize happens.
offset_adjust = 0
new_data_end = None
assert len(old_pages) == len(new_data)
for old_page, data in zip(old_pages, new_data):
offset = old_page.offset + offset_adjust
data_size = len(data)
resize_bytes(fileobj, old_page.size, data_size, offset)
fileobj.seek(offset, 0)
fileobj.write(data)
new_data_end = offset + data_size
offset_adjust += (data_size - old_page.size)
        # Finally, if there's any discrepancy in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
cls.renumber(fileobj, serial, sequence)
@staticmethod
def find_last(fileobj, serial, finishing=False):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
        it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
        If finishing is True it returns the last page which contains a packet
        finishing on it. If pages exist but none contain a finishing packet,
        None is returned.
Returns None in case no page with the serial exists.
Raises error in case this isn't a valid ogg stream.
Raises IOError.
"""
# For non-muxed streams, look at the last page.
seek_end(fileobj, 256 * 256)
data = fileobj.read()
try:
index = data.rindex(b"OggS")
except ValueError:
raise error("unable to find final Ogg header")
bytesobj = BytesIO(data[index:])
def is_valid(page):
return not finishing or page.position != -1
best_page = None
try:
page = OggPage(bytesobj)
except error:
pass
else:
if page.serial == serial and is_valid(page):
if page.last:
return page
else:
best_page = page
else:
best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while True:
if page.serial == serial:
if is_valid(page):
best_page = page
if page.last:
break
page = OggPage(fileobj)
return best_page
except error:
return best_page
except EOFError:
return best_page
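# Illustrative round-trip sketch (not part of mutagen): packets paginated by
# from_packets() can be serialized with write(), reparsed, and recovered via
# to_packets(). The packet contents here are made-up placeholder data.
def _ogg_page_roundtrip_example():  # documentation sketch, never called
    packets = [b"\x01header", b"\x02" + b"\x00" * 600]
    pages = OggPage.from_packets(packets)
    for page in pages:
        page.serial = 42  # every page of a logical stream shares one serial
    data = BytesIO(b"".join(p.write() for p in pages))
    parsed = [OggPage(data) for _ in pages]
    assert OggPage.to_packets(parsed) == packets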
class OggFileType(FileType):
"""OggFileType(filething)
    A generic Ogg file.
Arguments:
filething (filething)
"""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
@loadfile()
def load(self, filething):
"""load(filething)
Load file information from a filename.
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
fileobj = filething.fileobj
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
self.info._post_tags(fileobj)
except (error, IOError) as e:
reraise(self._Error, e, sys.exc_info()[2])
except EOFError:
raise self._Error("no appropriate stream found")
@loadfile(writable=True)
def delete(self, filething=None):
"""delete(filething=None)
Remove tags from a file.
If no filename is given, the one most recently loaded is used.
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
fileobj = filething.fileobj
self.tags.clear()
        # TODO: we should delegate the deletion to the subclass and not go
        # through _inject.
try:
try:
self.tags._inject(fileobj, lambda x: 0)
except error as e:
reraise(self._Error, e, sys.exc_info()[2])
except EOFError:
raise self._Error("no appropriate stream found")
except IOError as e:
reraise(self._Error, e, sys.exc_info()[2])
def add_tags(self):
raise self._Error
@loadfile(writable=True)
def save(self, filething=None, padding=None):
"""save(filething=None, padding=None)
Save a tag to a file.
If no filename is given, the one most recently loaded is used.
Args:
filething (filething)
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
"""
try:
self.tags._inject(filething.fileobj, padding)
except (IOError, error) as e:
reraise(self._Error, e, sys.exc_info()[2])
except EOFError:
raise self._Error("no appropriate stream found")
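# Usage sketch (illustrative, not part of mutagen): concrete Ogg formats fill
# in the three hooks above. The class names below are hypothetical stand-ins
# mirroring the pattern used by e.g. mutagen.oggvorbis.
#
#     class MyCodecError(error):
#         pass
#
#     class MyCodec(OggFileType):
#         _Info = MyCodecInfo        # StreamInfo parsing the header packets
#         _Tags = MyCodecVComment    # VComment-style tag object with _inject()
#         _Error = MyCodecError      # error type raised on parse failures
#         _mimes = ["audio/x-mycodec"] + OggFileType._mimes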
m4a.py
rembo10_headphones/lib/mutagen/m4a.py
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
since 1.9: mutagen.m4a is deprecated; use mutagen.mp4 instead.
since 1.31: mutagen.m4a will no longer work; any operation that could fail
will fail now.
"""
import warnings
from mutagen import FileType, Tags, StreamInfo
from ._util import DictProxy, MutagenError, loadfile
warnings.warn(
"mutagen.m4a is deprecated; use mutagen.mp4 instead.",
DeprecationWarning)
class error(MutagenError):
pass
class M4AMetadataError(error):
pass
class M4AStreamInfoError(error):
pass
class M4AMetadataValueError(error):
pass
__all__ = ['M4A', 'Open', 'delete', 'M4ACover']
class M4ACover(bytes):
FORMAT_JPEG = 0x0D
FORMAT_PNG = 0x0E
def __new__(cls, data, imageformat=None):
self = bytes.__new__(cls, data)
if imageformat is None:
imageformat = M4ACover.FORMAT_JPEG
self.imageformat = imageformat
return self
class M4ATags(DictProxy, Tags):
def load(self, atoms, fileobj):
raise error("deprecated")
def save(self, filename):
raise error("deprecated")
def delete(self, filename):
raise error("deprecated")
def pprint(self):
return u""
class M4AInfo(StreamInfo):
bitrate = 0
def __init__(self, atoms, fileobj):
raise error("deprecated")
def pprint(self):
return u""
class M4A(FileType):
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
@loadfile()
def load(self, filething):
raise error("deprecated")
def add_tags(self):
self.tags = M4ATags()
@staticmethod
def score(filename, fileobj, header):
return 0
Open = M4A
def delete(filename):
raise error("deprecated")
aiff.py
rembo10_headphones/lib/mutagen/aiff.py
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""AIFF audio stream information and tags."""
import struct
from struct import pack
from mutagen import StreamInfo, FileType
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
IffID3,
InvalidChunk,
error as IffError,
)
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["AIFF", "Open", "delete"]
class error(IffError):
pass
# based on stdlib's aifc
_HUGE_VAL = 1.79769313486231e+308
def read_float(data):
"""Raises OverflowError"""
assert len(data) == 10
expon, himant, lomant = struct.unpack('>hLL', data)
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
raise OverflowError("inf and nan not supported")
else:
expon = expon - 16383
# this can raise OverflowError too
f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
return sign * f
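# Worked example (illustrative, not part of mutagen): 44100 Hz in the 80-bit
# extended format used by the COMM chunk is biased exponent 0x400E
# (16383 + 15) with mantissa 0xAC44 << 48, since 0xAC44 == 44100.
def _read_float_example():  # documentation sketch, never called
    packed = struct.pack('>hLL', 0x400E, 0xAC440000, 0)
    assert read_float(packed) == 44100.0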
class AIFFChunk(IffChunk):
"""Representation of a single IFF chunk"""
@classmethod
def parse_header(cls, header):
return struct.unpack('>4sI', header)
@classmethod
def get_class(cls, id):
if id == 'FORM':
return AIFFFormChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(pack('>4sI', id_, size))
def write_size(self):
self._fileobj.write(pack('>I', self.data_size))
class AIFFFormChunk(AIFFChunk, IffContainerChunkMixin):
"""The AIFF root chunk."""
def parse_next_subchunk(self):
return AIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id != u'FORM':
raise InvalidChunk('Expected FORM chunk, got %s' % id)
AIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class AIFFFile(IffFile):
"""Representation of a AIFF file"""
def __init__(self, fileobj):
# AIFF Files always start with the FORM chunk which contains a 4 byte
# ID before the start of other chunks
super().__init__(AIFFChunk, fileobj)
if self.root.id != u'FORM':
raise InvalidChunk("Root chunk must be a FORM chunk, got %s"
% self.root.id)
def __contains__(self, id_):
if id_ == 'FORM': # For backwards compatibility
return True
return super().__contains__(id_)
def __getitem__(self, id_):
if id_ == 'FORM': # For backwards compatibility
return self.root
return super().__getitem__(id_)
class AIFFInfo(StreamInfo):
"""AIFFInfo()
AIFF audio stream information.
Information is parsed from the COMM chunk of the AIFF file
Attributes:
length (`float`): audio length, in seconds
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
bits_per_sample (`int`): The audio sample size
"""
length = 0
bitrate = 0
channels = 0
sample_rate = 0
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
iff = AIFFFile(fileobj)
try:
common_chunk = iff[u'COMM']
except KeyError as e:
raise error(str(e))
data = common_chunk.read()
if len(data) < 18:
raise error
info = struct.unpack('>hLh10s', data[:18])
channels, frame_count, sample_size, sample_rate = info
try:
self.sample_rate = int(read_float(sample_rate))
except OverflowError:
raise error("Invalid sample rate")
if self.sample_rate < 0:
raise error("Invalid sample rate")
if self.sample_rate != 0:
self.length = frame_count / float(self.sample_rate)
self.bits_per_sample = sample_size
self.sample_size = sample_size # For backward compatibility
self.channels = channels
self.bitrate = channels * sample_size * self.sample_rate
def pprint(self):
return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
class _IFFID3(IffID3):
"""A AIFF file with ID3v2 tags"""
def _load_file(self, fileobj):
return AIFFFile(fileobj)
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the AIFF file"""
try:
del AIFFFile(filething.fileobj)[u'ID3']
except KeyError:
pass
class AIFF(FileType):
"""AIFF(filething)
An AIFF audio file.
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`AIFFInfo`)
"""
_mimes = ["audio/aiff", "audio/x-aiff"]
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") +
endswith(filename, b".aiff") + endswith(filename, b".aifc"))
def add_tags(self):
"""Add an empty ID3 tag to the file."""
if self.tags is None:
self.tags = _IFFID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
"""Load stream and tag information from a file."""
fileobj = filething.fileobj
try:
self.tags = _IFFID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
fileobj.seek(0, 0)
self.info = AIFFInfo(fileobj)
Open = AIFF
wave.py
rembo10_headphones/lib/mutagen/wave.py
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Borewit
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Microsoft WAVE/RIFF audio file/stream information and tags."""
import sys
import struct
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen._riff import RiffFile, InvalidChunk
from mutagen._iff import error as IffError
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
convert_error,
endswith,
loadfile,
reraise,
)
__all__ = ["WAVE", "Open", "delete"]
class error(IffError):
"""WAVE stream parsing errors."""
class _WaveFile(RiffFile):
"""Representation of a RIFF/WAVE file"""
def __init__(self, fileobj):
RiffFile.__init__(self, fileobj)
if self.file_type != u'WAVE':
raise error("Expected RIFF/WAVE.")
# Normalize ID3v2-tag-chunk to lowercase
if u'ID3' in self:
self[u'ID3'].id = u'id3'
class WaveStreamInfo(StreamInfo):
"""WaveStreamInfo()
Microsoft WAVE file information.
    Information is parsed from the 'fmt' and 'data' chunks of the RIFF/WAVE file
Attributes:
length (`float`): audio length, in seconds
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
bits_per_sample (`int`): The audio sample size
"""
length = 0.0
bitrate = 0
channels = 0
sample_rate = 0
bits_per_sample = 0
SIZE = 16
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
wave_file = _WaveFile(fileobj)
try:
format_chunk = wave_file[u'fmt']
except KeyError as e:
raise error(str(e))
data = format_chunk.read()
if len(data) < 16:
raise InvalidChunk()
# RIFF: http://soundfile.sapp.org/doc/WaveFormat/
# Python struct.unpack:
# https://docs.python.org/2/library/struct.html#byte-order-size-and-alignment
info = struct.unpack('<hhLLhh', data[:self.SIZE])
self.audio_format, self.channels, self.sample_rate, byte_rate, \
block_align, self.bits_per_sample = info
        # byte_rate equals sample_rate * block_align and already covers all
        # channels, so converting bytes to bits gives the stream bitrate.
        self.bitrate = byte_rate * 8
# Calculate duration
self._number_of_samples = 0
if block_align > 0:
try:
data_chunk = wave_file[u'data']
self._number_of_samples = data_chunk.data_size / block_align
except KeyError:
pass
if self.sample_rate > 0:
self.length = self._number_of_samples / self.sample_rate
def pprint(self):
return u"%d channel RIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
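# Worked example (illustrative, not part of mutagen): the 16-byte PCM fmt
# payload for CD audio. block_align covers one frame across all channels,
# so byte_rate == sample_rate * block_align and the bitrate is byte_rate * 8.
def _fmt_chunk_example():  # documentation sketch, never called
    payload = struct.pack('<hhLLhh', 1, 2, 44100, 176400, 4, 16)
    _, channels, sample_rate, byte_rate, block_align, bps = \
        struct.unpack('<hhLLhh', payload)
    assert byte_rate == sample_rate * block_align
    assert byte_rate * 8 == 1411200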
class _WaveID3(ID3):
"""A Wave file with ID3v2 tags"""
def _pre_load_header(self, fileobj):
try:
fileobj.seek(_WaveFile(fileobj)[u'id3'].data_offset)
except (InvalidChunk, KeyError):
raise ID3NoHeaderError("No ID3 chunk")
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething, v1=1, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the Wave/RIFF file"""
fileobj = filething.fileobj
wave_file = _WaveFile(fileobj)
if u'id3' not in wave_file:
wave_file.insert_chunk(u'id3')
chunk = wave_file[u'id3']
try:
data = self._prepare_data(
fileobj, chunk.data_offset, chunk.data_size, v2_version,
v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
chunk.resize(len(data))
chunk.write(data)
def delete(self, filething):
"""Completely removes the ID3 chunk from the RIFF/WAVE file"""
delete(filething)
self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the RIFF/WAVE file"""
try:
_WaveFile(filething.fileobj).delete_chunk(u'id3')
except KeyError:
pass
class WAVE(FileType):
"""WAVE(filething)
A Waveform Audio File Format
(WAVE, or more commonly known as WAV due to its filename extension)
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`WaveStreamInfo`)
"""
_mimes = ["audio/wav", "audio/wave"]
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"RIFF") + (header[8:12] == b'WAVE')
+ endswith(filename, b".wav") + endswith(filename, b".wave"))
def add_tags(self):
"""Add an empty ID3 tag to the file."""
if self.tags is None:
self.tags = _WaveID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
"""Load stream and tag information from a file."""
fileobj = filething.fileobj
self.info = WaveStreamInfo(fileobj)
fileobj.seek(0, 0)
try:
self.tags = _WaveID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
Open = WAVE
apev2.py
rembo10_headphones/lib/mutagen/apev2.py
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""APEv2 reading and writing.
The APEv2 format is most commonly used with Musepack files, but is
also the format of choice for WavPack and other formats. Some MP3s
also have APEv2 tags, but this can cause problems with many MP3
decoders and taggers.
APEv2 tags, like Vorbis comments, are freeform key=value pairs. APEv2
keys can be any ASCII string with characters from 0x20 to 0x7E,
between 2 and 255 characters long. Keys are case-sensitive, but
readers are recommended to be case insensitive, and it is forbidden to
multiple keys which differ only in case. Keys are usually stored
title-cased (e.g. 'Artist' rather than 'artist').
APEv2 values are slightly more structured than Vorbis comments; values
are flagged as one of text, binary, or an external reference (usually
a URI).
Based off the format specification found at
http://wiki.hydrogenaudio.org/index.php?title=APEv2_specification.
"""
__all__ = ["APEv2", "APEv2File", "Open", "delete"]
import sys
import struct
from io import BytesIO
from collections.abc import MutableSequence
from mutagen import Metadata, FileType, StreamInfo
from mutagen._util import DictMixin, cdata, delete_bytes, total_ordering, \
MutagenError, loadfile, convert_error, seek_end, get_size, reraise
def is_valid_apev2_key(key):
if not isinstance(key, str):
raise TypeError("APEv2 key must be str")
# PY26 - Change to set literal syntax (since set is faster than list here)
return ((2 <= len(key) <= 255) and (min(key) >= u' ') and
(max(key) <= u'~') and
(key not in [u"OggS", u"TAG", u"ID3", u"MP+"]))
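# Quick illustration (not part of mutagen) of the key rules checked above:
def _apev2_key_examples():  # documentation sketch, never called
    assert is_valid_apev2_key(u"Artist")
    assert not is_valid_apev2_key(u"x")        # shorter than 2 characters
    assert not is_valid_apev2_key(u"ID3")      # reserved marker
    assert not is_valid_apev2_key(u"Caf\xe9")  # outside 0x20-0x7E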
# There are three different kinds of APE tag values.
# "0: Item contains text information coded in UTF-8
# 1: Item contains binary information
# 2: Item is a locator of external stored information [e.g. URL]
# 3: reserved"
TEXT, BINARY, EXTERNAL = range(3)
HAS_HEADER = 1 << 31
HAS_NO_FOOTER = 1 << 30
IS_HEADER = 1 << 29
class error(MutagenError):
pass
class APENoHeaderError(error):
pass
class APEUnsupportedVersionError(error):
pass
class APEBadItemError(error):
pass
class _APEv2Data(object):
# Store offsets of the important parts of the file.
start = header = data = footer = end = None
# Footer or header; seek here and read 32 to get version/size/items/flags
metadata = None
# Actual tag data
tag = None
version = None
size = None
items = None
flags = 0
# The tag is at the start rather than the end. A tag at both
# the start and end of the file (i.e. the tag is the whole file)
# is not considered to be at the start.
is_at_start = False
def __init__(self, fileobj):
"""Raises IOError and apev2.error"""
self.__find_metadata(fileobj)
if self.header is None:
self.metadata = self.footer
elif self.footer is None:
self.metadata = self.header
else:
self.metadata = max(self.header, self.footer)
if self.metadata is None:
return
self.__fill_missing(fileobj)
self.__fix_brokenness(fileobj)
if self.data is not None:
fileobj.seek(self.data)
self.tag = fileobj.read(self.size)
def __find_metadata(self, fileobj):
# Try to find a header or footer.
# Check for a simple footer.
try:
fileobj.seek(-32, 2)
except IOError:
fileobj.seek(0, 2)
return
if fileobj.read(8) == b"APETAGEX":
fileobj.seek(-8, 1)
self.footer = self.metadata = fileobj.tell()
return
# Check for an APEv2 tag followed by an ID3v1 tag at the end.
try:
if get_size(fileobj) < 128:
raise IOError
fileobj.seek(-128, 2)
if fileobj.read(3) == b"TAG":
fileobj.seek(-35, 1) # "TAG" + header length
if fileobj.read(8) == b"APETAGEX":
fileobj.seek(-8, 1)
self.footer = fileobj.tell()
return
# ID3v1 tag at the end, maybe preceded by Lyrics3v2.
# (http://www.id3.org/lyrics3200.html)
# (header length - "APETAGEX") - "LYRICS200"
fileobj.seek(15, 1)
if fileobj.read(9) == b'LYRICS200':
fileobj.seek(-15, 1) # "LYRICS200" + size tag
try:
offset = int(fileobj.read(6))
except ValueError:
raise IOError
fileobj.seek(-32 - offset - 6, 1)
if fileobj.read(8) == b"APETAGEX":
fileobj.seek(-8, 1)
self.footer = fileobj.tell()
return
except IOError:
pass
# Check for a tag at the start.
fileobj.seek(0, 0)
if fileobj.read(8) == b"APETAGEX":
self.is_at_start = True
self.header = 0
def __fill_missing(self, fileobj):
"""Raises IOError and apev2.error"""
fileobj.seek(self.metadata + 8)
data = fileobj.read(16)
if len(data) != 16:
raise error
self.version = data[:4]
self.size = cdata.uint32_le(data[4:8])
self.items = cdata.uint32_le(data[8:12])
self.flags = cdata.uint32_le(data[12:])
if self.header is not None:
self.data = self.header + 32
# If we're reading the header, the size is the header
# offset + the size, which includes the footer.
self.end = self.data + self.size
fileobj.seek(self.end - 32, 0)
if fileobj.read(8) == b"APETAGEX":
self.footer = self.end - 32
elif self.footer is not None:
self.end = self.footer + 32
self.data = self.end - self.size
if self.flags & HAS_HEADER:
self.header = self.data - 32
else:
self.header = self.data
else:
raise APENoHeaderError("No APE tag found")
# exclude the footer from size
if self.footer is not None:
self.size -= 32
def __fix_brokenness(self, fileobj):
# Fix broken tags written with PyMusepack.
if self.header is not None:
start = self.header
else:
start = self.data
fileobj.seek(start)
while start > 0:
# Clean up broken writing from pre-Mutagen PyMusepack.
# It didn't remove the first 24 bytes of header.
try:
fileobj.seek(-24, 1)
except IOError:
break
else:
if fileobj.read(8) == b"APETAGEX":
fileobj.seek(-8, 1)
start = fileobj.tell()
else:
break
self.start = start
class _CIDictProxy(DictMixin):
def __init__(self, *args, **kwargs):
self.__casemap = {}
self.__dict = {}
super(_CIDictProxy, self).__init__(*args, **kwargs)
# Internally all names are stored as lowercase, but the case
# they were set with is remembered and used when saving. This
# is roughly in line with the standard, which says that keys
# are case-sensitive but two keys differing only in case are
# not allowed, and recommends case-insensitive
# implementations.
def __getitem__(self, key):
return self.__dict[key.lower()]
def __setitem__(self, key, value):
lower = key.lower()
self.__casemap[lower] = key
self.__dict[lower] = value
def __delitem__(self, key):
lower = key.lower()
del(self.__casemap[lower])
del(self.__dict[lower])
def keys(self):
return [self.__casemap.get(key, key) for key in self.__dict.keys()]
class APEv2(_CIDictProxy, Metadata):
"""APEv2(filething=None)
A file with an APEv2 tag.
ID3v1 tags are silently ignored and overwritten.
"""
filename = None
def pprint(self):
"""Return tag key=value pairs in a human-readable format."""
items = sorted(self.items())
return u"\n".join(u"%s=%s" % (k, v.pprint()) for k, v in items)
@convert_error(IOError, error)
@loadfile()
def load(self, filething):
"""Load tags from a filename.
Raises apev2.error
"""
data = _APEv2Data(filething.fileobj)
if data.tag:
self.clear()
self.__parse_tag(data.tag, data.items)
else:
raise APENoHeaderError("No APE tag found")
def __parse_tag(self, tag, count):
"""Raises IOError and APEBadItemError"""
fileobj = BytesIO(tag)
for i in range(count):
tag_data = fileobj.read(8)
# someone writes wrong item counts
if not tag_data:
break
if len(tag_data) != 8:
raise error
size = cdata.uint32_le(tag_data[:4])
flags = cdata.uint32_le(tag_data[4:8])
            # Bits 1 and 2 encode the value kind (0-3)
# Bit 0 is read/write flag, ignored
kind = (flags & 6) >> 1
if kind == 3:
raise APEBadItemError("value type must be 0, 1, or 2")
key = value = fileobj.read(1)
if not key:
raise APEBadItemError
while key[-1:] != b'\x00' and value:
value = fileobj.read(1)
if not value:
raise APEBadItemError
key += value
if key[-1:] == b"\x00":
key = key[:-1]
try:
key = key.decode("ascii")
except UnicodeError as err:
reraise(APEBadItemError, err, sys.exc_info()[2])
value = fileobj.read(size)
if len(value) != size:
raise APEBadItemError
value = _get_value_type(kind)._new(value)
self[key] = value
def __getitem__(self, key):
if not is_valid_apev2_key(key):
raise KeyError("%r is not a valid APEv2 key" % key)
return super(APEv2, self).__getitem__(key)
def __delitem__(self, key):
if not is_valid_apev2_key(key):
raise KeyError("%r is not a valid APEv2 key" % key)
super(APEv2, self).__delitem__(key)
def __setitem__(self, key, value):
"""'Magic' value setter.
This function tries to guess at what kind of value you want to
store. If you pass in a valid UTF-8 or Unicode string, it
treats it as a text value. If you pass in a list, it treats it
as a list of string/Unicode values. If you pass in a string
that is not valid UTF-8, it assumes it is a binary value.
Python 3: all bytes will be assumed to be a byte value, even
if they are valid utf-8.
If you need to force a specific type of value (e.g. binary
data that also happens to be valid UTF-8, or an external
reference), use the APEValue factory and set the value to the
result of that::
from mutagen.apev2 import APEValue, EXTERNAL
tag['Website'] = APEValue('http://example.org', EXTERNAL)
"""
if not is_valid_apev2_key(key):
raise KeyError("%r is not a valid APEv2 key" % key)
if not isinstance(value, _APEValue):
# let's guess at the content if we're not already a value...
if isinstance(value, str):
# unicode? we've got to be text.
value = APEValue(value, TEXT)
elif isinstance(value, list):
items = []
for v in value:
if not isinstance(v, str):
raise TypeError("item in list not str")
items.append(v)
# list? text.
value = APEValue(u"\0".join(items), TEXT)
else:
value = APEValue(value, BINARY)
super(APEv2, self).__setitem__(key, value)
@convert_error(IOError, error)
@loadfile(writable=True, create=True)
def save(self, filething=None):
"""Save changes to a file.
If no filename is given, the one most recently loaded is used.
Tags are always written at the end of the file, and include
a header and a footer.
"""
fileobj = filething.fileobj
data = _APEv2Data(fileobj)
if data.is_at_start:
delete_bytes(fileobj, data.end - data.start, data.start)
elif data.start is not None:
fileobj.seek(data.start)
# Delete an ID3v1 tag if present, too.
fileobj.truncate()
fileobj.seek(0, 2)
tags = []
for key, value in self.items():
# Packed format for an item:
# 4B: Value length
# 4B: Value type
# Key name
# 1B: Null
# Key value
value_data = value._write()
if not isinstance(key, bytes):
key = key.encode("utf-8")
tag_data = bytearray()
tag_data += struct.pack("<2I", len(value_data), value.kind << 1)
tag_data += key + b"\0" + value_data
tags.append(bytes(tag_data))
# "APE tags items should be sorted ascending by size... This is
# not a MUST, but STRONGLY recommended. Actually the items should
# be sorted by importance/byte, but this is not feasible."
tags.sort(key=lambda tag: (len(tag), tag))
num_tags = len(tags)
tags = b"".join(tags)
header = bytearray(b"APETAGEX")
# version, tag size, item count, flags
header += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
HAS_HEADER | IS_HEADER)
header += b"\0" * 8
fileobj.write(header)
fileobj.write(tags)
footer = bytearray(b"APETAGEX")
footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
HAS_HEADER)
footer += b"\0" * 8
fileobj.write(footer)
@convert_error(IOError, error)
@loadfile(writable=True)
def delete(self, filething=None):
"""Remove tags from a file."""
fileobj = filething.fileobj
data = _APEv2Data(fileobj)
if data.start is not None and data.size is not None:
delete_bytes(fileobj, data.end - data.start, data.start)
self.clear()
Open = APEv2
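# Illustration of the packed item layout documented in APEv2.save() above
# (4 bytes little-endian value length, 4 bytes flags with the kind in bits
# 1-2, the key, a NUL, then the value). A documentation sketch only; the
# artist name is placeholder data.
def _packed_item_example():  # never called by mutagen
    value = u"Some Artist".encode("utf-8")
    item = struct.pack("<2I", len(value), TEXT << 1) + b"Artist\x00" + value
    assert len(item) == 8 + len(b"Artist") + 1 + len(value)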
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
try:
t = APEv2(filething)
except APENoHeaderError:
return
filething.fileobj.seek(0)
t.delete(filething)
def _get_value_type(kind):
"""Returns a _APEValue subclass or raises ValueError"""
if kind == TEXT:
return APETextValue
elif kind == BINARY:
return APEBinaryValue
elif kind == EXTERNAL:
return APEExtValue
raise ValueError("unknown kind %r" % kind)
def APEValue(value, kind):
"""APEv2 tag value factory.
Use this if you need to specify the value's type manually. Binary
and text data are automatically detected by APEv2.__setitem__.
"""
try:
type_ = _get_value_type(kind)
except ValueError:
raise ValueError("kind must be TEXT, BINARY, or EXTERNAL")
else:
return type_(value)
class _APEValue(object):
kind = None
value = None
def __init__(self, value, kind=None):
# kind kwarg is for backwards compat
if kind is not None and kind != self.kind:
raise ValueError
self.value = self._validate(value)
@classmethod
def _new(cls, data):
instance = cls.__new__(cls)
instance._parse(data)
return instance
def _parse(self, data):
"""Sets value or raises APEBadItemError"""
raise NotImplementedError
def _write(self):
"""Returns bytes"""
raise NotImplementedError
def _validate(self, value):
"""Returns validated value or raises TypeError/ValueErrr"""
raise NotImplementedError
def __repr__(self):
return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind)
@total_ordering
class _APEUtf8Value(_APEValue):
def _parse(self, data):
try:
self.value = data.decode("utf-8")
except UnicodeDecodeError as e:
reraise(APEBadItemError, e, sys.exc_info()[2])
def _validate(self, value):
if not isinstance(value, str):
raise TypeError("value not str")
return value
def _write(self):
return self.value.encode("utf-8")
def __len__(self):
return len(self.value)
def __bytes__(self):
return self._write()
def __eq__(self, other):
return self.value == other
def __lt__(self, other):
return self.value < other
def __str__(self):
return self.value
class APETextValue(_APEUtf8Value, MutableSequence):
"""An APEv2 text value.
Text values are Unicode/UTF-8 strings. They can be accessed like
strings (with a null separating the values), or arrays of strings.
"""
kind = TEXT
def __iter__(self):
"""Iterate over the strings of the value (not the characters)"""
return iter(self.value.split(u"\0"))
def __getitem__(self, index):
return self.value.split(u"\0")[index]
def __len__(self):
return self.value.count(u"\0") + 1
def __setitem__(self, index, value):
if not isinstance(value, str):
raise TypeError("value not str")
values = list(self)
values[index] = value
self.value = u"\0".join(values)
def insert(self, index, value):
if not isinstance(value, str):
raise TypeError("value not str")
values = list(self)
values.insert(index, value)
self.value = u"\0".join(values)
def __delitem__(self, index):
values = list(self)
del values[index]
self.value = u"\0".join(values)
def pprint(self):
return u" / ".join(self)
@total_ordering
class APEBinaryValue(_APEValue):
"""An APEv2 binary value."""
kind = BINARY
def _parse(self, data):
self.value = data
def _write(self):
return self.value
def _validate(self, value):
if not isinstance(value, bytes):
raise TypeError("value not bytes")
return bytes(value)
def __len__(self):
return len(self.value)
def __bytes__(self):
return self._write()
def __eq__(self, other):
return self.value == other
def __lt__(self, other):
return self.value < other
def pprint(self):
return u"[%d bytes]" % len(self)
class APEExtValue(_APEUtf8Value):
"""An APEv2 external value.
External values are usually URI or IRI strings.
"""
kind = EXTERNAL
def pprint(self):
return u"[External] %s" % self.value
class APEv2File(FileType):
"""APEv2File(filething)
Arguments:
filething (filething)
Attributes:
tags (`APEv2`)
"""
class _Info(StreamInfo):
length = 0
bitrate = 0
def __init__(self, fileobj):
pass
@staticmethod
def pprint():
return u"Unknown format with APEv2 tag."
@loadfile()
def load(self, filething):
fileobj = filething.fileobj
self.info = self._Info(fileobj)
try:
fileobj.seek(0, 0)
except IOError as e:
raise error(e)
try:
self.tags = APEv2(fileobj)
except APENoHeaderError:
self.tags = None
def add_tags(self):
if self.tags is None:
self.tags = APEv2()
else:
raise error("%r already has tags: %r" % (self, self.tags))
@staticmethod
def score(filename, fileobj, header):
try:
seek_end(fileobj, 160)
footer = fileobj.read()
except IOError:
return -1
return ((b"APETAGEX" in footer) - header.startswith(b"ID3"))
dsf.py
rembo10_headphones/lib/mutagen/dsf.py
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Boris Pruessmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write DSF audio stream information and tags."""
import sys
import struct
from io import BytesIO
from mutagen import FileType, StreamInfo
from mutagen._util import cdata, MutagenError, loadfile, \
convert_error, reraise, endswith
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
__all__ = ["DSF", "Open", "delete"]
class error(MutagenError):
pass
class DSFChunk(object):
"""A generic chunk of a DSFFile."""
chunk_offset = 0
chunk_header = " "
chunk_size = -1
def __init__(self, fileobj, create=False):
self.fileobj = fileobj
if not create:
self.chunk_offset = fileobj.tell()
self.load()
def load(self):
raise NotImplementedError
def write(self):
raise NotImplementedError
class DSDChunk(DSFChunk):
"""Represents the first chunk of a DSF file"""
CHUNK_SIZE = 28
total_size = 0
offset_metdata_chunk = 0
def __init__(self, fileobj, create=False):
super(DSDChunk, self).__init__(fileobj, create)
if create:
self.chunk_header = b"DSD "
self.chunk_size = DSDChunk.CHUNK_SIZE
def load(self):
data = self.fileobj.read(DSDChunk.CHUNK_SIZE)
if len(data) != DSDChunk.CHUNK_SIZE:
raise error("DSF chunk truncated")
self.chunk_header = data[0:4]
if self.chunk_header != b"DSD ":
raise error("DSF dsd header not found")
self.chunk_size = cdata.ulonglong_le(data[4:12])
if self.chunk_size != DSDChunk.CHUNK_SIZE:
raise error("DSF dsd header size mismatch")
self.total_size = cdata.ulonglong_le(data[12:20])
self.offset_metdata_chunk = cdata.ulonglong_le(data[20:28])
def write(self):
f = BytesIO()
f.write(self.chunk_header)
f.write(struct.pack("<Q", DSDChunk.CHUNK_SIZE))
f.write(struct.pack("<Q", self.total_size))
f.write(struct.pack("<Q", self.offset_metdata_chunk))
self.fileobj.seek(self.chunk_offset)
self.fileobj.write(f.getvalue())
def pprint(self):
return (u"DSD Chunk (Total file size = %d, "
u"Pointer to Metadata chunk = %d)" % (
self.total_size, self.offset_metdata_chunk))
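# Worked example (illustrative, not part of mutagen): a minimal 28-byte DSD
# chunk is the magic b"DSD " followed by three little-endian uint64s (header
# size, total file size, metadata offset; "metdata" is the upstream spelling).
def _dsd_chunk_example():  # documentation sketch, never called
    header = b"DSD " + struct.pack("<QQQ", 28, 1024, 0)
    chunk = DSDChunk(BytesIO(header))
    assert chunk.total_size == 1024
    assert chunk.offset_metdata_chunk == 0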
class FormatChunk(DSFChunk):
CHUNK_SIZE = 52
VERSION = 1
FORMAT_DSD_RAW = 0
"""Format ID: DSD Raw"""
format_version = VERSION
format_id = FORMAT_DSD_RAW
channel_type = 1
channel_num = 1
sampling_frequency = 2822400
bits_per_sample = 1
sample_count = 0
block_size_per_channel = 4096
def __init__(self, fileobj, create=False):
super(FormatChunk, self).__init__(fileobj, create)
if create:
self.chunk_header = b"fmt "
self.chunk_size = FormatChunk.CHUNK_SIZE
def load(self):
data = self.fileobj.read(FormatChunk.CHUNK_SIZE)
if len(data) != FormatChunk.CHUNK_SIZE:
raise error("DSF chunk truncated")
self.chunk_header = data[0:4]
if self.chunk_header != b"fmt ":
raise error("DSF fmt header not found")
self.chunk_size = cdata.ulonglong_le(data[4:12])
if self.chunk_size != FormatChunk.CHUNK_SIZE:
raise error("DSF dsd header size mismatch")
self.format_version = cdata.uint_le(data[12:16])
if self.format_version != FormatChunk.VERSION:
raise error("Unsupported format version")
self.format_id = cdata.uint_le(data[16:20])
if self.format_id != FormatChunk.FORMAT_DSD_RAW:
raise error("Unsupported format ID")
self.channel_type = cdata.uint_le(data[20:24])
self.channel_num = cdata.uint_le(data[24:28])
self.sampling_frequency = cdata.uint_le(data[28:32])
self.bits_per_sample = cdata.uint_le(data[32:36])
self.sample_count = cdata.ulonglong_le(data[36:44])
def pprint(self):
return u"fmt Chunk (Channel Type = %d, Channel Num = %d, " \
u"Sampling Frequency = %d, %.2f seconds)" % \
(self.channel_type, self.channel_num, self.sampling_frequency,
self.length)
class DataChunk(DSFChunk):
CHUNK_SIZE = 12
data = ""
def __init__(self, fileobj, create=False):
super(DataChunk, self).__init__(fileobj, create)
if create:
self.chunk_header = b"data"
self.chunk_size = DataChunk.CHUNK_SIZE
def load(self):
data = self.fileobj.read(DataChunk.CHUNK_SIZE)
if len(data) != DataChunk.CHUNK_SIZE:
raise error("DSF chunk truncated")
self.chunk_header = data[0:4]
if self.chunk_header != b"data":
raise error("DSF data header not found")
self.chunk_size = cdata.ulonglong_le(data[4:12])
if self.chunk_size < DataChunk.CHUNK_SIZE:
raise error("DSF data header size mismatch")
def pprint(self):
return u"data Chunk (Chunk Offset = %d, Chunk Size = %d)" % (
self.chunk_offset, self.chunk_size)
class _DSFID3(ID3):
"""A DSF file with ID3v2 tags"""
@convert_error(IOError, error)
def _pre_load_header(self, fileobj):
fileobj.seek(0)
id3_location = DSDChunk(fileobj).offset_metdata_chunk
if id3_location == 0:
raise ID3NoHeaderError("File has no existing ID3 tag")
fileobj.seek(id3_location)
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the DSF file"""
fileobj = filething.fileobj
fileobj.seek(0)
dsd_header = DSDChunk(fileobj)
if dsd_header.offset_metdata_chunk == 0:
# create a new ID3 chunk at the end of the file
fileobj.seek(0, 2)
# store reference to ID3 location
dsd_header.offset_metdata_chunk = fileobj.tell()
dsd_header.write()
try:
data = self._prepare_data(
fileobj, dsd_header.offset_metdata_chunk, self.size,
v2_version, v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
fileobj.seek(dsd_header.offset_metdata_chunk)
fileobj.write(data)
fileobj.truncate()
# Update total file size
dsd_header.total_size = fileobj.tell()
dsd_header.write()
class DSFInfo(StreamInfo):
"""DSF audio stream information.
Information is parsed from the fmt chunk of the DSF file.
Attributes:
length (`float`): audio length, in seconds.
channels (`int`): The number of audio channels.
sample_rate (`int`):
Sampling frequency, in Hz.
(2822400, 5644800, 11289600, or 22579200)
bits_per_sample (`int`): The audio sample size.
bitrate (`int`): The audio bitrate.
"""
def __init__(self, fmt_chunk):
self.fmt_chunk = fmt_chunk
@property
def length(self):
return float(self.fmt_chunk.sample_count) / self.sample_rate
@property
def channels(self):
return self.fmt_chunk.channel_num
@property
def sample_rate(self):
return self.fmt_chunk.sampling_frequency
@property
def bits_per_sample(self):
return self.fmt_chunk.bits_per_sample
@property
def bitrate(self):
return self.sample_rate * self.bits_per_sample * self.channels
def pprint(self):
return u"%d channel DSF @ %d bits, %s Hz, %.2f seconds" % (
self.channels, self.bits_per_sample, self.sample_rate, self.length)
class DSFFile(object):
dsd_chunk = None
fmt_chunk = None
data_chunk = None
def __init__(self, fileobj):
self.dsd_chunk = DSDChunk(fileobj)
self.fmt_chunk = FormatChunk(fileobj)
self.data_chunk = DataChunk(fileobj)
class DSF(FileType):
"""An DSF audio file.
Arguments:
filething (filething)
Attributes:
info (`DSFInfo`)
tags (`mutagen.id3.ID3Tags` or `None`)
"""
_mimes = ["audio/dsf"]
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"DSD ") * 2 + \
endswith(filename.lower(), ".dsf")
def add_tags(self):
"""Add a DSF tag block to the file."""
if self.tags is None:
self.tags = _DSFID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
dsf_file = DSFFile(filething.fileobj)
try:
self.tags = _DSFID3(filething.fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
self.info = DSFInfo(dsf_file.fmt_chunk)
@loadfile(writable=True)
def delete(self, filething=None):
self.tags = None
delete(filething)
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Remove tags from a file.
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
dsf_file = DSFFile(filething.fileobj)
if dsf_file.dsd_chunk.offset_metdata_chunk != 0:
id3_location = dsf_file.dsd_chunk.offset_metdata_chunk
dsf_file.dsd_chunk.offset_metdata_chunk = 0
dsf_file.dsd_chunk.write()
filething.fileobj.seek(id3_location)
filething.fileobj.truncate()
Open = DSF
smf.py
rembo10_headphones/lib/mutagen/smf.py
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Standard MIDI File (SMF)"""
import struct
from mutagen import StreamInfo, MutagenError
from mutagen._file import FileType
from mutagen._util import loadfile, endswith
class SMFError(MutagenError):
pass
def _var_int(data, offset=0):
val = 0
while 1:
try:
x = data[offset]
except IndexError:
raise SMFError("Not enough data")
offset += 1
val = (val << 7) + (x & 0x7F)
if not (x & 0x80):
return val, offset
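# Worked example (illustrative, not part of mutagen): MIDI variable-length
# quantities carry 7 bits per byte, most significant first, with the high
# bit set on every byte except the last, so b"\x81\x48" -> (1 << 7) + 0x48.
def _var_int_example():  # documentation sketch, never called
    value, offset = _var_int(bytearray(b"\x81\x48"))
    assert (value, offset) == (200, 2)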
def _read_track(chunk):
"""Retuns a list of midi events and tempo change events"""
TEMPO, MIDI = range(2)
    # Deviations: The running status should be reset on non-midi events, but
    # some files contain meta events in between.
# TODO: Offset and time signature are not considered.
tempos = []
events = []
chunk = bytearray(chunk)
deltasum = 0
status = 0
off = 0
while off < len(chunk):
delta, off = _var_int(chunk, off)
deltasum += delta
event_type = chunk[off]
off += 1
if event_type == 0xFF:
meta_type = chunk[off]
off += 1
num, off = _var_int(chunk, off)
# TODO: support offset/time signature
if meta_type == 0x51:
data = chunk[off:off + num]
if len(data) != 3:
raise SMFError
tempo = struct.unpack(">I", b"\x00" + bytes(data))[0]
tempos.append((deltasum, TEMPO, tempo))
off += num
elif event_type in (0xF0, 0xF7):
val, off = _var_int(chunk, off)
off += val
else:
if event_type < 0x80:
# if < 0x80 take the type from the previous midi event
off += 1
event_type = status
elif event_type < 0xF0:
off += 2
status = event_type
else:
raise SMFError("invalid event")
if event_type >> 4 in (0xD, 0xC):
off -= 1
events.append((deltasum, MIDI, delta))
return events, tempos
def _read_midi_length(fileobj):
"""Returns the duration in seconds. Can raise all kind of errors..."""
TEMPO, MIDI = range(2)
def read_chunk(fileobj):
info = fileobj.read(8)
if len(info) != 8:
raise SMFError("truncated")
chunklen = struct.unpack(">I", info[4:])[0]
data = fileobj.read(chunklen)
if len(data) != chunklen:
raise SMFError("truncated")
return info[:4], data
identifier, chunk = read_chunk(fileobj)
if identifier != b"MThd":
raise SMFError("Not a MIDI file")
if len(chunk) != 6:
raise SMFError("truncated")
format_, ntracks, tickdiv = struct.unpack(">HHH", chunk)
if format_ > 1:
raise SMFError("Not supported format %d" % format_)
if tickdiv >> 15:
# fps = (-(tickdiv >> 8)) & 0xFF
# subres = tickdiv & 0xFF
# never saw one of those
raise SMFError("Not supported timing interval")
# get a list of events and tempo changes for each track
tracks = []
first_tempos = None
for tracknum in range(ntracks):
identifier, chunk = read_chunk(fileobj)
if identifier != b"MTrk":
continue
events, tempos = _read_track(chunk)
# In case of format == 1, copy the first tempo list to all tracks
first_tempos = first_tempos or tempos
if format_ == 1:
tempos = list(first_tempos)
events += tempos
events.sort()
tracks.append(events)
# calculate the duration of each track
durations = []
for events in tracks:
tempo = 500000
parts = []
deltasum = 0
for (dummy, type_, data) in events:
if type_ == TEMPO:
parts.append((deltasum, tempo))
tempo = data
deltasum = 0
else:
deltasum += data
parts.append((deltasum, tempo))
duration = 0
for (deltasum, tempo) in parts:
quarter, tpq = deltasum / float(tickdiv), tempo
duration += (quarter * tpq)
duration /= 10 ** 6
durations.append(duration)
# return the longest one
return max(durations)
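# Worked example (illustrative, not part of mutagen) of the arithmetic above:
# with tickdiv=480 ticks per quarter note and the default tempo of 500000
# microseconds per quarter, 960 accumulated ticks last exactly one second.
def _duration_example():  # documentation sketch, never called
    tickdiv, tempo, deltasum = 480, 500000, 960
    assert (deltasum / float(tickdiv)) * tempo / 10 ** 6 == 1.0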
class SMFInfo(StreamInfo):
"""SMFInfo()
Attributes:
length (`float`): Length in seconds
"""
def __init__(self, fileobj):
"""Raises SMFError"""
self.length = _read_midi_length(fileobj)
def pprint(self):
return u"SMF, %.2f seconds" % self.length
class SMF(FileType):
"""SMF(filething)
Standard MIDI File (SMF)
Attributes:
info (`SMFInfo`)
tags: `None`
"""
_mimes = ["audio/midi", "audio/x-midi"]
@loadfile()
def load(self, filething):
try:
self.info = SMFInfo(filething.fileobj)
except IOError as e:
raise SMFError(e)
def add_tags(self):
raise SMFError("doesn't support tags")
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return header.startswith(b"MThd") and (
endswith(filename, ".mid") or endswith(filename, ".midi"))
Open = SMF
error = SMFError
__all__ = ["SMF"]
_util.py
rembo10_headphones/lib/mutagen/_util.py
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Utility classes for Mutagen.
You should not rely on the interfaces here being stable. They are
intended for internal use in Mutagen only.
"""
import sys
import struct
import codecs
import errno
import decimal
from io import BytesIO
from collections import namedtuple
from contextlib import contextmanager
from functools import wraps
from fnmatch import fnmatchcase
_DEFAULT_BUFFER_SIZE = 2 ** 18
def endswith(text, end):
    # useful for paths, which can be either str or bytes
if isinstance(text, str):
if not isinstance(end, str):
end = end.decode("ascii")
else:
if not isinstance(end, bytes):
end = end.encode("ascii")
return text.endswith(end)
def reraise(tp, value, tb):
raise tp(value).with_traceback(tb)
def bchr(x):
return bytes([x])
def iterbytes(b):
return (bytes([v]) for v in b)
def intround(value):
"""Given a float returns a rounded int. Should give the same result on
both Py2/3
"""
return int(decimal.Decimal.from_float(
value).to_integral_value(decimal.ROUND_HALF_EVEN))
def is_fileobj(fileobj):
"""Returns:
        bool: if an argument passed to mutagen should be treated as a
file object
"""
return not (isinstance(fileobj, (str, bytes)) or
hasattr(fileobj, "__fspath__"))
def verify_fileobj(fileobj, writable=False):
"""Verifies that the passed fileobj is a file like object which
we can use.
Args:
writable (bool): verify that the file object is writable as well
Raises:
ValueError: In case the object is not a file object that is readable
(or writable if required) or is not opened in bytes mode.
"""
try:
data = fileobj.read(0)
except Exception:
if not hasattr(fileobj, "read"):
raise ValueError("%r not a valid file object" % fileobj)
raise ValueError("Can't read from file object %r" % fileobj)
if not isinstance(data, bytes):
raise ValueError(
"file object %r not opened in binary mode" % fileobj)
if writable:
try:
fileobj.write(b"")
except Exception:
if not hasattr(fileobj, "write"):
raise ValueError("%r not a valid file object" % fileobj)
raise ValueError("Can't write to file object %r" % fileobj)
def verify_filename(filename):
"""Checks of the passed in filename has the correct type.
Raises:
ValueError: if not a filename
"""
if is_fileobj(filename):
raise ValueError("%r not a filename" % filename)
def fileobj_name(fileobj):
"""
Returns:
text: A potential filename for a file object. Always a valid
path type, but might be empty or non-existent.
"""
value = getattr(fileobj, "name", u"")
if not isinstance(value, (str, bytes)):
value = str(value)
return value
def loadfile(method=True, writable=False, create=False):
"""A decorator for functions taking a `filething` as a first argument.
Passes a FileThing instance as the first argument to the wrapped function.
Args:
        method (bool): If the wrapped function is a method
writable (bool): If a filename is passed opens the file readwrite, if
passed a file object verifies that it is writable.
create (bool): If passed a filename that does not exist will create
a new empty file.
"""
def convert_file_args(args, kwargs):
filething = args[0] if args else None
filename = kwargs.pop("filename", None)
fileobj = kwargs.pop("fileobj", None)
return filething, filename, fileobj, args[1:], kwargs
def wrap(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
filething, filename, fileobj, args, kwargs = \
convert_file_args(args, kwargs)
with _openfile(self, filething, filename, fileobj,
writable, create) as h:
return func(self, h, *args, **kwargs)
@wraps(func)
def wrapper_func(*args, **kwargs):
filething, filename, fileobj, args, kwargs = \
convert_file_args(args, kwargs)
with _openfile(None, filething, filename, fileobj,
writable, create) as h:
return func(h, *args, **kwargs)
return wrapper if method else wrapper_func
return wrap
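# Usage sketch (illustrative; read_magic is hypothetical, not part of
# mutagen): loadfile() lets one function accept a filename, a file object,
# or a FileThing, and hands it a normalized FileThing either way.
def _loadfile_example():  # documentation sketch, never called
    @loadfile(method=False)
    def read_magic(filething):
        return filething.fileobj.read(4)
    return read_magic  # read_magic("song.ogg") or read_magic(fileobj=f)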
def convert_error(exc_src, exc_dest):
"""A decorator for reraising exceptions with a different type.
Mostly useful for IOError.
Args:
exc_src (type): The source exception type
exc_dest (type): The target exception type.
"""
def wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exc_dest:
raise
except exc_src as err:
reraise(exc_dest, err, sys.exc_info()[2])
return wrapper
return wrap
FileThing = namedtuple("FileThing", ["fileobj", "filename", "name"])
"""filename is None if the source is not a filename. name is a filename which
can be used for file type detection
"""
@contextmanager
def _openfile(instance, filething, filename, fileobj, writable, create):
"""yields a FileThing
Args:
filething: Either a file name, a file object or None
filename: Either a file name or None
fileobj: Either a file object or None
writable (bool): if the file should be opened
create (bool): if the file should be created if it doesn't exist.
implies writable
Raises:
MutagenError: In case opening the file failed
TypeError: in case neither a file name or a file object is passed
"""
assert not create or writable
# to allow stacked context managers, just pass the result through
if isinstance(filething, FileThing):
filename = filething.filename
fileobj = filething.fileobj
filething = None
if filething is not None:
if is_fileobj(filething):
fileobj = filething
elif hasattr(filething, "__fspath__"):
filename = filething.__fspath__()
if not isinstance(filename, (bytes, str)):
raise TypeError("expected __fspath__() to return a filename")
else:
filename = filething
if instance is not None:
# XXX: take "not writable" as loading the file..
if not writable:
instance.filename = filename
elif filename is None:
filename = getattr(instance, "filename", None)
if fileobj is not None:
verify_fileobj(fileobj, writable=writable)
yield FileThing(fileobj, filename, filename or fileobj_name(fileobj))
elif filename is not None:
verify_filename(filename)
inmemory_fileobj = False
try:
fileobj = open(filename, "rb+" if writable else "rb")
except IOError as e:
if writable and e.errno == errno.EOPNOTSUPP:
# Some file systems (gvfs over fuse) don't support opening
# files read/write. To make things still work read the whole
# file into an in-memory file like object and write it back
# later.
# https://github.com/quodlibet/mutagen/issues/300
try:
with open(filename, "rb") as fileobj:
fileobj = BytesIO(fileobj.read())
except IOError as e2:
raise MutagenError(e2)
inmemory_fileobj = True
elif create and e.errno == errno.ENOENT:
assert writable
try:
fileobj = open(filename, "wb+")
except IOError as e2:
raise MutagenError(e2)
else:
raise MutagenError(e)
with fileobj as fileobj:
yield FileThing(fileobj, filename, filename)
if inmemory_fileobj:
assert writable
data = fileobj.getvalue()
try:
with open(filename, "wb") as fileobj:
fileobj.write(data)
except IOError as e:
raise MutagenError(e)
else:
raise TypeError("Missing filename or fileobj argument")
class MutagenError(Exception):
"""Base class for all custom exceptions in mutagen
.. versionadded:: 1.25
"""
__module__ = "mutagen"
def total_ordering(cls):
"""Adds all possible ordering methods to a class.
Needs a working __eq__ and __lt__ and will supply the rest.
"""
assert "__eq__" in cls.__dict__
assert "__lt__" in cls.__dict__
cls.__le__ = lambda self, other: self == other or self < other
cls.__gt__ = lambda self, other: not (self == other or self < other)
cls.__ge__ = lambda self, other: not self < other
cls.__ne__ = lambda self, other: not self.__eq__(other)
return cls
def hashable(cls):
"""Makes sure the class is hashable.
Needs a working __eq__ and __hash__ and will add a __ne__.
"""
assert cls.__dict__["__hash__"] is not None
assert "__eq__" in cls.__dict__
cls.__ne__ = lambda self, other: not self.__eq__(other)
return cls
def enum(cls):
"""A decorator for creating an int enum class.
Makes the values a subclass of the type and implements repr/str.
The new class will be a subclass of int.
Args:
cls (type): The class to convert to an enum
Returns:
type: A new class
::
@enum
class Foo(object):
FOO = 1
BAR = 2
"""
assert cls.__bases__ == (object,)
d = dict(cls.__dict__)
new_type = type(cls.__name__, (int,), d)
new_type.__module__ = cls.__module__
map_ = {}
for key, value in d.items():
if key.upper() == key and isinstance(value, int):
value_instance = new_type(value)
setattr(new_type, key, value_instance)
map_[value] = key
def str_(self):
if self in map_:
return "%s.%s" % (type(self).__name__, map_[self])
return "%d" % int(self)
def repr_(self):
if self in map_:
return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self))
return "%d" % int(self)
setattr(new_type, "__repr__", repr_)
setattr(new_type, "__str__", str_)
return new_type
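# Usage sketch for @enum (illustrative, not part of mutagen): the values
# become instances of the new int subclass with readable str()/repr().
def _enum_example():  # documentation sketch, never called
    @enum
    class Color(object):
        RED = 1
    assert isinstance(Color.RED, int)
    assert str(Color.RED) == "Color.RED"
    assert repr(Color.RED) == "<Color.RED: 1>"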
def flags(cls):
"""A decorator for creating an int flags class.
Makes the values a subclass of the type and implements repr/str.
The new class will be a subclass of int.
Args:
        cls (type): The class to convert to a flags class
Returns:
type: A new class
::
@flags
class Foo(object):
FOO = 1
BAR = 2
"""
assert cls.__bases__ == (object,)
d = dict(cls.__dict__)
new_type = type(cls.__name__, (int,), d)
new_type.__module__ = cls.__module__
map_ = {}
for key, value in d.items():
if key.upper() == key and isinstance(value, int):
value_instance = new_type(value)
setattr(new_type, key, value_instance)
map_[value] = key
def str_(self):
value = int(self)
matches = []
for k, v in map_.items():
if value & k:
matches.append("%s.%s" % (type(self).__name__, v))
value &= ~k
if value != 0 or not matches:
matches.append(str(value))
return " | ".join(matches)
def repr_(self):
return "<%s: %d>" % (str(self), int(self))
setattr(new_type, "__repr__", repr_)
setattr(new_type, "__str__", str_)
return new_type
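# Illustrative sketch (not part of the original module): str() decomposes a
# combined value into the individual flag names.
#
#   >>> @flags
#   ... class Perm(object):
#   ...     READ = 1
#   ...     WRITE = 2
#   >>> str(Perm(Perm.READ | Perm.WRITE))
#   'Perm.READ | Perm.WRITE'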
@total_ordering
class DictMixin(object):
"""Implement the dict API using keys() and __*item__ methods.
Similar to UserDict.DictMixin, this takes a class that defines
__getitem__, __setitem__, __delitem__, and keys(), and turns it
into a full dict-like object.
UserDict.DictMixin is not suitable for this purpose because it's
an old-style class.
This class is not optimized for very large dictionaries; many
functions have linear memory requirements. I recommend you
override some of these functions if speed is required.
"""
def __iter__(self):
return iter(self.keys())
def __has_key(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
__contains__ = __has_key
def values(self):
return [self[k] for k in self.keys()]
def items(self):
return list(zip(self.keys(), self.values()))
def clear(self):
for key in list(self.keys()):
self.__delitem__(key)
def pop(self, key, *args):
if len(args) > 1:
raise TypeError("pop takes at most two arguments")
try:
value = self[key]
except KeyError:
if args:
return args[0]
else:
raise
del(self[key])
return value
def popitem(self):
for key in self.keys():
break
else:
raise KeyError("dictionary is empty")
return key, self.pop(key)
def update(self, other=None, **kwargs):
if other is None:
self.update(kwargs)
other = {}
try:
for key, value in other.items():
self.__setitem__(key, value)
except AttributeError:
for key, value in other:
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.items()))
def __eq__(self, other):
return dict(self.items()) == other
def __lt__(self, other):
return dict(self.items()) < other
__hash__ = object.__hash__
def __len__(self):
return len(self.keys())
class DictProxy(DictMixin):
def __init__(self, *args, **kwargs):
self.__dict = {}
super(DictProxy, self).__init__(*args, **kwargs)
def __getitem__(self, key):
return self.__dict[key]
def __setitem__(self, key, value):
self.__dict[key] = value
def __delitem__(self, key):
del(self.__dict[key])
def keys(self):
return self.__dict.keys()
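# Illustrative sketch (not part of the original module): DictProxy defines
# only the four primitives above; DictMixin supplies the rest of the dict
# API.
#
#   >>> d = DictProxy()
#   >>> d["a"] = 1
#   >>> d.get("a"), d.setdefault("b", 2), dict(d.items()) == {"a": 1, "b": 2}
#   (1, 2, True)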
def _fill_cdata(cls):
"""Add struct pack/unpack functions"""
funcs = {}
for key, name in [("b", "char"), ("h", "short"),
("i", "int"), ("q", "longlong")]:
for echar, esuffix in [("<", "le"), (">", "be")]:
esuffix = "_" + esuffix
for unsigned in [True, False]:
s = struct.Struct(echar + (key.upper() if unsigned else key))
get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]
unpack = get_wrapper(s.unpack)
unpack_from = get_wrapper(s.unpack_from)
def get_unpack_from(s):
def unpack_from(data, offset=0):
return s.unpack_from(data, offset)[0], offset + s.size
return unpack_from
unpack_from = get_unpack_from(s)
pack = s.pack
prefix = "u" if unsigned else ""
if s.size == 1:
esuffix = ""
bits = str(s.size * 8)
if unsigned:
max_ = 2 ** (s.size * 8) - 1
min_ = 0
else:
max_ = 2 ** (s.size * 8 - 1) - 1
min_ = - 2 ** (s.size * 8 - 1)
funcs["%s%s_min" % (prefix, name)] = min_
funcs["%s%s_max" % (prefix, name)] = max_
funcs["%sint%s_min" % (prefix, bits)] = min_
funcs["%sint%s_max" % (prefix, bits)] = max_
funcs["%s%s%s" % (prefix, name, esuffix)] = unpack
funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack
funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from
funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from
funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack
funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack
for key, func in funcs.items():
setattr(cls, key, staticmethod(func))
class cdata(object):
"""C character buffer to Python numeric type conversions.
For each size/sign/endianness:
uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0)
"""
error = struct.error
bitswap = b''.join(
bchr(sum(((val >> i) & 1) << (7 - i) for i in range(8)))
for val in range(256))
test_bit = staticmethod(lambda value, n: bool((value >> n) & 1))
_fill_cdata(cdata)
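# Illustrative sketch (not part of the original module): a few of the names
# generated by _fill_cdata above, here for a little endian unsigned 32 bit
# int.
#
#   >>> cdata.to_uint32_le(1)
#   b'\x01\x00\x00\x00'
#   >>> cdata.uint32_le(b'\x01\x00\x00\x00')
#   1
#   >>> cdata.uint32_le_from(b'\xff\x01\x00\x00\x00', 1)  # (value, offset)
#   (1, 5)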
def get_size(fileobj):
"""Returns the size of the file.
The position when passed in will be preserved if no error occurs.
Args:
fileobj (fileobj)
Returns:
int: The size of the file
Raises:
IOError
"""
old_pos = fileobj.tell()
try:
fileobj.seek(0, 2)
return fileobj.tell()
finally:
fileobj.seek(old_pos, 0)
def read_full(fileobj, size):
"""Like fileobj.read but raises IOError if not all requested data is
returned.
    If you need to distinguish an IOError from the end-of-stream case,
    handle the read yourself instead of using this.
Args:
fileobj (fileobj)
size (int): amount of bytes to read
Raises:
IOError: In case read fails or not enough data is read
"""
if size < 0:
raise ValueError("size must not be negative")
data = fileobj.read(size)
if len(data) != size:
raise IOError
return data
def seek_end(fileobj, offset):
"""Like fileobj.seek(-offset, 2), but will not try to go beyond the start
Needed since file objects from BytesIO will not raise IOError and
file objects from open() will raise IOError if going to a negative offset.
To make things easier for custom implementations, instead of allowing
both behaviors, we just don't do it.
Args:
fileobj (fileobj)
offset (int): how many bytes away from the end backwards to seek to
Raises:
IOError
"""
if offset < 0:
raise ValueError
if get_size(fileobj) < offset:
fileobj.seek(0, 0)
else:
fileobj.seek(-offset, 2)
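# Illustrative sketch (not part of the original module): the three helpers
# above on an in-memory file.
#
#   >>> from io import BytesIO
#   >>> f = BytesIO(b"abcdef")
#   >>> get_size(f)
#   6
#   >>> seek_end(f, 2); read_full(f, 2)
#   b'ef'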
def resize_file(fobj, diff, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Resize a file by `diff`.
New space will be filled with zeros.
Args:
fobj (fileobj)
diff (int): amount of size to change
Raises:
IOError
"""
fobj.seek(0, 2)
filesize = fobj.tell()
if diff < 0:
if filesize + diff < 0:
raise ValueError
# truncate flushes internally
fobj.truncate(filesize + diff)
elif diff > 0:
try:
while diff:
addsize = min(BUFFER_SIZE, diff)
fobj.write(b"\x00" * addsize)
diff -= addsize
fobj.flush()
except IOError as e:
if e.errno == errno.ENOSPC:
# To reduce the chance of corrupt files in case of missing
# space try to revert the file expansion back. Of course
# in reality every in-file-write can also fail due to COW etc.
# Note: IOError gets also raised in flush() due to buffering
fobj.truncate(filesize)
raise
def move_bytes(fobj, dest, src, count, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Moves data around using read()/write().
Args:
        fobj (fileobj)
        dest (int): The destination offset
        src (int): The source offset
        count (int): The amount of data to move
Raises:
IOError: In case an operation on the fileobj fails
ValueError: In case invalid parameters were given
"""
if dest < 0 or src < 0 or count < 0:
raise ValueError
fobj.seek(0, 2)
filesize = fobj.tell()
if max(dest, src) + count > filesize:
raise ValueError("area outside of file")
if src > dest:
moved = 0
while count - moved:
this_move = min(BUFFER_SIZE, count - moved)
fobj.seek(src + moved)
buf = fobj.read(this_move)
fobj.seek(dest + moved)
fobj.write(buf)
moved += this_move
fobj.flush()
else:
while count:
this_move = min(BUFFER_SIZE, count)
fobj.seek(src + count - this_move)
buf = fobj.read(this_move)
fobj.seek(count + dest - this_move)
fobj.write(buf)
count -= this_move
fobj.flush()
def insert_bytes(fobj, size, offset, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Insert size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent.
Args:
fobj (fileobj)
size (int): The amount of space to insert
offset (int): The offset at which to insert the space
Raises:
IOError
"""
if size < 0 or offset < 0:
raise ValueError
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset
if movesize < 0:
raise ValueError
resize_file(fobj, size, BUFFER_SIZE)
move_bytes(fobj, offset + size, offset, movesize, BUFFER_SIZE)
def delete_bytes(fobj, size, offset, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
"""Delete size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent.
Args:
fobj (fileobj)
size (int): The amount of space to delete
offset (int): The start of the space to delete
Raises:
IOError
"""
if size < 0 or offset < 0:
raise ValueError
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset - size
if movesize < 0:
raise ValueError
move_bytes(fobj, offset, offset + size, movesize, BUFFER_SIZE)
resize_file(fobj, -size, BUFFER_SIZE)
def resize_bytes(fobj, old_size, new_size, offset):
"""Resize an area in a file adding and deleting at the end of it.
Does nothing if no resizing is needed.
Args:
fobj (fileobj)
old_size (int): The area starting at offset
new_size (int): The new size of the area
offset (int): The start of the area
Raises:
IOError
"""
if new_size < old_size:
delete_size = old_size - new_size
delete_at = offset + new_size
delete_bytes(fobj, delete_size, delete_at)
elif new_size > old_size:
insert_size = new_size - old_size
insert_at = offset + old_size
insert_bytes(fobj, insert_size, insert_at)
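# Illustrative sketch (not part of the original module): delete_bytes removes
# data in place, while insert_bytes only makes room; the gap keeps stale
# bytes until the caller overwrites it.
#
#   >>> from io import BytesIO
#   >>> f = BytesIO(b"abcXXdef")
#   >>> delete_bytes(f, 2, 3); f.getvalue()
#   b'abcdef'
#   >>> insert_bytes(f, 2, 3); len(f.getvalue())
#   8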
def dict_match(d, key, default=None):
"""Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match.
"""
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in d.items():
if fnmatchcase(key, pattern):
return value
return default
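# Illustrative sketch (not part of the original module):
#
#   >>> d = {"abc*": 1, "x": 2}
#   >>> dict_match(d, "abcde"), dict_match(d, "x"), dict_match(d, "y")
#   (1, 2, None)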
def encode_endian(text, encoding, errors="strict", le=True):
"""Like text.encode(encoding) but always returns little endian/big endian
BOMs instead of the system one.
Args:
text (text)
encoding (str)
errors (str)
le (boolean): if little endian
Returns:
bytes
Raises:
UnicodeEncodeError
LookupError
"""
encoding = codecs.lookup(encoding).name
if encoding == "utf-16":
if le:
return codecs.BOM_UTF16_LE + text.encode("utf-16-le", errors)
else:
return codecs.BOM_UTF16_BE + text.encode("utf-16-be", errors)
elif encoding == "utf-32":
if le:
return codecs.BOM_UTF32_LE + text.encode("utf-32-le", errors)
else:
return codecs.BOM_UTF32_BE + text.encode("utf-32-be", errors)
else:
return text.encode(encoding, errors)
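# Illustrative sketch (not part of the original module): the BOM is forced
# to the requested byte order regardless of the platform default.
#
#   >>> encode_endian(u"a", "utf-16", le=True)
#   b'\xff\xfea\x00'
#   >>> encode_endian(u"a", "utf-16", le=False)
#   b'\xfe\xff\x00a'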
def decode_terminated(data, encoding, strict=True):
"""Returns the decoded data until the first NULL terminator
and all data after it.
Args:
data (bytes): data to decode
encoding (str): The codec to use
strict (bool): If True will raise ValueError in case no NULL is found
but the available data decoded successfully.
Returns:
Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
remaining data after the found NULL termination.
Raises:
UnicodeError: In case the data can't be decoded.
        LookupError: In case the encoding is not found.
ValueError: In case the data isn't null terminated (even if it is
encoded correctly) except if strict is False, then the decoded
string will be returned anyway.
"""
codec_info = codecs.lookup(encoding)
# normalize encoding name so we can compare by name
encoding = codec_info.name
# fast path
if encoding in ("utf-8", "iso8859-1"):
index = data.find(b"\x00")
if index == -1:
# make sure we raise UnicodeError first, like in the slow path
res = data.decode(encoding), b""
if strict:
raise ValueError("not null terminated")
else:
return res
return data[:index].decode(encoding), data[index + 1:]
# slow path
decoder = codec_info.incrementaldecoder()
r = []
for i, b in enumerate(iterbytes(data)):
c = decoder.decode(b)
if c == u"\x00":
return u"".join(r), data[i + 1:]
r.append(c)
else:
# make sure the decoder is finished
r.append(decoder.decode(b"", True))
if strict:
raise ValueError("not null terminated")
return u"".join(r), b""
class BitReaderError(Exception):
pass
class BitReader(object):
def __init__(self, fileobj):
self._fileobj = fileobj
self._buffer = 0
self._bits = 0
self._pos = fileobj.tell()
def bits(self, count):
"""Reads `count` bits and returns an uint, MSB read first.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object.
"""
if count < 0:
raise ValueError
if count > self._bits:
n_bytes = (count - self._bits + 7) // 8
data = self._fileobj.read(n_bytes)
if len(data) != n_bytes:
raise BitReaderError("not enough data")
for b in bytearray(data):
self._buffer = (self._buffer << 8) | b
self._bits += n_bytes * 8
self._bits -= count
value = self._buffer >> self._bits
self._buffer &= (1 << self._bits) - 1
assert self._bits < 8
return value
def bytes(self, count):
"""Returns a bytearray of length `count`. Works unaligned."""
if count < 0:
raise ValueError
# fast path
if self._bits == 0:
data = self._fileobj.read(count)
if len(data) != count:
raise BitReaderError("not enough data")
return data
return bytes(bytearray(self.bits(8) for _ in range(count)))
def skip(self, count):
"""Skip `count` bits.
Might raise BitReaderError if there wasn't enough data to skip,
but might also fail on the next bits() instead.
"""
if count < 0:
raise ValueError
if count <= self._bits:
self.bits(count)
else:
count -= self.align()
n_bytes = count // 8
self._fileobj.seek(n_bytes, 1)
count -= n_bytes * 8
self.bits(count)
def get_position(self):
"""Returns the amount of bits read or skipped so far"""
return (self._fileobj.tell() - self._pos) * 8 - self._bits
def align(self):
"""Align to the next byte, returns the amount of bits skipped"""
bits = self._bits
self._buffer = 0
self._bits = 0
return bits
def is_aligned(self):
"""If we are currently aligned to bytes and nothing is buffered"""
return self._bits == 0
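# Illustrative sketch (not part of the original module): MSB-first reads
# across byte boundaries.
#
#   >>> from io import BytesIO
#   >>> r = BitReader(BytesIO(b"\xf0\x0f"))
#   >>> r.bits(4), r.bits(8), r.bits(4)
#   (15, 0, 15)
#   >>> r.is_aligned()
#   True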
| 8,513 | __init__.py | rembo10_headphones/lib/mutagen/__init__.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Mutagen aims to be an all purpose multimedia tagging library.
::
import mutagen.[format]
metadata = mutagen.[format].Open(filename)
``metadata`` acts like a dictionary of tags in the file. Tags are generally a
list of string-like values, but may have additional methods available
depending on tag or format. They may also be entirely different objects
for certain keys, again depending on format.
"""
from mutagen._util import MutagenError
from mutagen._file import FileType, StreamInfo, File
from mutagen._tags import Tags, Metadata, PaddingInfo
version = (1, 45, 1)
"""Version tuple."""
version_string = ".".join(map(str, version))
"""Version string."""
MutagenError
FileType
StreamInfo
File
Tags
Metadata
PaddingInfo
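# A short usage sketch of the pattern described in the module docstring;
# "example.flac" is a placeholder path.
#
#   >>> import mutagen
#   >>> f = mutagen.File("example.flac")  # guesses the format
#   >>> f["title"] = [u"An Example"]
#   >>> f.save()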
| 8,514 | easymp4.py | rembo10_headphones/lib/mutagen/easymp4.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from mutagen import Tags
from mutagen._util import DictMixin, dict_match
from mutagen.mp4 import MP4, MP4Tags, error, delete
__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]
class EasyMP4KeyError(error, KeyError, ValueError):
pass
class EasyMP4Tags(DictMixin, Tags):
"""EasyMP4Tags()
A file with MPEG-4 iTunes metadata.
Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII
strings, and values are a list of Unicode strings (and these lists
are always of length 0 or 1).
If you need access to the full MP4 metadata feature set, you should use
MP4, not EasyMP4.
"""
Set = {}
Get = {}
Delete = {}
List = {}
def __init__(self, *args, **kwargs):
self.__mp4 = MP4Tags(*args, **kwargs)
self.load = self.__mp4.load
self.save = self.__mp4.save
self.delete = self.__mp4.delete
filename = property(lambda s: s.__mp4.filename,
lambda s, fn: setattr(s.__mp4, 'filename', fn))
@property
def _padding(self):
return self.__mp4._padding
@classmethod
def RegisterKey(cls, key,
getter=None, setter=None, deleter=None, lister=None):
"""Register a new key mapping.
A key mapping is four functions, a getter, setter, deleter,
and lister. The key may be either a string or a glob pattern.
        The getter, deleter, and lister receive an MP4Tags instance
and the requested key name. The setter also receives the
desired value, which will be a list of strings.
The getter, setter, and deleter are used to implement __getitem__,
__setitem__, and __delitem__.
The lister is used to implement keys(). It should return a
list of keys that are actually in the MP4 instance, provided
by its associated getter.
"""
key = key.lower()
if getter is not None:
cls.Get[key] = getter
if setter is not None:
cls.Set[key] = setter
if deleter is not None:
cls.Delete[key] = deleter
if lister is not None:
cls.List[key] = lister
@classmethod
def RegisterTextKey(cls, key, atomid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 atom name to EasyMP4Tags key, then you can use this
function::
EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
"""
def getter(tags, key):
return tags[atomid]
def setter(tags, key, value):
tags[atomid] = value
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
"""Register a scalar integer key.
"""
def getter(tags, key):
return list(map(str, tags[atomid]))
def setter(tags, key, value):
clamp = lambda x: int(min(max(min_value, x), max_value))
tags[atomid] = [clamp(v) for v in map(int, value)]
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterIntPairKey(cls, key, atomid, min_value=0,
max_value=(2 ** 16) - 1):
def getter(tags, key):
ret = []
for (track, total) in tags[atomid]:
if total:
ret.append(u"%d/%d" % (track, total))
else:
ret.append(str(track))
return ret
def setter(tags, key, value):
clamp = lambda x: int(min(max(min_value, x), max_value))
data = []
for v in value:
try:
tracks, total = v.split("/")
tracks = clamp(int(tracks))
total = clamp(int(total))
except (ValueError, TypeError):
tracks = clamp(int(v))
total = min_value
data.append((tracks, total))
tags[atomid] = data
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 freeform atom (----) and name to EasyMP4Tags key, then
you can use this function::
EasyMP4Tags.RegisterFreeformKey(
"musicbrainz_artistid", "MusicBrainz Artist Id")
"""
atomid = "----:" + mean + ":" + name
def getter(tags, key):
return [s.decode("utf-8", "replace") for s in tags[atomid]]
def setter(tags, key, value):
encoded = []
for v in value:
if not isinstance(v, str):
raise TypeError("%r not str" % v)
encoded.append(v.encode("utf-8"))
tags[atomid] = encoded
def deleter(tags, key):
del(tags[atomid])
cls.RegisterKey(key, getter, setter, deleter)
def __getitem__(self, key):
key = key.lower()
func = dict_match(self.Get, key)
if func is not None:
return func(self.__mp4, key)
else:
raise EasyMP4KeyError("%r is not a valid key" % key)
def __setitem__(self, key, value):
key = key.lower()
if isinstance(value, str):
value = [value]
func = dict_match(self.Set, key)
if func is not None:
return func(self.__mp4, key, value)
else:
raise EasyMP4KeyError("%r is not a valid key" % key)
def __delitem__(self, key):
key = key.lower()
func = dict_match(self.Delete, key)
if func is not None:
return func(self.__mp4, key)
else:
raise EasyMP4KeyError("%r is not a valid key" % key)
def keys(self):
keys = []
for key in self.Get.keys():
if key in self.List:
keys.extend(self.List[key](self.__mp4, key))
elif key in self:
keys.append(key)
return keys
def pprint(self):
"""Print tag key=value pairs."""
strings = []
for key in sorted(self.keys()):
values = self[key]
for value in values:
strings.append("%s=%s" % (key, value))
return "\n".join(strings)
for atomid, key in {
'\xa9nam': 'title',
'\xa9alb': 'album',
'\xa9ART': 'artist',
'aART': 'albumartist',
'\xa9day': 'date',
'\xa9cmt': 'comment',
'desc': 'description',
'\xa9grp': 'grouping',
'\xa9gen': 'genre',
'cprt': 'copyright',
'soal': 'albumsort',
'soaa': 'albumartistsort',
'soar': 'artistsort',
'sonm': 'titlesort',
'soco': 'composersort',
}.items():
EasyMP4Tags.RegisterTextKey(key, atomid)
for name, key in {
'MusicBrainz Artist Id': 'musicbrainz_artistid',
'MusicBrainz Track Id': 'musicbrainz_trackid',
'MusicBrainz Album Id': 'musicbrainz_albumid',
'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
'MusicIP PUID': 'musicip_puid',
'MusicBrainz Album Status': 'musicbrainz_albumstatus',
'MusicBrainz Album Type': 'musicbrainz_albumtype',
'MusicBrainz Release Country': 'releasecountry',
}.items():
EasyMP4Tags.RegisterFreeformKey(key, name)
for name, key in {
"tmpo": "bpm",
}.items():
EasyMP4Tags.RegisterIntKey(key, name)
for name, key in {
"trkn": "tracknumber",
"disk": "discnumber",
}.items():
EasyMP4Tags.RegisterIntPairKey(key, name)
class EasyMP4(MP4):
"""EasyMP4(filelike)
Like :class:`MP4 <mutagen.mp4.MP4>`, but uses :class:`EasyMP4Tags` for
tags.
Attributes:
info (`mutagen.mp4.MP4Info`)
tags (`EasyMP4Tags`)
"""
MP4Tags = EasyMP4Tags
Get = EasyMP4Tags.Get
Set = EasyMP4Tags.Set
Delete = EasyMP4Tags.Delete
List = EasyMP4Tags.List
RegisterTextKey = EasyMP4Tags.RegisterTextKey
RegisterKey = EasyMP4Tags.RegisterKey
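# A hedged usage demo, not part of mutagen itself; pass an MP4 file path on
# the command line.
if __name__ == "__main__":
    import sys
    audio = EasyMP4(sys.argv[1])
    audio["title"] = u"An Example"
    audio["tracknumber"] = u"3/12"
    audio.save()
    print(audio.pprint())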
| 8,515 | _constants.py | rembo10_headphones/lib/mutagen/_constants.py |
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Constants used by Mutagen."""
GENRES = [
u"Blues",
u"Classic Rock",
u"Country",
u"Dance",
u"Disco",
u"Funk",
u"Grunge",
u"Hip-Hop",
u"Jazz",
u"Metal",
u"New Age",
u"Oldies",
u"Other",
u"Pop",
u"R&B",
u"Rap",
u"Reggae",
u"Rock",
u"Techno",
u"Industrial",
u"Alternative",
u"Ska",
u"Death Metal",
u"Pranks",
u"Soundtrack",
u"Euro-Techno",
u"Ambient",
u"Trip-Hop",
u"Vocal",
u"Jazz+Funk",
u"Fusion",
u"Trance",
u"Classical",
u"Instrumental",
u"Acid",
u"House",
u"Game",
u"Sound Clip",
u"Gospel",
u"Noise",
u"Alt. Rock",
u"Bass",
u"Soul",
u"Punk",
u"Space",
u"Meditative",
u"Instrumental Pop",
u"Instrumental Rock",
u"Ethnic",
u"Gothic",
u"Darkwave",
u"Techno-Industrial",
u"Electronic",
u"Pop-Folk",
u"Eurodance",
u"Dream",
u"Southern Rock",
u"Comedy",
u"Cult",
u"Gangsta Rap",
u"Top 40",
u"Christian Rap",
u"Pop/Funk",
u"Jungle",
u"Native American",
u"Cabaret",
u"New Wave",
u"Psychedelic",
u"Rave",
u"Showtunes",
u"Trailer",
u"Lo-Fi",
u"Tribal",
u"Acid Punk",
u"Acid Jazz",
u"Polka",
u"Retro",
u"Musical",
u"Rock & Roll",
u"Hard Rock",
u"Folk",
u"Folk-Rock",
u"National Folk",
u"Swing",
u"Fast-Fusion",
u"Bebop",
u"Latin",
u"Revival",
u"Celtic",
u"Bluegrass",
u"Avantgarde",
u"Gothic Rock",
u"Progressive Rock",
u"Psychedelic Rock",
u"Symphonic Rock",
u"Slow Rock",
u"Big Band",
u"Chorus",
u"Easy Listening",
u"Acoustic",
u"Humour",
u"Speech",
u"Chanson",
u"Opera",
u"Chamber Music",
u"Sonata",
u"Symphony",
u"Booty Bass",
u"Primus",
u"Porn Groove",
u"Satire",
u"Slow Jam",
u"Club",
u"Tango",
u"Samba",
u"Folklore",
u"Ballad",
u"Power Ballad",
u"Rhythmic Soul",
u"Freestyle",
u"Duet",
u"Punk Rock",
u"Drum Solo",
u"A Cappella",
u"Euro-House",
u"Dance Hall",
u"Goa",
u"Drum & Bass",
u"Club-House",
u"Hardcore",
u"Terror",
u"Indie",
u"BritPop",
u"Afro-Punk",
u"Polsk Punk",
u"Beat",
u"Christian Gangsta Rap",
u"Heavy Metal",
u"Black Metal",
u"Crossover",
u"Contemporary Christian",
u"Christian Rock",
u"Merengue",
u"Salsa",
u"Thrash Metal",
u"Anime",
u"JPop",
u"Synthpop",
u"Abstract",
u"Art Rock",
u"Baroque",
u"Bhangra",
u"Big Beat",
u"Breakbeat",
u"Chillout",
u"Downtempo",
u"Dub",
u"EBM",
u"Eclectic",
u"Electro",
u"Electroclash",
u"Emo",
u"Experimental",
u"Garage",
u"Global",
u"IDM",
u"Illbient",
u"Industro-Goth",
u"Jam Band",
u"Krautrock",
u"Leftfield",
u"Lounge",
u"Math Rock",
u"New Romantic",
u"Nu-Breakz",
u"Post-Punk",
u"Post-Rock",
u"Psytrance",
u"Shoegaze",
u"Space Rock",
u"Trop Rock",
u"World Music",
u"Neoclassical",
u"Audiobook",
u"Audio Theatre",
u"Neue Deutsche Welle",
u"Podcast",
u"Indie Rock",
u"G-Funk",
u"Dubstep",
u"Garage Rock",
u"Psybient",
]
"""The ID3v1 genre list."""
| 8,516 | oggflac.py | rembo10_headphones/lib/mutagen/oggflac.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Ogg FLAC comments.
This module handles FLAC files wrapped in an Ogg bitstream. The first
FLAC stream found is used. For 'naked' FLACs, see mutagen.flac.
This module is based off the specification at
http://flac.sourceforge.net/ogg_mapping.html.
"""
__all__ = ["OggFLAC", "Open", "delete"]
import struct
from io import BytesIO
from mutagen import StreamInfo
from mutagen.flac import StreamInfo as FLACStreamInfo, error as FLACError
from mutagen._vorbis import VCommentDict
from mutagen._util import loadfile, convert_error
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
pass
class OggFLACHeaderError(error):
pass
class OggFLACStreamInfo(StreamInfo):
"""OggFLACStreamInfo()
Ogg FLAC stream info.
Attributes:
length (`float`): File length in seconds, as a float
        channels (`int`): Number of channels
        sample_rate (`int`): Sample rate in Hz
"""
length = 0
channels = 0
sample_rate = 0
def __init__(self, fileobj):
page = OggPage(fileobj)
while not page.packets[0].startswith(b"\x7FFLAC"):
page = OggPage(fileobj)
major, minor, self.packets, flac = struct.unpack(
">BBH4s", page.packets[0][5:13])
if flac != b"fLaC":
raise OggFLACHeaderError("invalid FLAC marker (%r)" % flac)
elif (major, minor) != (1, 0):
raise OggFLACHeaderError(
"unknown mapping version: %d.%d" % (major, minor))
self.serial = page.serial
# Skip over the block header.
stringobj = BytesIO(page.packets[0][17:])
try:
flac_info = FLACStreamInfo(stringobj)
except FLACError as e:
raise OggFLACHeaderError(e)
for attr in ["min_blocksize", "max_blocksize", "sample_rate",
"channels", "bits_per_sample", "total_samples", "length"]:
setattr(self, attr, getattr(flac_info, attr))
def _post_tags(self, fileobj):
if self.length:
return
page = OggPage.find_last(fileobj, self.serial, finishing=True)
if page is None:
raise OggFLACHeaderError
self.length = page.position / float(self.sample_rate)
def pprint(self):
return u"Ogg FLAC, %.2f seconds, %d Hz" % (
self.length, self.sample_rate)
class OggFLACVComment(VCommentDict):
def __init__(self, fileobj, info):
# data should be pointing at the start of an Ogg page, after
# the first FLAC page.
pages = []
complete = False
while not complete:
page = OggPage(fileobj)
if page.serial == info.serial:
pages.append(page)
complete = page.complete or (len(page.packets) > 1)
comment = BytesIO(OggPage.to_packets(pages)[0][4:])
super(OggFLACVComment, self).__init__(comment, framing=False)
def _inject(self, fileobj, padding_func):
"""Write tag data into the FLAC Vorbis comment packet/page."""
# Ogg FLAC has no convenient data marker like Vorbis, but the
# second packet - and second page - must be the comment data.
fileobj.seek(0)
page = OggPage(fileobj)
while not page.packets[0].startswith(b"\x7FFLAC"):
page = OggPage(fileobj)
first_page = page
while not (page.sequence == 1 and page.serial == first_page.serial):
page = OggPage(fileobj)
old_pages = [page]
while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
page = OggPage(fileobj)
if page.serial == first_page.serial:
old_pages.append(page)
packets = OggPage.to_packets(old_pages, strict=False)
# Set the new comment block.
data = self.write(framing=False)
data = packets[0][:1] + struct.pack(">I", len(data))[-3:] + data
packets[0] = data
new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
OggPage.replace(fileobj, old_pages, new_pages)
class OggFLAC(OggFileType):
"""OggFLAC(filething)
An Ogg FLAC file.
Arguments:
filething (filething)
Attributes:
info (`OggFLACStreamInfo`)
tags (`mutagen._vorbis.VCommentDict`)
"""
_Info = OggFLACStreamInfo
_Tags = OggFLACVComment
_Error = OggFLACHeaderError
_mimes = ["audio/x-oggflac"]
info = None
tags = None
@staticmethod
def score(filename, fileobj, header):
return (header.startswith(b"OggS") * (
(b"FLAC" in header) + (b"fLaC" in header)))
Open = OggFLAC
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggFLAC(filething)
filething.fileobj.seek(0)
t.delete(filething)
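# A hedged usage demo, not part of mutagen itself; pass an Ogg FLAC file
# path on the command line.
if __name__ == "__main__":
    import sys
    audio = OggFLAC(sys.argv[1])
    print(audio.info.pprint())
    if audio.tags is not None:
        print(audio.tags.pprint())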
| 8,517 | _file.py | rembo10_headphones/lib/mutagen/_file.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import warnings
from mutagen._util import DictMixin, loadfile
class FileType(DictMixin):
"""FileType(filething, **kwargs)
Args:
filething (filething): A filename or a file-like object
Subclasses might take further options via keyword arguments.
An abstract object wrapping tags and audio stream information.
Each file format has different potential tags and stream
information.
FileTypes implement an interface very similar to Metadata; the
dict interface, save, load, and delete calls on a FileType call
the appropriate methods on its tag data.
Attributes:
info (`StreamInfo`): contains length, bitrate, sample rate
tags (`Tags`): metadata tags, if any, otherwise `None`
"""
__module__ = "mutagen"
info = None
tags = None
filename = None
_mimes = ["application/octet-stream"]
def __init__(self, *args, **kwargs):
if not args and not kwargs:
warnings.warn("FileType constructor requires a filename",
DeprecationWarning)
else:
self.load(*args, **kwargs)
@loadfile()
def load(self, filething, *args, **kwargs):
raise NotImplementedError
def __getitem__(self, key):
"""Look up a metadata tag key.
If the file has no tags at all, a KeyError is raised.
"""
if self.tags is None:
raise KeyError(key)
else:
return self.tags[key]
def __setitem__(self, key, value):
"""Set a metadata tag.
If the file has no tags, an appropriate format is added (but
not written until save is called).
"""
if self.tags is None:
self.add_tags()
self.tags[key] = value
def __delitem__(self, key):
"""Delete a metadata tag key.
If the file has no tags at all, a KeyError is raised.
"""
if self.tags is None:
raise KeyError(key)
else:
del(self.tags[key])
def keys(self):
"""Return a list of keys in the metadata tag.
If the file has no tags at all, an empty list is returned.
"""
if self.tags is None:
return []
else:
return self.tags.keys()
@loadfile(writable=True)
def delete(self, filething=None):
"""delete(filething=None)
Remove tags from a file.
In cases where the tagging format is independent of the file type
(for example `mutagen.id3.ID3`) all traces of the tagging format will
be removed.
In cases where the tag is part of the file type, all tags and
padding will be removed.
The tags attribute will be cleared as well if there is one.
Does nothing if the file has no tags.
Raises:
mutagen.MutagenError: if deleting wasn't possible
"""
if self.tags is not None:
return self.tags.delete(filething)
@loadfile(writable=True)
def save(self, filething=None, **kwargs):
"""save(filething=None, **kwargs)
Save metadata tags.
Raises:
MutagenError: if saving wasn't possible
"""
if self.tags is not None:
return self.tags.save(filething, **kwargs)
def pprint(self):
"""
Returns:
text: stream information and comment key=value pairs.
"""
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
return stream
else:
return stream + ((tags and "\n" + tags) or "")
def add_tags(self):
"""Adds new tags to the file.
Raises:
mutagen.MutagenError:
if tags already exist or adding is not possible.
"""
raise NotImplementedError
@property
def mime(self):
"""A list of mime types (:class:`mutagen.text`)"""
mimes = []
for Kind in type(self).__mro__:
for mime in getattr(Kind, '_mimes', []):
if mime not in mimes:
mimes.append(mime)
return mimes
@staticmethod
def score(filename, fileobj, header):
"""Returns a score for how likely the file can be parsed by this type.
Args:
filename (fspath): a file path
fileobj (fileobj): a file object open in rb mode. Position is
undefined
            header (bytes): data of undefined length that starts at the
                beginning of the file.
Returns:
int: negative if definitely not a matching type, otherwise a score,
the bigger the more certain that the file can be loaded.
"""
raise NotImplementedError
class StreamInfo(object):
"""Abstract stream information object.
Provides attributes for length, bitrate, sample rate etc.
See the implementations for details.
"""
__module__ = "mutagen"
def pprint(self):
"""
Returns:
text: Print stream information
"""
raise NotImplementedError
@loadfile(method=False)
def File(filething, options=None, easy=False):
"""File(filething, options=None, easy=False)
Guess the type of the file and try to open it.
The file type is decided by several things, such as the first 128
bytes (which usually contains a file type identifier), the
filename extension, and the presence of existing tags.
If no appropriate type could be found, None is returned.
Args:
filething (filething)
options: Sequence of :class:`FileType` implementations,
defaults to all included ones.
easy (bool): If the easy wrappers should be returned if available.
For example :class:`EasyMP3 <mp3.EasyMP3>` instead of
:class:`MP3 <mp3.MP3>`.
Returns:
FileType: A FileType instance for the detected type or `None` in case
the type couldn't be determined.
Raises:
MutagenError: in case the detected type fails to load the file.
"""
if options is None:
from mutagen.asf import ASF
from mutagen.apev2 import APEv2File
from mutagen.flac import FLAC
if easy:
from mutagen.easyid3 import EasyID3FileType as ID3FileType
else:
from mutagen.id3 import ID3FileType
if easy:
from mutagen.mp3 import EasyMP3 as MP3
else:
from mutagen.mp3 import MP3
from mutagen.oggflac import OggFLAC
from mutagen.oggspeex import OggSpeex
from mutagen.oggtheora import OggTheora
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
if easy:
from mutagen.trueaudio import EasyTrueAudio as TrueAudio
else:
from mutagen.trueaudio import TrueAudio
from mutagen.wavpack import WavPack
if easy:
from mutagen.easymp4 import EasyMP4 as MP4
else:
from mutagen.mp4 import MP4
from mutagen.musepack import Musepack
from mutagen.monkeysaudio import MonkeysAudio
from mutagen.optimfrog import OptimFROG
from mutagen.aiff import AIFF
from mutagen.aac import AAC
from mutagen.ac3 import AC3
from mutagen.smf import SMF
from mutagen.tak import TAK
from mutagen.dsf import DSF
from mutagen.dsdiff import DSDIFF
from mutagen.wave import WAVE
options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC, AC3,
SMF, TAK, DSF, DSDIFF, WAVE]
if not options:
return None
fileobj = filething.fileobj
try:
header = fileobj.read(128)
except IOError:
header = b""
# Sort by name after score. Otherwise import order affects
# Kind sort order, which affects treatment of things with
# equals scores.
results = [(Kind.score(filething.name, fileobj, header), Kind.__name__)
for Kind in options]
results = list(zip(results, options))
results.sort()
(score, name), Kind = results[-1]
if score > 0:
try:
fileobj.seek(0, 0)
except IOError:
pass
return Kind(fileobj, filename=filething.filename)
else:
return None
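# A hedged usage sketch, not part of mutagen itself; "example.mp3" is a
# placeholder path.
#
#   >>> f = File("example.mp3")             # -> MP3 instance, or None
#   >>> f = File("example.mp3", easy=True)  # -> EasyMP3 instance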
| 8,518 | _riff.py | rembo10_headphones/lib/mutagen/_riff.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Borewit
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Resource Interchange File Format (RIFF)."""
import struct
from struct import pack
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
InvalidChunk,
)
class RiffChunk(IffChunk):
"""Generic RIFF chunk"""
@classmethod
def parse_header(cls, header):
return struct.unpack('<4sI', header)
@classmethod
def get_class(cls, id):
if id in (u'LIST', u'RIFF'):
return RiffListChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(pack('<4sI', id_, size))
def write_size(self):
self._fileobj.write(pack('<I', self.data_size))
class RiffListChunk(RiffChunk, IffContainerChunkMixin):
"""A RIFF chunk containing other chunks.
This is either a 'LIST' or 'RIFF'
"""
def parse_next_subchunk(self):
return RiffChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id not in (u'RIFF', u'LIST'):
raise InvalidChunk('Expected RIFF or LIST chunk, got %s' % id)
RiffChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class RiffFile(IffFile):
"""Representation of a RIFF file"""
def __init__(self, fileobj):
super().__init__(RiffChunk, fileobj)
if self.root.id != u'RIFF':
raise InvalidChunk("Root chunk must be a RIFF chunk, got %s"
% self.root.id)
self.file_type = self.root.name
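# A hedged usage sketch, not part of mutagen itself; "example.wav" is a
# placeholder path. Chunk IDs are right-stripped on parse, so u"fmt " is
# looked up as "fmt".
#
#   >>> with open("example.wav", "rb+") as h:
#   ...     riff = RiffFile(h)
#   ...     riff.file_type   # e.g. u'WAVE'
#   ...     "fmt" in riff    # subchunk lookup on the root chunk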
| 8,519 | _iff.py | rembo10_headphones/lib/mutagen/_iff.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2017 Borewit
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Base classes for various IFF based formats (e.g. AIFF or RIFF)."""
import sys
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
MutagenError,
convert_error,
delete_bytes,
insert_bytes,
loadfile,
reraise,
resize_bytes,
)
class error(MutagenError):
pass
class InvalidChunk(error):
pass
class EmptyChunk(InvalidChunk):
pass
def is_valid_chunk_id(id):
""" is_valid_chunk_id(FOURCC)
Arguments:
id (FOURCC)
Returns:
        True if valid; otherwise False
Check if argument id is valid FOURCC type.
"""
assert isinstance(id, str), \
'id is of type %s, must be str: %r' % (type(id), id)
return ((0 < len(id) <= 4) and (min(id) >= ' ') and
(max(id) <= '~'))
# Raise ValueError if the id is not a valid FOURCC
def assert_valid_chunk_id(id):
if not is_valid_chunk_id(id):
raise ValueError("IFF chunk ID must be four ASCII characters.")
class IffChunk(object):
"""Generic representation of a single IFF chunk.
IFF chunks always consist of an ID followed by the chunk size. The exact
format varies between different IFF based formats, e.g. AIFF uses
big-endian while RIFF uses little-endian.
"""
# Chunk headers are usually 8 bytes long (4 for ID and 4 for the size)
HEADER_SIZE = 8
@classmethod
def parse_header(cls, header):
"""Read ID and data_size from the given header.
Must be implemented in subclasses."""
raise error("Not implemented")
def write_new_header(self, id_, size):
"""Write the chunk header with id_ and size to the file.
Must be implemented in subclasses. The data must be written
to the current position in self._fileobj."""
raise error("Not implemented")
def write_size(self):
"""Write self.data_size to the file.
Must be implemented in subclasses. The data must be written
to the current position in self._fileobj."""
raise error("Not implemented")
@classmethod
def get_class(cls, id):
"""Returns the class for a new chunk for a given ID.
Can be overridden in subclasses to implement specific chunk types."""
return cls
@classmethod
def parse(cls, fileobj, parent_chunk=None):
header = fileobj.read(cls.HEADER_SIZE)
if len(header) < cls.HEADER_SIZE:
raise EmptyChunk('Header size < %i' % cls.HEADER_SIZE)
id, data_size = cls.parse_header(header)
try:
id = id.decode('ascii').rstrip()
except UnicodeDecodeError as e:
raise InvalidChunk(e)
if not is_valid_chunk_id(id):
raise InvalidChunk('Invalid chunk ID %r' % id)
return cls.get_class(id)(fileobj, id, data_size, parent_chunk)
def __init__(self, fileobj, id, data_size, parent_chunk):
self._fileobj = fileobj
self.id = id
self.data_size = data_size
self.parent_chunk = parent_chunk
self.data_offset = fileobj.tell()
self.offset = self.data_offset - self.HEADER_SIZE
self._calculate_size()
def __repr__(self):
return ("<%s id=%s, offset=%i, size=%i, data_offset=%i, data_size=%i>"
% (type(self).__name__, self.id, self.offset, self.size,
self.data_offset, self.data_size))
def read(self):
"""Read the chunks data"""
self._fileobj.seek(self.data_offset)
return self._fileobj.read(self.data_size)
def write(self, data):
"""Write the chunk data"""
if len(data) > self.data_size:
raise ValueError
self._fileobj.seek(self.data_offset)
self._fileobj.write(data)
# Write the padding bytes
padding = self.padding()
if padding:
self._fileobj.seek(self.data_offset + self.data_size)
self._fileobj.write(b'\x00' * padding)
def delete(self):
"""Removes the chunk from the file"""
delete_bytes(self._fileobj, self.size, self.offset)
if self.parent_chunk is not None:
self.parent_chunk._remove_subchunk(self)
self._fileobj.flush()
def _update_size(self, size_diff, changed_subchunk=None):
"""Update the size of the chunk"""
old_size = self.size
self.data_size += size_diff
self._fileobj.seek(self.offset + 4)
self.write_size()
self._calculate_size()
if self.parent_chunk is not None:
self.parent_chunk._update_size(self.size - old_size, self)
if changed_subchunk:
self._update_sibling_offsets(
changed_subchunk, old_size - self.size)
def _calculate_size(self):
self.size = self.HEADER_SIZE + self.data_size + self.padding()
assert self.size % 2 == 0
def resize(self, new_data_size):
"""Resize the file and update the chunk sizes"""
padding = new_data_size % 2
resize_bytes(self._fileobj, self.data_size + self.padding(),
new_data_size + padding, self.data_offset)
size_diff = new_data_size - self.data_size
self._update_size(size_diff)
self._fileobj.flush()
def padding(self):
"""Returns the number of padding bytes (0 or 1).
        IFF chunks are required to be an even number of bytes in total length. If
data_size is odd a padding byte will be added at the end.
"""
return self.data_size % 2
class IffContainerChunkMixin():
"""A IFF chunk containing other chunks.
A container chunk can have an additional name as the first 4 bytes of the
chunk data followed by an arbitrary number of subchunks. The root chunk of
the file is always a container chunk (e.g. the AIFF chunk or the FORM chunk
for RIFF) but there can be other types of container chunks (e.g. the LIST
chunks used in RIFF).
"""
    def parse_next_subchunk(self):
        """Parse and return the next subchunk at the current position.
        Must be implemented in subclasses."""
        raise error("Not implemented")
def init_container(self, name_size=4):
# Lists can store an additional name identifier before the subchunks
self.__name_size = name_size
if self.data_size < name_size:
raise InvalidChunk(
'Container chunk data size < %i' % name_size)
# Read the container name
if name_size > 0:
try:
self.name = self._fileobj.read(name_size).decode('ascii')
except UnicodeDecodeError as e:
raise error(e)
else:
self.name = None
# Load all IFF subchunks
self.__subchunks = []
def subchunks(self):
"""Returns a list of all subchunks.
The list is lazily loaded on first access.
"""
if not self.__subchunks:
next_offset = self.data_offset + self.__name_size
while next_offset < self.offset + self.size:
self._fileobj.seek(next_offset)
try:
chunk = self.parse_next_subchunk()
except EmptyChunk:
break
except InvalidChunk:
break
self.__subchunks.append(chunk)
# Calculate the location of the next chunk
next_offset = chunk.offset + chunk.size
return self.__subchunks
def insert_chunk(self, id_, data=None):
"""Insert a new chunk at the end of the container chunk"""
if not is_valid_chunk_id(id_):
raise KeyError("Invalid IFF key.")
next_offset = self.offset + self.size
size = self.HEADER_SIZE
data_size = 0
if data:
data_size = len(data)
padding = data_size % 2
size += data_size + padding
insert_bytes(self._fileobj, size, next_offset)
self._fileobj.seek(next_offset)
self.write_new_header(id_.ljust(4).encode('ascii'), data_size)
self._fileobj.seek(next_offset)
chunk = self.parse_next_subchunk()
self._update_size(chunk.size)
if data:
chunk.write(data)
self.subchunks().append(chunk)
self._fileobj.flush()
return chunk
def __contains__(self, id_):
"""Check if this chunk contains a specific subchunk."""
assert_valid_chunk_id(id_)
try:
self[id_]
return True
except KeyError:
return False
def __getitem__(self, id_):
"""Get a subchunk by ID."""
assert_valid_chunk_id(id_)
found_chunk = None
for chunk in self.subchunks():
if chunk.id == id_:
found_chunk = chunk
break
else:
raise KeyError("No %r chunk found" % id_)
return found_chunk
def __delitem__(self, id_):
"""Remove a chunk from the IFF file"""
assert_valid_chunk_id(id_)
self[id_].delete()
def _remove_subchunk(self, chunk):
assert chunk in self.__subchunks
self._update_size(-chunk.size, chunk)
self.__subchunks.remove(chunk)
def _update_sibling_offsets(self, changed_subchunk, size_diff):
"""Update the offsets of subchunks after `changed_subchunk`.
"""
index = self.__subchunks.index(changed_subchunk)
sibling_chunks = self.__subchunks[index + 1:len(self.__subchunks)]
for sibling in sibling_chunks:
sibling.offset -= size_diff
sibling.data_offset -= size_diff
class IffFile:
"""Representation of a IFF file"""
def __init__(self, chunk_cls, fileobj):
fileobj.seek(0)
self.root = chunk_cls.parse(fileobj)
def __contains__(self, id_):
"""Check if the IFF file contains a specific chunk"""
return id_ in self.root
def __getitem__(self, id_):
"""Get a chunk from the IFF file"""
return self.root[id_]
def __delitem__(self, id_):
"""Remove a chunk from the IFF file"""
self.delete_chunk(id_)
def delete_chunk(self, id_):
"""Remove a chunk from the IFF file"""
del self.root[id_]
def insert_chunk(self, id_, data=None):
"""Insert a new chunk at the end of the IFF file"""
return self.root.insert_chunk(id_, data)
class IffID3(ID3):
"""A generic IFF file with ID3v2 tags"""
def _load_file(self, fileobj):
raise error("Not implemented")
def _pre_load_header(self, fileobj):
try:
fileobj.seek(self._load_file(fileobj)['ID3'].data_offset)
except (InvalidChunk, KeyError):
raise ID3NoHeaderError("No ID3 chunk")
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None):
"""Save ID3v2 data to the IFF file"""
fileobj = filething.fileobj
iff_file = self._load_file(fileobj)
if 'ID3' not in iff_file:
iff_file.insert_chunk('ID3')
chunk = iff_file['ID3']
try:
data = self._prepare_data(
fileobj, chunk.data_offset, chunk.data_size, v2_version,
v23_sep, padding)
except ID3Error as e:
reraise(error, e, sys.exc_info()[2])
chunk.resize(len(data))
chunk.write(data)
@convert_error(IOError, error)
@loadfile(writable=True)
def delete(self, filething=None):
"""Completely removes the ID3 chunk from the IFF file"""
try:
iff_file = self._load_file(filething.fileobj)
del iff_file['ID3']
except KeyError:
pass
self.clear()
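# A hedged sketch, not part of mutagen itself: concrete formats (e.g.
# mutagen.aiff, mutagen.wave) subclass IffID3 and plug in their file parser;
# _ExampleID3 and SomeIffFile are hypothetical names.
#
#   class _ExampleID3(IffID3):
#       def _load_file(self, fileobj):
#           return SomeIffFile(fileobj)  # an IffFile subclass for the format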
| 8,520 | tak.py | rembo10_headphones/lib/mutagen/tak.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Lukáš Lalinský
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Tom's lossless Audio Kompressor (TAK) streams with APEv2 tags.
TAK is a lossless audio compressor developed by Thomas Becker.
For more information, see:
* http://www.thbeck.de/Tak/Tak.html
* http://wiki.hydrogenaudio.org/index.php?title=TAK
"""
__all__ = ["TAK", "Open", "delete"]
import struct
from mutagen import StreamInfo
from mutagen.apev2 import (
APEv2File,
delete,
error,
)
from mutagen._util import (
BitReader,
BitReaderError,
convert_error,
enum,
endswith,
)
@enum
class TAKMetadata(object):
END = 0
STREAM_INFO = 1
SEEK_TABLE = 2 # Removed in TAK 1.1.1
SIMPLE_WAVE_DATA = 3
ENCODER_INFO = 4
UNUSED_SPACE = 5 # New in TAK 1.0.3
MD5 = 6 # New in TAK 1.1.1
LAST_FRAME_INFO = 7 # New in TAK 1.1.1
CRC_SIZE = 3
ENCODER_INFO_CODEC_BITS = 6
ENCODER_INFO_PROFILE_BITS = 4
ENCODER_INFO_TOTAL_BITS = ENCODER_INFO_CODEC_BITS + ENCODER_INFO_PROFILE_BITS
SIZE_INFO_FRAME_DURATION_BITS = 4
SIZE_INFO_SAMPLE_NUM_BITS = 35
SIZE_INFO_TOTAL_BITS = (SIZE_INFO_FRAME_DURATION_BITS
+ SIZE_INFO_SAMPLE_NUM_BITS)
AUDIO_FORMAT_DATA_TYPE_BITS = 3
AUDIO_FORMAT_SAMPLE_RATE_BITS = 18
AUDIO_FORMAT_SAMPLE_BITS_BITS = 5
AUDIO_FORMAT_CHANNEL_NUM_BITS = 4
AUDIO_FORMAT_HAS_EXTENSION_BITS = 1
AUDIO_FORMAT_BITS_MIN = 31
AUDIO_FORMAT_BITS_MAX = 31 + 102
SAMPLE_RATE_MIN = 6000
SAMPLE_BITS_MIN = 8
CHANNEL_NUM_MIN = 1
STREAM_INFO_BITS_MIN = (ENCODER_INFO_TOTAL_BITS
+ SIZE_INFO_TOTAL_BITS
+ AUDIO_FORMAT_BITS_MIN)
STREAM_INFO_BITS_MAX = (ENCODER_INFO_TOTAL_BITS
+ SIZE_INFO_TOTAL_BITS
+ AUDIO_FORMAT_BITS_MAX)
STREAM_INFO_SIZE_MIN = (STREAM_INFO_BITS_MIN + 7) // 8
STREAM_INFO_SIZE_MAX = (STREAM_INFO_BITS_MAX + 7) // 8
class _LSBBitReader(BitReader):
"""BitReader implementation which reads bits starting at LSB in each byte.
"""
def _lsb(self, count):
value = self._buffer & 0xff >> (8 - count)
self._buffer = self._buffer >> count
self._bits -= count
return value
def bits(self, count):
"""Reads `count` bits and returns an uint, LSB read first.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object.
"""
if count < 0:
raise ValueError
value = 0
if count <= self._bits:
value = self._lsb(count)
else:
# First read all available bits
shift = 0
remaining = count
if self._bits > 0:
remaining -= self._bits
shift = self._bits
value = self._lsb(self._bits)
assert self._bits == 0
# Now add additional bytes
n_bytes = (remaining - self._bits + 7) // 8
data = self._fileobj.read(n_bytes)
if len(data) != n_bytes:
raise BitReaderError("not enough data")
for b in bytearray(data):
if remaining > 8: # Use full byte
remaining -= 8
value = (b << shift) | value
shift += 8
else:
self._buffer = b
self._bits = 8
b = self._lsb(remaining)
value = (b << shift) | value
assert 0 <= self._bits < 8
return value
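# Illustrative sketch (not part of the original module): LSB-first bit
# order, with 0xb4 == 0b10110100.
#
#   >>> from io import BytesIO
#   >>> r = _LSBBitReader(BytesIO(b"\xb4"))
#   >>> r.bits(3), r.bits(5)  # 0b100, 0b10110
#   (4, 22)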
class TAKHeaderError(error):
pass
class TAKInfo(StreamInfo):
"""TAK stream information.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): audio sample size
encoder_info (`mutagen.text`): encoder version
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
encoder_info = ""
@convert_error(IOError, TAKHeaderError)
@convert_error(BitReaderError, TAKHeaderError)
def __init__(self, fileobj):
stream_id = fileobj.read(4)
        if len(stream_id) != 4 or stream_id != b"tBaK":
raise TAKHeaderError("not a TAK file")
bitreader = _LSBBitReader(fileobj)
while True:
type = TAKMetadata(bitreader.bits(7))
bitreader.skip(1) # Unused
size = struct.unpack("<I", bitreader.bytes(3) + b'\0')[0]
data_size = size - CRC_SIZE
pos = fileobj.tell()
if type == TAKMetadata.END:
break
elif type == TAKMetadata.STREAM_INFO:
self._parse_stream_info(bitreader, size)
elif type == TAKMetadata.ENCODER_INFO:
self._parse_encoder_info(bitreader, data_size)
assert bitreader.is_aligned()
fileobj.seek(pos + size)
if self.sample_rate > 0:
self.length = self.number_of_samples / float(self.sample_rate)
def _parse_stream_info(self, bitreader, size):
if size < STREAM_INFO_SIZE_MIN or size > STREAM_INFO_SIZE_MAX:
raise TAKHeaderError("stream info has invalid length")
# Encoder Info
bitreader.skip(ENCODER_INFO_CODEC_BITS)
bitreader.skip(ENCODER_INFO_PROFILE_BITS)
# Size Info
bitreader.skip(SIZE_INFO_FRAME_DURATION_BITS)
self.number_of_samples = bitreader.bits(SIZE_INFO_SAMPLE_NUM_BITS)
# Audio Format
bitreader.skip(AUDIO_FORMAT_DATA_TYPE_BITS)
self.sample_rate = (bitreader.bits(AUDIO_FORMAT_SAMPLE_RATE_BITS)
+ SAMPLE_RATE_MIN)
self.bits_per_sample = (bitreader.bits(AUDIO_FORMAT_SAMPLE_BITS_BITS)
+ SAMPLE_BITS_MIN)
self.channels = (bitreader.bits(AUDIO_FORMAT_CHANNEL_NUM_BITS)
+ CHANNEL_NUM_MIN)
bitreader.skip(AUDIO_FORMAT_HAS_EXTENSION_BITS)
def _parse_encoder_info(self, bitreader, size):
patch = bitreader.bits(8)
minor = bitreader.bits(8)
major = bitreader.bits(8)
self.encoder_info = "TAK %d.%d.%d" % (major, minor, patch)
def pprint(self):
return u"%s, %d Hz, %d bits, %.2f seconds, %d channel(s)" % (
self.encoder_info or "TAK", self.sample_rate, self.bits_per_sample,
self.length, self.channels)
class TAK(APEv2File):
"""TAK(filething)
Arguments:
filething (filething)
Attributes:
info (`TAKInfo`)
"""
_Info = TAKInfo
_mimes = ["audio/x-tak"]
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"tBaK") + endswith(filename.lower(), ".tak")
Open = TAK
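# A hedged usage demo, not part of mutagen itself; pass a TAK file path on
# the command line.
if __name__ == "__main__":
    import sys
    tak = TAK(sys.argv[1])
    print(tak.info.pprint())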
| 8,521 | aac.py | rembo10_headphones/lib/mutagen/aac.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
* ADTS - Audio Data Transport Stream
* ADIF - Audio Data Interchange Format
* See ISO/IEC 13818-7 / 14496-03
"""
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import BitReader, BitReaderError, MutagenError, loadfile, \
convert_error, endswith
from mutagen.id3._util import BitPaddedInt
_FREQS = [
96000, 88200, 64000, 48000,
44100, 32000, 24000, 22050,
16000, 12000, 11025, 8000,
7350,
]
class _ADTSStream(object):
"""Represents a series of frames belonging to the same stream"""
parsed_frames = 0
"""Number of successfully parsed frames"""
offset = 0
"""offset in bytes at which the stream starts (the first sync word)"""
@classmethod
def find_stream(cls, fileobj, max_bytes):
"""Returns a possibly valid _ADTSStream or None.
Args:
max_bytes (int): maximum bytes to read
"""
r = BitReader(fileobj)
stream = cls(r)
if stream.sync(max_bytes):
stream.offset = (r.get_position() - 12) // 8
return stream
def sync(self, max_bytes):
"""Find the next sync.
Returns True if found."""
# at least 2 bytes for the sync
max_bytes = max(max_bytes, 2)
r = self._r
r.align()
while max_bytes > 0:
try:
b = r.bytes(1)
if b == b"\xff":
if r.bits(4) == 0xf:
return True
r.align()
max_bytes -= 2
else:
max_bytes -= 1
except BitReaderError:
return False
return False
def __init__(self, r):
"""Use _ADTSStream.find_stream to create a stream"""
self._fixed_header_key = None
self._r = r
self.offset = -1
self.parsed_frames = 0
self._samples = 0
self._payload = 0
self._start = r.get_position() / 8
self._last = self._start
@property
def bitrate(self):
"""Bitrate of the raw aac blocks, excluding framing/crc"""
assert self.parsed_frames, "no frame parsed yet"
if self._samples == 0:
return 0
return (8 * self._payload * self.frequency) // self._samples
@property
def samples(self):
"""samples so far"""
assert self.parsed_frames, "no frame parsed yet"
return self._samples
@property
def size(self):
"""bytes read in the stream so far (including framing)"""
assert self.parsed_frames, "no frame parsed yet"
return self._last - self._start
@property
def channels(self):
"""0 means unknown"""
assert self.parsed_frames, "no frame parsed yet"
b_index = self._fixed_header_key[6]
if b_index == 7:
return 8
elif b_index > 7:
return 0
else:
return b_index
@property
def frequency(self):
"""0 means unknown"""
assert self.parsed_frames, "no frame parsed yet"
f_index = self._fixed_header_key[4]
try:
return _FREQS[f_index]
except IndexError:
return 0
def parse_frame(self):
"""True if parsing was successful.
Fails either because the frame wasn't valid or the stream ended.
"""
try:
return self._parse_frame()
except BitReaderError:
return False
def _parse_frame(self):
r = self._r
# start == position of sync word
start = r.get_position() - 12
# adts_fixed_header
id_ = r.bits(1)
layer = r.bits(2)
protection_absent = r.bits(1)
profile = r.bits(2)
sampling_frequency_index = r.bits(4)
private_bit = r.bits(1)
# TODO: if 0 we could parse program_config_element()
channel_configuration = r.bits(3)
original_copy = r.bits(1)
home = r.bits(1)
# the fixed header has to be the same for every frame in the stream
fixed_header_key = (
id_, layer, protection_absent, profile, sampling_frequency_index,
private_bit, channel_configuration, original_copy, home,
)
if self._fixed_header_key is None:
self._fixed_header_key = fixed_header_key
else:
if self._fixed_header_key != fixed_header_key:
return False
# adts_variable_header
r.skip(2) # copyright_identification_bit/start
frame_length = r.bits(13)
r.skip(11) # adts_buffer_fullness
nordbif = r.bits(2)
# adts_variable_header end
crc_overhead = 0
if not protection_absent:
crc_overhead += (nordbif + 1) * 16
if nordbif != 0:
crc_overhead *= 2
left = (frame_length * 8) - (r.get_position() - start)
if left < 0:
return False
r.skip(left)
assert r.is_aligned()
self._payload += (left - crc_overhead) / 8
self._samples += (nordbif + 1) * 1024
self._last = r.get_position() / 8
self.parsed_frames += 1
return True
class ProgramConfigElement(object):
element_instance_tag = None
object_type = None
sampling_frequency_index = None
channels = None
def __init__(self, r):
"""Reads the program_config_element()
Raises BitReaderError
"""
self.element_instance_tag = r.bits(4)
self.object_type = r.bits(2)
self.sampling_frequency_index = r.bits(4)
num_front_channel_elements = r.bits(4)
num_side_channel_elements = r.bits(4)
num_back_channel_elements = r.bits(4)
num_lfe_channel_elements = r.bits(2)
num_assoc_data_elements = r.bits(3)
num_valid_cc_elements = r.bits(4)
mono_mixdown_present = r.bits(1)
if mono_mixdown_present == 1:
r.skip(4)
stereo_mixdown_present = r.bits(1)
if stereo_mixdown_present == 1:
r.skip(4)
matrix_mixdown_idx_present = r.bits(1)
if matrix_mixdown_idx_present == 1:
r.skip(3)
elms = num_front_channel_elements + num_side_channel_elements + \
num_back_channel_elements
channels = 0
for i in range(elms):
channels += 1
element_is_cpe = r.bits(1)
if element_is_cpe:
channels += 1
r.skip(4)
channels += num_lfe_channel_elements
self.channels = channels
r.skip(4 * num_lfe_channel_elements)
r.skip(4 * num_assoc_data_elements)
r.skip(5 * num_valid_cc_elements)
r.align()
comment_field_bytes = r.bits(8)
r.skip(8 * comment_field_bytes)
class AACError(MutagenError):
pass
class AACInfo(StreamInfo):
"""AACInfo()
AAC stream information.
The length of the stream is just a guess and might not be correct.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bitrate (`int`): audio bitrate, in bits per second
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
@convert_error(IOError, AACError)
def __init__(self, fileobj):
"""Raises AACError"""
# skip id3v2 header
start_offset = 0
header = fileobj.read(10)
if header.startswith(b"ID3"):
size = BitPaddedInt(header[6:])
start_offset = size + 10
fileobj.seek(start_offset)
adif = fileobj.read(4)
if adif == b"ADIF":
self._parse_adif(fileobj)
self._type = "ADIF"
else:
self._parse_adts(fileobj, start_offset)
self._type = "ADTS"
def _parse_adif(self, fileobj):
r = BitReader(fileobj)
try:
copyright_id_present = r.bits(1)
if copyright_id_present:
r.skip(72) # copyright_id
r.skip(1 + 1) # original_copy, home
bitstream_type = r.bits(1)
self.bitrate = r.bits(23)
npce = r.bits(4)
if bitstream_type == 0:
r.skip(20) # adif_buffer_fullness
pce = ProgramConfigElement(r)
try:
self.sample_rate = _FREQS[pce.sampling_frequency_index]
except IndexError:
pass
self.channels = pce.channels
# other pces..
for i in range(npce):
ProgramConfigElement(r)
r.align()
except BitReaderError as e:
raise AACError(e)
# use bitrate + data size to guess length
start = fileobj.tell()
fileobj.seek(0, 2)
length = fileobj.tell() - start
if self.bitrate != 0:
self.length = (8.0 * length) / self.bitrate
def _parse_adts(self, fileobj, start_offset):
max_initial_read = 512
max_resync_read = 10
max_sync_tries = 10
frames_max = 100
frames_needed = 3
# Try up to X times to find a sync word and read up to Y frames.
# If more than Z frames are valid we assume a valid stream
offset = start_offset
for i in range(max_sync_tries):
fileobj.seek(offset)
s = _ADTSStream.find_stream(fileobj, max_initial_read)
if s is None:
raise AACError("sync not found")
# start right after the last found offset
offset += s.offset + 1
for i in range(frames_max):
if not s.parse_frame():
break
if not s.sync(max_resync_read):
break
if s.parsed_frames >= frames_needed:
break
else:
raise AACError(
"no valid stream found (only %d frames)" % s.parsed_frames)
self.sample_rate = s.frequency
self.channels = s.channels
self.bitrate = s.bitrate
# size from stream start to end of file
fileobj.seek(0, 2)
stream_size = fileobj.tell() - (offset + s.offset)
# approx
self.length = 0.0
if s.frequency != 0:
self.length = \
float(s.samples * stream_size) / (s.size * s.frequency)
def pprint(self):
return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % (
self._type, self.sample_rate, self.length, self.channels,
self.bitrate)
class AAC(FileType):
"""AAC(filething)
Arguments:
filething (filething)
Load ADTS or ADIF streams containing AAC.
Tagging is not supported.
Use the ID3/APEv2 classes directly instead.
Attributes:
info (`AACInfo`)
"""
_mimes = ["audio/x-aac"]
@loadfile()
def load(self, filething):
self.info = AACInfo(filething.fileobj)
def add_tags(self):
raise AACError("doesn't support tags")
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
s = endswith(filename, ".aac") or endswith(filename, ".adts") or \
endswith(filename, ".adif")
s += b"ADIF" in header
return s
Open = AAC
error = AACError
__all__ = ["AAC", "Open"]
_tags.py
rembo10_headphones/lib/mutagen/_tags.py
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from ._util import loadfile
class PaddingInfo(object):
"""PaddingInfo()
Abstract padding information object.
This will be passed to the callback function that can be used
for saving tags.
::
def my_callback(info: PaddingInfo):
return info.get_default_padding()
The callback should return the amount of padding to use (>= 0) based on
the content size and the padding of the file after saving. The actual used
amount of padding might vary depending on the file format (due to
alignment etc.)
The default implementation can be accessed using the
:meth:`get_default_padding` method in the callback.
Attributes:
padding (`int`): The amount of padding left after saving in bytes
(can be negative if more data needs to be added as padding is
available)
size (`int`): The amount of data following the padding
"""
def __init__(self, padding, size):
self.padding = padding
self.size = size
def get_default_padding(self):
"""The default implementation which tries to select a reasonable
amount of padding and which might change in future versions.
Returns:
int: Amount of padding after saving
"""
high = 1024 * 10 + self.size // 100 # 10 KiB + 1% of trailing data
low = 1024 + self.size // 1000 # 1 KiB + 0.1% of trailing data
if self.padding >= 0:
# enough padding left
if self.padding > high:
# padding too large, reduce
return low
# just use existing padding as is
return self.padding
else:
# not enough padding, add some
return low
def _get_padding(self, user_func):
if user_func is None:
return self.get_default_padding()
else:
return user_func(self)
def __repr__(self):
return "<%s size=%d padding=%d>" % (
type(self).__name__, self.size, self.padding)
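# Editor's note: a worked example of the heuristic above, not part of the
# original module.  With size=100000: low = 1024 + 100 = 1124 and
# high = 10240 + 1000 = 11240, so:
#
#     PaddingInfo(-1, 100000).get_default_padding()     # -> 1124 (grow)
#     PaddingInfo(500, 100000).get_default_padding()    # -> 500 (keep)
#     PaddingInfo(50000, 100000).get_default_padding()  # -> 1124 (shrink)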
class Tags(object):
"""`Tags` is the base class for many of the tag objects in Mutagen.
In many cases it has a dict like interface.
"""
__module__ = "mutagen"
def pprint(self):
"""
Returns:
text: tag information
"""
raise NotImplementedError
class Metadata(Tags):
"""Metadata(filething=None, **kwargs)
Args:
filething (filething): a filename or a file-like object or `None`
to create an empty instance (like ``ID3()``)
Like :class:`Tags` but for standalone tagging formats that are not
solely managed by a container format.
Provides methods to load, save and delete tags.
"""
__module__ = "mutagen"
def __init__(self, *args, **kwargs):
if args or kwargs:
self.load(*args, **kwargs)
@loadfile()
def load(self, filething, **kwargs):
raise NotImplementedError
@loadfile(writable=False)
def save(self, filething=None, **kwargs):
"""save(filething=None, **kwargs)
Save changes to a file.
Args:
filething (filething): or `None`
Raises:
MutagenError: if saving wasn't possible
"""
raise NotImplementedError
@loadfile(writable=False)
def delete(self, filething=None):
"""delete(filething=None)
Remove tags from a file.
In most cases this means any traces of the tag will be removed
from the file.
Args:
filething (filething): or `None`
Raises:
MutagenError: if deleting wasn't possible
"""
raise NotImplementedError
easyid3.py
rembo10_headphones/lib/mutagen/easyid3.py
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Easier access to ID3 tags.
EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear
more like Vorbis or APEv2 tags.
"""
import mutagen.id3
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match, loadfile
from mutagen.id3 import ID3, error, delete, ID3FileType
__all__ = ['EasyID3', 'Open', 'delete']
class EasyID3KeyError(KeyError, ValueError, error):
"""Raised when trying to get/set an invalid key.
Subclasses both KeyError and ValueError for API compatibility,
catching KeyError is preferred.
"""
class EasyID3(DictMixin, Metadata):
"""EasyID3(filething=None)
A file with an ID3 tag.
Like Vorbis comments, EasyID3 keys are case-insensitive ASCII
strings. Only a subset of ID3 frames are supported by default. Use
EasyID3.RegisterKey and its wrappers to support more.
You can also set the GetFallback, SetFallback, and DeleteFallback
to generic key getter/setter/deleter functions, which are called
if no specific handler is registered for a key. Additionally,
ListFallback can be used to supply an arbitrary list of extra
keys. These can be set on EasyID3 or on individual instances after
creation.
To use an EasyID3 class with mutagen.mp3.MP3::
from mutagen.mp3 import EasyMP3 as MP3
MP3(filename)
Because many of the attributes are constructed on the fly, things
like the following will not work::
ezid3["performer"].append("Joe")
Instead, you must do::
values = ezid3["performer"]
values.append("Joe")
ezid3["performer"] = values
"""
Set = {}
Get = {}
Delete = {}
List = {}
# For compatibility.
valid_keys = Get
GetFallback = None
SetFallback = None
DeleteFallback = None
ListFallback = None
@classmethod
def RegisterKey(cls, key,
getter=None, setter=None, deleter=None, lister=None):
"""Register a new key mapping.
A key mapping is four functions, a getter, setter, deleter,
and lister. The key may be either a string or a glob pattern.
        The getter, deleter, and lister receive an ID3 instance and
the requested key name. The setter also receives the desired
value, which will be a list of strings.
The getter, setter, and deleter are used to implement __getitem__,
__setitem__, and __delitem__.
The lister is used to implement keys(). It should return a
list of keys that are actually in the ID3 instance, provided
by its associated getter.
"""
key = key.lower()
if getter is not None:
cls.Get[key] = getter
if setter is not None:
cls.Set[key] = setter
if deleter is not None:
cls.Delete[key] = deleter
if lister is not None:
cls.List[key] = lister
@classmethod
def RegisterTextKey(cls, key, frameid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of ID3 frame name to EasyID3 key, then you can use this
function::
EasyID3.RegisterTextKey("title", "TIT2")
"""
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
try:
frame = id3[frameid]
except KeyError:
id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value))
else:
frame.encoding = 3
frame.text = value
def deleter(id3, key):
del(id3[frameid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterTXXXKey(cls, key, desc):
"""Register a user-defined text frame key.
Some ID3 tags are stored in TXXX frames, which allow a
freeform 'description' which acts as a subkey,
e.g. TXXX:BARCODE.::
EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
"""
frameid = "TXXX:" + desc
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
enc = 0
# Store 8859-1 if we can, per MusicBrainz spec.
for v in value:
if v and max(v) > u'\x7f':
enc = 3
break
id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))
def deleter(id3, key):
del(id3[frameid])
cls.RegisterKey(key, getter, setter, deleter)
def __init__(self, filename=None):
self.__id3 = ID3()
if filename is not None:
self.load(filename)
load = property(lambda s: s.__id3.load,
lambda s, v: setattr(s.__id3, 'load', v))
@loadfile(writable=True, create=True)
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
See :meth:`mutagen.id3.ID3.save` for more info.
"""
if v2_version == 3:
# EasyID3 only works with v2.4 frames, so update_to_v23() would
# break things. We have to save a shallow copy of all tags
            # and restore it after saving. Due to CHAP/CTOC, the copying
            # has to be done recursively, as implemented in ID3Tags.
backup = self.__id3._copy()
try:
self.__id3.update_to_v23()
self.__id3.save(
filething, v1=v1, v2_version=v2_version, v23_sep=v23_sep,
padding=padding)
finally:
self.__id3._restore(backup)
else:
self.__id3.save(filething, v1=v1, v2_version=v2_version,
v23_sep=v23_sep, padding=padding)
delete = property(lambda s: s.__id3.delete,
lambda s, v: setattr(s.__id3, 'delete', v))
filename = property(lambda s: s.__id3.filename,
lambda s, fn: setattr(s.__id3, 'filename', fn))
@property
def size(self):
return self.__id3.size
def __getitem__(self, key):
func = dict_match(self.Get, key.lower(), self.GetFallback)
if func is not None:
return func(self.__id3, key)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def __setitem__(self, key, value):
if isinstance(value, str):
value = [value]
func = dict_match(self.Set, key.lower(), self.SetFallback)
if func is not None:
return func(self.__id3, key, value)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def __delitem__(self, key):
func = dict_match(self.Delete, key.lower(), self.DeleteFallback)
if func is not None:
return func(self.__id3, key)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def keys(self):
keys = []
for key in self.Get.keys():
if key in self.List:
keys.extend(self.List[key](self.__id3, key))
elif key in self:
keys.append(key)
if self.ListFallback is not None:
keys.extend(self.ListFallback(self.__id3, ""))
return keys
def pprint(self):
"""Print tag key=value pairs."""
strings = []
for key in sorted(self.keys()):
values = self[key]
for value in values:
strings.append("%s=%s" % (key, value))
return "\n".join(strings)
Open = EasyID3
def genre_get(id3, key):
return id3["TCON"].genres
def genre_set(id3, key, value):
try:
frame = id3["TCON"]
except KeyError:
id3.add(mutagen.id3.TCON(encoding=3, text=value))
else:
frame.encoding = 3
frame.genres = value
def genre_delete(id3, key):
del(id3["TCON"])
def date_get(id3, key):
return [stamp.text for stamp in id3["TDRC"].text]
def date_set(id3, key, value):
id3.add(mutagen.id3.TDRC(encoding=3, text=value))
def date_delete(id3, key):
del(id3["TDRC"])
def original_date_get(id3, key):
return [stamp.text for stamp in id3["TDOR"].text]
def original_date_set(id3, key, value):
id3.add(mutagen.id3.TDOR(encoding=3, text=value))
def original_date_delete(id3, key):
del(id3["TDOR"])
def performer_get(id3, key):
people = []
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
raise KeyError(key)
for role, person in mcl.people:
if role == wanted_role:
people.append(person)
if people:
return people
else:
raise KeyError(key)
def performer_set(id3, key, value):
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
mcl = mutagen.id3.TMCL(encoding=3, people=[])
id3.add(mcl)
mcl.encoding = 3
people = [p for p in mcl.people if p[0] != wanted_role]
for v in value:
people.append((wanted_role, v))
mcl.people = people
def performer_delete(id3, key):
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
raise KeyError(key)
people = [p for p in mcl.people if p[0] != wanted_role]
if people == mcl.people:
raise KeyError(key)
elif people:
mcl.people = people
else:
del(id3["TMCL"])
def performer_list(id3, key):
try:
mcl = id3["TMCL"]
except KeyError:
return []
else:
return list(set("performer:" + p[0] for p in mcl.people))
def musicbrainz_trackid_get(id3, key):
return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')]
def musicbrainz_trackid_set(id3, key, value):
if len(value) != 1:
raise ValueError("only one track ID may be set per song")
value = value[0].encode('ascii')
try:
frame = id3["UFID:http://musicbrainz.org"]
except KeyError:
frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value)
id3.add(frame)
else:
frame.data = value
def musicbrainz_trackid_delete(id3, key):
del(id3["UFID:http://musicbrainz.org"])
def website_get(id3, key):
urls = [frame.url for frame in id3.getall("WOAR")]
if urls:
return urls
else:
raise EasyID3KeyError(key)
def website_set(id3, key, value):
id3.delall("WOAR")
for v in value:
id3.add(mutagen.id3.WOAR(url=v))
def website_delete(id3, key):
id3.delall("WOAR")
def gain_get(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
raise EasyID3KeyError(key)
else:
return [u"%+f dB" % frame.gain]
def gain_set(id3, key, value):
if len(value) != 1:
raise ValueError(
"there must be exactly one gain value, not %r.", value)
gain = float(value[0].split()[0])
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
id3.add(frame)
frame.gain = gain
def gain_delete(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
pass
else:
if frame.peak:
frame.gain = 0.0
else:
del(id3["RVA2:" + key[11:-5]])
def peak_get(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
raise EasyID3KeyError(key)
else:
return [u"%f" % frame.peak]
def peak_set(id3, key, value):
if len(value) != 1:
raise ValueError(
"there must be exactly one peak value, not %r.", value)
peak = float(value[0])
if peak >= 2 or peak < 0:
raise ValueError("peak must be => 0 and < 2.")
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
id3.add(frame)
frame.peak = peak
def peak_delete(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
pass
else:
if frame.gain:
frame.peak = 0.0
else:
del(id3["RVA2:" + key[11:-5]])
def peakgain_list(id3, key):
keys = []
for frame in id3.getall("RVA2"):
keys.append("replaygain_%s_gain" % frame.desc)
keys.append("replaygain_%s_peak" % frame.desc)
return keys
for frameid, key in {
"TALB": "album",
"TBPM": "bpm",
"TCMP": "compilation", # iTunes extension
"TCOM": "composer",
"TCOP": "copyright",
"TENC": "encodedby",
"TEXT": "lyricist",
"TLEN": "length",
"TMED": "media",
"TMOO": "mood",
"TIT2": "title",
"TIT3": "version",
"TPE1": "artist",
"TPE2": "albumartist",
"TPE3": "conductor",
"TPE4": "arranger",
"TPOS": "discnumber",
"TPUB": "organization",
"TRCK": "tracknumber",
"TOLY": "author",
"TSO2": "albumartistsort", # iTunes extension
"TSOA": "albumsort",
"TSOC": "composersort", # iTunes extension
"TSOP": "artistsort",
"TSOT": "titlesort",
"TSRC": "isrc",
"TSST": "discsubtitle",
"TLAN": "language",
}.items():
EasyID3.RegisterTextKey(key, frameid)
EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete)
EasyID3.RegisterKey("date", date_get, date_set, date_delete)
EasyID3.RegisterKey("originaldate", original_date_get, original_date_set,
original_date_delete)
EasyID3.RegisterKey(
"performer:*", performer_get, performer_set, performer_delete,
performer_list)
EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get,
musicbrainz_trackid_set, musicbrainz_trackid_delete)
EasyID3.RegisterKey("website", website_get, website_set, website_delete)
EasyID3.RegisterKey(
"replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list)
EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete)
# At various times, information for this came from
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
for desc, key in {
u"MusicBrainz Artist Id": "musicbrainz_artistid",
u"MusicBrainz Album Id": "musicbrainz_albumid",
u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
u"MusicBrainz TRM Id": "musicbrainz_trmid",
u"MusicIP PUID": "musicip_puid",
u"MusicMagic Fingerprint": "musicip_fingerprint",
u"MusicBrainz Album Status": "musicbrainz_albumstatus",
u"MusicBrainz Album Type": "musicbrainz_albumtype",
u"MusicBrainz Album Release Country": "releasecountry",
u"MusicBrainz Disc Id": "musicbrainz_discid",
u"ASIN": "asin",
u"ALBUMARTISTSORT": "albumartistsort",
u"PERFORMER": "performer",
u"BARCODE": "barcode",
u"CATALOGNUMBER": "catalognumber",
u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid",
u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
u"MusicBrainz Work Id": "musicbrainz_workid",
u"Acoustid Fingerprint": "acoustid_fingerprint",
u"Acoustid Id": "acoustid_id",
}.items():
EasyID3.RegisterTXXXKey(key, desc)
class EasyID3FileType(ID3FileType):
"""EasyID3FileType(filething=None)
Like ID3FileType, but uses EasyID3 for tags.
Arguments:
filething (filething)
Attributes:
tags (`EasyID3`)
"""
ID3 = EasyID3
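# Editor's note: a minimal usage sketch, not part of the original module;
# it assumes the given MP3 already has an ID3 tag.
if __name__ == "__main__":
    import sys
    audio = EasyID3(sys.argv[1])
    audio["title"] = u"Example Title"     # maps to a TIT2 frame
    audio["performer:guitar"] = [u"Joe"]  # matched by the performer:* key
    print(audio.pprint())
    audio.save()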
dsdiff.py
rembo10_headphones/lib/mutagen/dsdiff.py
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""DSDIFF audio stream information and tags."""
import struct
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffID3,
IffFile,
InvalidChunk,
error as IffError,
)
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["DSDIFF", "Open", "delete"]
class error(IffError):
pass
# See
# https://dsd-guide.com/sites/default/files/white-papers/DSDIFF_1.5_Spec.pdf
class DSDIFFChunk(IffChunk):
"""Representation of a single DSDIFF chunk"""
HEADER_SIZE = 12
@classmethod
def parse_header(cls, header):
return struct.unpack('>4sQ', header)
@classmethod
def get_class(cls, id):
if id in DSDIFFListChunk.LIST_CHUNK_IDS:
return DSDIFFListChunk
elif id == 'DST':
return DSTChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(struct.pack('>4sQ', id_, size))
def write_size(self):
self._fileobj.write(struct.pack('>Q', self.data_size))
class DSDIFFListChunk(DSDIFFChunk, IffContainerChunkMixin):
"""A DSDIFF chunk containing other chunks.
"""
LIST_CHUNK_IDS = ['FRM8', 'PROP']
def parse_next_subchunk(self):
return DSDIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id not in self.LIST_CHUNK_IDS:
raise InvalidChunk('Not a list chunk: %s' % id)
DSDIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class DSTChunk(DSDIFFChunk, IffContainerChunkMixin):
"""A DSDIFF chunk containing other chunks.
"""
def parse_next_subchunk(self):
return DSDIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id != 'DST':
raise InvalidChunk('Not a DST chunk: %s' % id)
DSDIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container(name_size=0)
class DSDIFFFile(IffFile):
"""Representation of a DSDIFF file"""
def __init__(self, fileobj):
super().__init__(DSDIFFChunk, fileobj)
if self.root.id != u'FRM8':
raise InvalidChunk("Root chunk must be a FRM8 chunk, got %r"
% self.root)
class DSDIFFInfo(StreamInfo):
"""DSDIFF stream information.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): audio sample size (for DSD this is always 1)
bitrate (`int`): audio bitrate, in bits per second
compression (`str`): DSD (uncompressed) or DST
"""
channels = 0
length = 0
sample_rate = 0
bits_per_sample = 1
bitrate = 0
compression = None
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
iff = DSDIFFFile(fileobj)
try:
prop_chunk = iff['PROP']
except KeyError as e:
raise error(str(e))
if prop_chunk.name == 'SND ':
for chunk in prop_chunk.subchunks():
if chunk.id == 'FS' and chunk.data_size == 4:
data = chunk.read()
if len(data) < 4:
raise InvalidChunk("Not enough data in FS chunk")
self.sample_rate, = struct.unpack('>L', data[:4])
elif chunk.id == 'CHNL' and chunk.data_size >= 2:
data = chunk.read()
if len(data) < 2:
raise InvalidChunk("Not enough data in CHNL chunk")
self.channels, = struct.unpack('>H', data[:2])
elif chunk.id == 'CMPR' and chunk.data_size >= 4:
data = chunk.read()
if len(data) < 4:
raise InvalidChunk("Not enough data in CMPR chunk")
compression_id, = struct.unpack('>4s', data[:4])
self.compression = compression_id.decode('ascii').rstrip()
if self.sample_rate < 0:
raise error("Invalid sample rate")
if self.compression == 'DSD': # not compressed
try:
dsd_chunk = iff['DSD']
except KeyError as e:
raise error(str(e))
# DSD data has one bit per sample. Eight samples of a channel
# are clustered together for a channel byte. For multiple channels
# the channel bytes are interleaved (in the order specified in the
# CHNL chunk). See DSDIFF spec chapter 3.3.
sample_count = dsd_chunk.data_size * 8 / (self.channels or 1)
if self.sample_rate != 0:
self.length = sample_count / float(self.sample_rate)
self.bitrate = (self.channels * self.bits_per_sample
* self.sample_rate)
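            # Editor's note (worked example, not in the original source):
            # a stereo DSD64 file has sample_rate = 2822400 Hz, so
            # bitrate = 2 * 1 * 2822400 = 5644800 bps.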
elif self.compression == 'DST':
try:
dst_frame = iff['DST']
dst_frame_info = dst_frame['FRTE']
except KeyError as e:
raise error(str(e))
if dst_frame_info.data_size >= 6:
data = dst_frame_info.read()
if len(data) < 6:
raise InvalidChunk("Not enough data in FRTE chunk")
frame_count, frame_rate = struct.unpack('>LH', data[:6])
if frame_rate:
self.length = frame_count / frame_rate
if frame_count:
dst_data_size = dst_frame.data_size - dst_frame_info.size
avg_frame_size = dst_data_size / frame_count
self.bitrate = avg_frame_size * 8 * frame_rate
def pprint(self):
return u"%d channel DSDIFF (%s) @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.compression, self.bitrate, self.sample_rate,
self.length)
class _DSDIFFID3(IffID3):
"""A DSDIFF file with ID3v2 tags"""
def _load_file(self, fileobj):
return DSDIFFFile(fileobj)
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the DSDIFF file"""
try:
del DSDIFFFile(filething.fileobj)[u'ID3']
except KeyError:
pass
class DSDIFF(FileType):
"""DSDIFF(filething)
    A DSDIFF audio file.
    For tagging, ID3v2 data is added to a chunk with the ID "ID3 ".
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`DSDIFFInfo`)
"""
_mimes = ["audio/x-dff"]
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
fileobj = filething.fileobj
try:
self.tags = _DSDIFFID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
fileobj.seek(0, 0)
self.info = DSDIFFInfo(fileobj)
def add_tags(self):
"""Add empty ID3 tags to the file."""
if self.tags is None:
self.tags = _DSDIFFID3()
else:
raise error("an ID3 tag already exists")
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"FRM8") * 2 + endswith(filename, ".dff")
Open = DSDIFF
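# Editor's note: a minimal usage sketch, not part of the original module.
if __name__ == "__main__":
    import sys
    f = DSDIFF(sys.argv[1])
    print(f.info.pprint())  # e.g. "2 channel DSDIFF (DSD) @ 5644800 bps, ..."
    if f.tags is None:
        f.add_tags()        # attach an empty ID3 tag for later editing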
_specs.py
rembo10_headphones/lib/mutagen/id3/_specs.py
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
import codecs
from struct import unpack, pack
from .._util import total_ordering, decode_terminated, enum, flags, \
cdata, encode_endian, intround, bchr
from ._util import BitPaddedInt, is_valid_frame_id
@enum
class PictureType(object):
"""Enumeration of image types defined by the ID3 standard for the APIC
frame, but also reused in WMA/FLAC/VorbisComment.
"""
OTHER = 0
"""Other"""
FILE_ICON = 1
"""32x32 pixels 'file icon' (PNG only)"""
OTHER_FILE_ICON = 2
"""Other file icon"""
COVER_FRONT = 3
"""Cover (front)"""
COVER_BACK = 4
"""Cover (back)"""
LEAFLET_PAGE = 5
"""Leaflet page"""
MEDIA = 6
"""Media (e.g. label side of CD)"""
LEAD_ARTIST = 7
"""Lead artist/lead performer/soloist"""
ARTIST = 8
"""Artist/performer"""
CONDUCTOR = 9
"""Conductor"""
BAND = 10
"""Band/Orchestra"""
COMPOSER = 11
"""Composer"""
LYRICIST = 12
"""Lyricist/text writer"""
RECORDING_LOCATION = 13
"""Recording Location"""
DURING_RECORDING = 14
"""During recording"""
DURING_PERFORMANCE = 15
"""During performance"""
SCREEN_CAPTURE = 16
"""Movie/video screen capture"""
FISH = 17
"""A bright coloured fish"""
ILLUSTRATION = 18
"""Illustration"""
BAND_LOGOTYPE = 19
"""Band/artist logotype"""
PUBLISHER_LOGOTYPE = 20
"""Publisher/Studio logotype"""
def _pprint(self):
return str(self).split(".", 1)[-1].lower().replace("_", " ")
@flags
class CTOCFlags(object):
TOP_LEVEL = 0x2
"""Identifies the CTOC root frame"""
ORDERED = 0x1
"""Child elements are ordered"""
class SpecError(Exception):
pass
class Spec(object):
handle_nodata = False
"""If reading empty data is possible and writing it back will again
result in no data.
"""
def __init__(self, name, default):
self.name = name
self.default = default
def __hash__(self):
raise TypeError("Spec objects are unhashable")
def _validate23(self, frame, value, **kwargs):
"""Return a possibly modified value which, if written,
results in valid id3v2.3 data.
"""
return value
def read(self, header, frame, data):
"""
Returns:
(value: object, left_data: bytes)
Raises:
SpecError
"""
raise NotImplementedError
def write(self, config, frame, value):
"""
Returns:
bytes: The serialized data
Raises:
SpecError
"""
raise NotImplementedError
def validate(self, frame, value):
"""
Returns:
the validated value
Raises:
ValueError
TypeError
"""
raise NotImplementedError
class ByteSpec(Spec):
def __init__(self, name, default=0):
super(ByteSpec, self).__init__(name, default)
def read(self, header, frame, data):
return bytearray(data)[0], data[1:]
def write(self, config, frame, value):
return bchr(value)
def validate(self, frame, value):
if value is not None:
bchr(value)
return value
class PictureTypeSpec(ByteSpec):
def __init__(self, name, default=PictureType.COVER_FRONT):
super(PictureTypeSpec, self).__init__(name, default)
def read(self, header, frame, data):
value, data = ByteSpec.read(self, header, frame, data)
return PictureType(value), data
def validate(self, frame, value):
value = ByteSpec.validate(self, frame, value)
if value is not None:
return PictureType(value)
return value
class CTOCFlagsSpec(ByteSpec):
def read(self, header, frame, data):
value, data = ByteSpec.read(self, header, frame, data)
return CTOCFlags(value), data
def validate(self, frame, value):
value = ByteSpec.validate(self, frame, value)
if value is not None:
return CTOCFlags(value)
return value
class IntegerSpec(Spec):
def read(self, header, frame, data):
return int(BitPaddedInt(data, bits=8)), b''
def write(self, config, frame, value):
return BitPaddedInt.to_str(value, bits=8, width=-1)
def validate(self, frame, value):
return value
class SizedIntegerSpec(Spec):
def __init__(self, name, size, default):
self.name, self.__sz = name, size
self.default = default
def read(self, header, frame, data):
return int(BitPaddedInt(data[:self.__sz], bits=8)), data[self.__sz:]
def write(self, config, frame, value):
return BitPaddedInt.to_str(value, bits=8, width=self.__sz)
def validate(self, frame, value):
return value
@enum
class Encoding(object):
"""Text Encoding"""
LATIN1 = 0
"""ISO-8859-1"""
UTF16 = 1
"""UTF-16 with BOM"""
UTF16BE = 2
"""UTF-16BE without BOM"""
UTF8 = 3
"""UTF-8"""
class EncodingSpec(ByteSpec):
def __init__(self, name, default=Encoding.UTF16):
super(EncodingSpec, self).__init__(name, default)
def read(self, header, frame, data):
enc, data = super(EncodingSpec, self).read(header, frame, data)
if enc not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE,
Encoding.UTF8):
raise SpecError('Invalid Encoding: %r' % enc)
return Encoding(enc), data
def validate(self, frame, value):
if value is None:
raise TypeError
if value not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE,
Encoding.UTF8):
raise ValueError('Invalid Encoding: %r' % value)
return Encoding(value)
def _validate23(self, frame, value, **kwargs):
# only 0, 1 are valid in v2.3, default to utf-16
if value not in (Encoding.LATIN1, Encoding.UTF16):
value = Encoding.UTF16
return value
class StringSpec(Spec):
"""A fixed size ASCII only payload."""
def __init__(self, name, length, default=None):
if default is None:
default = u" " * length
super(StringSpec, self).__init__(name, default)
self.len = length
def read(s, header, frame, data):
chunk = data[:s.len]
try:
ascii = chunk.decode("ascii")
except UnicodeDecodeError:
raise SpecError("not ascii")
else:
chunk = ascii
return chunk, data[s.len:]
def write(self, config, frame, value):
value = value.encode("ascii")
return (bytes(value) + b'\x00' * self.len)[:self.len]
def validate(self, frame, value):
if value is None:
raise TypeError
if not isinstance(value, str):
raise TypeError("%s has to be str" % self.name)
value.encode("ascii")
if len(value) == self.len:
return value
raise ValueError('Invalid StringSpec[%d] data: %r' % (self.len, value))
class RVASpec(Spec):
def __init__(self, name, stereo_only, default=[0, 0]):
        # stereo_only: RVA has only 2 channels, while RVAD has 6 channels
super(RVASpec, self).__init__(name, default)
self._max_values = 4 if stereo_only else 12
def read(self, header, frame, data):
# inc/dec flags
spec = ByteSpec("flags", 0)
flags, data = spec.read(header, frame, data)
if not data:
raise SpecError("truncated")
# how many bytes per value
bits, data = spec.read(header, frame, data)
if bits == 0:
# not allowed according to spec
raise SpecError("bits used has to be > 0")
bytes_per_value = (bits + 7) // 8
values = []
while len(data) >= bytes_per_value and len(values) < self._max_values:
v = BitPaddedInt(data[:bytes_per_value], bits=8)
data = data[bytes_per_value:]
values.append(v)
if len(values) < 2:
raise SpecError("First two values not optional")
# if the respective flag bit is zero, take as decrement
for bit, index in enumerate([0, 1, 4, 5, 8, 10]):
if not cdata.test_bit(flags, bit):
try:
values[index] = -values[index]
except IndexError:
break
return values, data
def write(self, config, frame, values):
if len(values) < 2 or len(values) > self._max_values:
raise SpecError(
"at least two volume change values required, max %d" %
self._max_values)
spec = ByteSpec("flags", 0)
flags = 0
values = list(values)
for bit, index in enumerate([0, 1, 4, 5, 8, 10]):
try:
if values[index] < 0:
values[index] = -values[index]
else:
flags |= (1 << bit)
except IndexError:
break
buffer_ = bytearray()
buffer_.extend(spec.write(config, frame, flags))
        # serialize the values and make them all the same size (min 2 bytes)
byte_values = [
BitPaddedInt.to_str(v, bits=8, width=-1, minwidth=2)
for v in values]
max_bytes = max([len(v) for v in byte_values])
byte_values = [v.ljust(max_bytes, b"\x00") for v in byte_values]
bits = max_bytes * 8
buffer_.extend(spec.write(config, frame, bits))
for v in byte_values:
buffer_.extend(v)
return bytes(buffer_)
def validate(self, frame, values):
if len(values) < 2 or len(values) > self._max_values:
raise ValueError("needs list of length 2..%d" % self._max_values)
return values
class FrameIDSpec(StringSpec):
def __init__(self, name, length):
super(FrameIDSpec, self).__init__(name, length, u"X" * length)
def validate(self, frame, value):
value = super(FrameIDSpec, self).validate(frame, value)
if not is_valid_frame_id(value):
raise ValueError("Invalid frame ID")
return value
class BinaryDataSpec(Spec):
handle_nodata = True
def __init__(self, name, default=b""):
super(BinaryDataSpec, self).__init__(name, default)
def read(self, header, frame, data):
return data, b''
def write(self, config, frame, value):
if isinstance(value, bytes):
return value
value = str(value).encode("ascii")
return value
def validate(self, frame, value):
if value is None:
raise TypeError
if isinstance(value, bytes):
return value
else:
raise TypeError("%s has to be bytes" % self.name)
value = str(value).encode("ascii")
return value
def iter_text_fixups(data, encoding):
"""Yields a series of repaired text values for decoding"""
yield data
if encoding == Encoding.UTF16BE:
# wrong termination
yield data + b"\x00"
elif encoding == Encoding.UTF16:
# wrong termination
yield data + b"\x00"
# utf-16 is missing BOM, content is usually utf-16-le
yield codecs.BOM_UTF16_LE + data
# both cases combined
yield codecs.BOM_UTF16_LE + data + b"\x00"
class EncodedTextSpec(Spec):
_encodings = {
Encoding.LATIN1: ('latin1', b'\x00'),
Encoding.UTF16: ('utf16', b'\x00\x00'),
Encoding.UTF16BE: ('utf_16_be', b'\x00\x00'),
Encoding.UTF8: ('utf8', b'\x00'),
}
def __init__(self, name, default=u""):
super(EncodedTextSpec, self).__init__(name, default)
def read(self, header, frame, data):
enc, term = self._encodings[frame.encoding]
err = None
for data in iter_text_fixups(data, frame.encoding):
try:
value, data = decode_terminated(data, enc, strict=False)
except ValueError as e:
err = e
else:
# Older id3 did not support multiple values, but we still
                # read them. To avoid misinterpreting zero padded values
                # as a list of empty strings, stop if everything left is
                # zero.
# https://github.com/quodlibet/mutagen/issues/276
if header.version < header._V24 and not data.strip(b"\x00"):
data = b""
return value, data
raise SpecError(err)
def write(self, config, frame, value):
enc, term = self._encodings[frame.encoding]
try:
return encode_endian(value, enc, le=True) + term
except UnicodeEncodeError as e:
raise SpecError(e)
def validate(self, frame, value):
return str(value)
class MultiSpec(Spec):
def __init__(self, name, *specs, **kw):
super(MultiSpec, self).__init__(name, default=kw.get('default'))
self.specs = specs
self.sep = kw.get('sep')
def read(self, header, frame, data):
values = []
while data:
record = []
for spec in self.specs:
value, data = spec.read(header, frame, data)
record.append(value)
if len(self.specs) != 1:
values.append(record)
else:
values.append(record[0])
return values, data
def write(self, config, frame, value):
data = []
if len(self.specs) == 1:
for v in value:
data.append(self.specs[0].write(config, frame, v))
else:
for record in value:
for v, s in zip(record, self.specs):
data.append(s.write(config, frame, v))
return b''.join(data)
def validate(self, frame, value):
if self.sep and isinstance(value, str):
value = value.split(self.sep)
if isinstance(value, list):
if len(self.specs) == 1:
return [self.specs[0].validate(frame, v) for v in value]
else:
return [
[s.validate(frame, v) for (v, s) in zip(val, self.specs)]
for val in value]
raise ValueError('Invalid MultiSpec data: %r' % value)
def _validate23(self, frame, value, **kwargs):
if len(self.specs) != 1:
return [[s._validate23(frame, v, **kwargs)
for (v, s) in zip(val, self.specs)]
for val in value]
spec = self.specs[0]
# Merge single text spec multispecs only.
        # (TimeStampSpec being the exception, but it's not a valid v2.3 frame)
if not isinstance(spec, EncodedTextSpec) or \
isinstance(spec, TimeStampSpec):
return value
value = [spec._validate23(frame, v, **kwargs) for v in value]
if kwargs.get("sep") is not None:
return [spec.validate(frame, kwargs["sep"].join(value))]
return value
class EncodedNumericTextSpec(EncodedTextSpec):
pass
class EncodedNumericPartTextSpec(EncodedTextSpec):
pass
class Latin1TextSpec(Spec):
def __init__(self, name, default=u""):
super(Latin1TextSpec, self).__init__(name, default)
def read(self, header, frame, data):
if b'\x00' in data:
data, ret = data.split(b'\x00', 1)
else:
ret = b''
return data.decode('latin1'), ret
def write(self, config, data, value):
return value.encode('latin1') + b'\x00'
def validate(self, frame, value):
return str(value)
class ID3FramesSpec(Spec):
handle_nodata = True
def __init__(self, name, default=[]):
super(ID3FramesSpec, self).__init__(name, default)
def read(self, header, frame, data):
from ._tags import ID3Tags
tags = ID3Tags()
return tags, tags._read(header, data)
def _validate23(self, frame, value, **kwargs):
from ._tags import ID3Tags
v = ID3Tags()
for frame in value.values():
v.add(frame._get_v23_frame(**kwargs))
return v
def write(self, config, frame, value):
return bytes(value._write(config))
def validate(self, frame, value):
from ._tags import ID3Tags
if isinstance(value, ID3Tags):
return value
tags = ID3Tags()
for v in value:
tags.add(v)
return tags
class Latin1TextListSpec(Spec):
def __init__(self, name, default=[]):
super(Latin1TextListSpec, self).__init__(name, default)
self._bspec = ByteSpec("entry_count", default=0)
self._lspec = Latin1TextSpec("child_element_id")
def read(self, header, frame, data):
count, data = self._bspec.read(header, frame, data)
entries = []
for i in range(count):
entry, data = self._lspec.read(header, frame, data)
entries.append(entry)
return entries, data
def write(self, config, frame, value):
b = self._bspec.write(config, frame, len(value))
for v in value:
b += self._lspec.write(config, frame, v)
return b
def validate(self, frame, value):
return [self._lspec.validate(frame, v) for v in value]
@total_ordering
class ID3TimeStamp(object):
"""A time stamp in ID3v2 format.
This is a restricted form of the ISO 8601 standard; time stamps
take the form of:
YYYY-MM-DD HH:MM:SS
Or some partial form (YYYY-MM-DD HH, YYYY, etc.).
The 'text' attribute contains the raw text data of the time stamp.
"""
import re
def __init__(self, text):
if isinstance(text, ID3TimeStamp):
text = text.text
elif not isinstance(text, str):
raise TypeError("not a str")
self.text = text
__formats = ['%04d'] + ['%02d'] * 5
__seps = ['-', '-', ' ', ':', ':', 'x']
def get_text(self):
parts = [self.year, self.month, self.day,
self.hour, self.minute, self.second]
pieces = []
for i, part in enumerate(parts):
if part is None:
break
pieces.append(self.__formats[i] % part + self.__seps[i])
return u''.join(pieces)[:-1]
def set_text(self, text, splitre=re.compile('[-T:/.]|\\s+')):
year, month, day, hour, minute, second = \
splitre.split(text + ':::::')[:6]
for a in 'year month day hour minute second'.split():
try:
v = int(locals()[a])
except ValueError:
v = None
setattr(self, a, v)
text = property(get_text, set_text, doc="ID3v2.4 date and time.")
def __str__(self):
return self.text
def __bytes__(self):
return self.text.encode("utf-8")
def __repr__(self):
return repr(self.text)
def __eq__(self, other):
return isinstance(other, ID3TimeStamp) and self.text == other.text
def __lt__(self, other):
return self.text < other.text
__hash__ = object.__hash__
def encode(self, *args):
return self.text.encode(*args)
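# Editor's note: a short example of the timestamp handling above, not part
# of the original module:
#
#     stamp = ID3TimeStamp(u"2004-06-02T13:07:42")
#     (stamp.year, stamp.month, stamp.day)  # -> (2004, 6, 2)
#     stamp.text                            # -> u'2004-06-02 13:07:42'
#     ID3TimeStamp(u"2004") < stamp         # -> True, ordering is textual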
class TimeStampSpec(EncodedTextSpec):
def read(self, header, frame, data):
value, data = super(TimeStampSpec, self).read(header, frame, data)
return self.validate(frame, value), data
def write(self, config, frame, data):
return super(TimeStampSpec, self).write(config, frame,
data.text.replace(' ', 'T'))
def validate(self, frame, value):
try:
return ID3TimeStamp(value)
except TypeError:
raise ValueError("Invalid ID3TimeStamp: %r" % value)
class ChannelSpec(ByteSpec):
(OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE,
BACKCENTRE, SUBWOOFER) = range(9)
class VolumeAdjustmentSpec(Spec):
def read(self, header, frame, data):
value, = unpack('>h', data[0:2])
return value / 512.0, data[2:]
def write(self, config, frame, value):
number = intround(value * 512)
# pack only fails in 2.7, do it manually in 2.6
if not -32768 <= number <= 32767:
raise SpecError("not in range")
return pack('>h', number)
def validate(self, frame, value):
if value is not None:
try:
self.write(None, frame, value)
except SpecError:
raise ValueError("out of range")
return value
class VolumePeakSpec(Spec):
def read(self, header, frame, data):
# http://bugs.xmms.org/attachment.cgi?id=113&action=view
peak = 0
data_array = bytearray(data)
bits = data_array[0]
vol_bytes = min(4, (bits + 7) >> 3)
# not enough frame data
if vol_bytes + 1 > len(data):
raise SpecError("not enough frame data")
shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8
for i in range(1, vol_bytes + 1):
peak *= 256
peak += data_array[i]
peak *= 2 ** shift
return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:]
def write(self, config, frame, value):
number = intround(value * 32768)
# pack only fails in 2.7, do it manually in 2.6
if not 0 <= number <= 65535:
raise SpecError("not in range")
# always write as 16 bits for sanity.
return b"\x10" + pack('>H', number)
def validate(self, frame, value):
if value is not None:
try:
self.write(None, frame, value)
except SpecError:
raise ValueError("out of range")
return value
class SynchronizedTextSpec(EncodedTextSpec):
def read(self, header, frame, data):
texts = []
encoding, term = self._encodings[frame.encoding]
while data:
try:
value, data = decode_terminated(data, encoding)
except ValueError:
raise SpecError("decoding error")
if len(data) < 4:
raise SpecError("not enough data")
time, = struct.unpack(">I", data[:4])
texts.append((value, time))
data = data[4:]
return texts, b""
def write(self, config, frame, value):
data = []
encoding, term = self._encodings[frame.encoding]
for text, time in value:
try:
text = encode_endian(text, encoding, le=True) + term
except UnicodeEncodeError as e:
raise SpecError(e)
data.append(text + struct.pack(">I", time))
return b"".join(data)
def validate(self, frame, value):
return value
class KeyEventSpec(Spec):
def read(self, header, frame, data):
events = []
while len(data) >= 5:
events.append(struct.unpack(">bI", data[:5]))
data = data[5:]
return events, data
def write(self, config, frame, value):
return b"".join(struct.pack(">bI", *event) for event in value)
def validate(self, frame, value):
return list(value)
class VolumeAdjustmentsSpec(Spec):
# Not to be confused with VolumeAdjustmentSpec.
def read(self, header, frame, data):
adjustments = {}
while len(data) >= 4:
freq, adj = struct.unpack(">Hh", data[:4])
data = data[4:]
freq /= 2.0
adj /= 512.0
adjustments[freq] = adj
adjustments = sorted(adjustments.items())
return adjustments, data
def write(self, config, frame, value):
value.sort()
return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512))
for (freq, adj) in value)
def validate(self, frame, value):
return list(value)
class ASPIIndexSpec(Spec):
def read(self, header, frame, data):
if frame.b == 16:
format = "H"
size = 2
elif frame.b == 8:
format = "B"
size = 1
else:
raise SpecError("invalid bit count in ASPI (%d)" % frame.b)
indexes = data[:frame.N * size]
data = data[frame.N * size:]
try:
return list(struct.unpack(">" + format * frame.N, indexes)), data
except struct.error as e:
raise SpecError(e)
def write(self, config, frame, values):
if frame.b == 16:
format = "H"
elif frame.b == 8:
format = "B"
else:
raise SpecError("frame.b must be 8 or 16")
try:
return struct.pack(">" + format * frame.N, *values)
except struct.error as e:
raise SpecError(e)
def validate(self, frame, values):
return list(values)
_util.py
rembo10_headphones/lib/mutagen/id3/_util.py
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2013 Christoph Reiter
# 2014 Ben Ockmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from mutagen._util import MutagenError
def is_valid_frame_id(frame_id):
return frame_id.isalnum() and frame_id.isupper()
class ID3SaveConfig(object):
def __init__(self, v2_version=4, v23_separator=None):
assert v2_version in (3, 4)
self.v2_version = v2_version
self.v23_separator = v23_separator
class error(MutagenError):
pass
class ID3NoHeaderError(error, ValueError):
pass
class ID3UnsupportedVersionError(error, NotImplementedError):
pass
class ID3EncryptionUnsupportedError(error, NotImplementedError):
pass
class ID3JunkFrameError(error):
pass
class unsynch(object):
@staticmethod
def decode(value):
fragments = bytearray(value).split(b'\xff')
if len(fragments) > 1 and not fragments[-1]:
raise ValueError('string ended unsafe')
for f in fragments[1:]:
if (not f) or (f[0] >= 0xE0):
raise ValueError('invalid sync-safe string')
if f[0] == 0x00:
del f[0]
return bytes(bytearray(b'\xff').join(fragments))
@staticmethod
def encode(value):
fragments = bytearray(value).split(b'\xff')
for f in fragments[1:]:
if (not f) or (f[0] >= 0xE0) or (f[0] == 0x00):
f.insert(0, 0x00)
return bytes(bytearray(b'\xff').join(fragments))
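# Editor's note: a round-trip example, not part of the original module.
# A 0xff byte followed by a byte >= 0xE0 would look like an MPEG sync, so
# encode() stuffs a zero byte in between and decode() strips it again:
#
#     unsynch.encode(b"\xff\xe0")      # -> b'\xff\x00\xe0'
#     unsynch.decode(b"\xff\x00\xe0")  # -> b'\xff\xe0'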
class _BitPaddedMixin(object):
def as_str(self, width=4, minwidth=4):
return self.to_str(self, self.bits, self.bigendian, width, minwidth)
@staticmethod
def to_str(value, bits=7, bigendian=True, width=4, minwidth=4):
mask = (1 << bits) - 1
if width != -1:
index = 0
bytes_ = bytearray(width)
try:
while value:
bytes_[index] = value & mask
value >>= bits
index += 1
except IndexError:
raise ValueError('Value too wide (>%d bytes)' % width)
else:
# PCNT and POPM use growing integers
# of at least 4 bytes (=minwidth) as counters.
bytes_ = bytearray()
append = bytes_.append
while value:
append(value & mask)
value >>= bits
bytes_ = bytes_.ljust(minwidth, b"\x00")
if bigendian:
bytes_.reverse()
return bytes(bytes_)
@staticmethod
def has_valid_padding(value, bits=7):
"""Whether the padding bits are all zero"""
assert bits <= 8
mask = (((1 << (8 - bits)) - 1) << bits)
if isinstance(value, int):
while value:
if value & mask:
return False
value >>= 8
elif isinstance(value, bytes):
for byte in bytearray(value):
if byte & mask:
return False
else:
raise TypeError
return True
class BitPaddedInt(int, _BitPaddedMixin):
def __new__(cls, value, bits=7, bigendian=True):
mask = (1 << (bits)) - 1
numeric_value = 0
shift = 0
if isinstance(value, int):
if value < 0:
raise ValueError
while value:
numeric_value += (value & mask) << shift
value >>= 8
shift += bits
elif isinstance(value, bytes):
if bigendian:
value = reversed(value)
for byte in bytearray(value):
numeric_value += (byte & mask) << shift
shift += bits
else:
raise TypeError
self = int.__new__(BitPaddedInt, numeric_value)
self.bits = bits
self.bigendian = bigendian
return self
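# Editor's note: a short example, not part of the original module.  ID3v2
# headers store sizes as "sync-safe" integers with 7 significant bits per
# byte; BitPaddedInt converts between that form and plain ints:
#
#     int(BitPaddedInt(b"\x00\x00\x02\x01"))  # -> 257
#     BitPaddedInt.to_str(257)                # -> b'\x00\x00\x02\x01'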
class ID3BadUnsynchData(error, ValueError):
"""Deprecated"""
class ID3BadCompressedData(error, ValueError):
"""Deprecated"""
class ID3TagError(error, ValueError):
"""Deprecated"""
class ID3Warning(error, UserWarning):
"""Deprecated"""
__init__.py
rembo10_headphones/lib/mutagen/id3/__init__.py
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""ID3v2 reading and writing.
This is based off of the following references:
* http://id3.org/id3v2.4.0-structure
* http://id3.org/id3v2.4.0-frames
* http://id3.org/id3v2.3.0
* http://id3.org/id3v2-00
* http://id3.org/ID3v1
Its largest deviation from the above (versions 2.3 and 2.2) is that it
will not interpret the / character as a separator, and will almost
always accept null separators to generate multi-valued text frames.
Because ID3 frame structure differs between frame types, each frame is
implemented as a different class (e.g. TIT2 as mutagen.id3.TIT2). Each
frame's documentation contains a list of its attributes.
Since this file's documentation is a little unwieldy, you are probably
interested in the :class:`ID3` class to start with.
"""
from ._file import ID3, ID3FileType, delete, ID3v1SaveOptions
from ._specs import Encoding, PictureType, CTOCFlags, ID3TimeStamp
from ._frames import Frames, Frames_2_2, Frame, TextFrame, UrlFrame, \
UrlFrameU, TimeStampTextFrame, BinaryFrame, NumericPartTextFrame, \
NumericTextFrame, PairedTextFrame
from ._util import ID3NoHeaderError, error, ID3UnsupportedVersionError
from ._id3v1 import ParseID3v1, MakeID3v1
from ._tags import ID3Tags
from ._frames import (AENC, APIC, ASPI, BUF, CHAP, CNT, COM, COMM, COMR, CRA,
CRM, CTOC, ENCR, EQU2, ETC, ETCO, GEO, GEOB, GP1, GRID, GRP1, IPL, IPLS,
LINK, LNK, MCDI, MCI, MLL, MLLT, MVI, MVIN, MVN, MVNM, OWNE, PCNT, PCST,
PIC, POP, POPM, POSS, PRIV, RBUF, REV, RVA, RVA2, RVAD, RVRB, SEEK, SIGN,
SLT, STC, SYLT, SYTC, TAL, TALB, TBP, TBPM, TCAT, TCM, TCMP, TCO, TCOM,
TCON, TCOP, TCP, TCR, TDA, TDAT, TDEN, TDES, TDLY, TDOR, TDRC, TDRL, TDTG,
TDY, TEN, TENC, TEXT, TFLT, TFT, TGID, TIM, TIME, TIPL, TIT1, TIT2, TIT3,
TKE, TKEY, TKWD, TLA, TLAN, TLE, TLEN, TMCL, TMED, TMOO, TMT, TOA, TOAL,
TOF, TOFN, TOL, TOLY, TOPE, TOR, TORY, TOT, TOWN, TP1, TP2, TP3, TP4, TPA,
TPB, TPE1, TPE2, TPE3, TPE4, TPOS, TPRO, TPUB, TRC, TRCK, TRD, TRDA, TRK,
TRSN, TRSO, TS2, TSA, TSC, TSI, TSIZ, TSO2, TSOA, TSOC, TSOP, TSOT, TSP,
TSRC, TSS, TSSE, TSST, TST, TT1, TT2, TT3, TXT, TXX, TXXX, TYE, TYER, UFI,
UFID, ULT, USER, USLT, WAF, WAR, WAS, WCM, WCOM, WCOP, WCP, WFED, WOAF,
WOAR, WOAS, WORS, WPAY, WPB, WPUB, WXX, WXXX)
# deprecated
from ._util import ID3EncryptionUnsupportedError, ID3JunkFrameError, \
ID3BadUnsynchData, ID3BadCompressedData, ID3TagError, ID3Warning, \
BitPaddedInt as _BitPaddedIntForPicard
# support open(filename) as interface
Open = ID3
# flake8
ID3, ID3FileType, delete, ID3v1SaveOptions, Encoding, PictureType, CTOCFlags,
ID3TimeStamp, Frames, Frames_2_2, Frame, TextFrame, UrlFrame, UrlFrameU,
TimeStampTextFrame, BinaryFrame, NumericPartTextFrame, NumericTextFrame,
PairedTextFrame, ID3NoHeaderError, error, ID3UnsupportedVersionError,
ParseID3v1, MakeID3v1, ID3Tags, ID3EncryptionUnsupportedError,
ID3JunkFrameError, ID3BadUnsynchData, ID3BadCompressedData, ID3TagError,
ID3Warning
AENC, APIC, ASPI, BUF, CHAP, CNT, COM, COMM, COMR, CRA, CRM, CTOC, ENCR, EQU2,
ETC, ETCO, GEO, GEOB, GP1, GRID, GRP1, IPL, IPLS, LINK, LNK, MCDI, MCI, MLL,
MLLT, MVI, MVIN, MVN, MVNM, OWNE, PCNT, PCST, PIC, POP, POPM, POSS, PRIV,
RBUF, REV, RVA, RVA2, RVAD, RVRB, SEEK, SIGN, SLT, STC, SYLT, SYTC, TAL, TALB,
TBP, TBPM, TCAT, TCM, TCMP, TCO, TCOM, TCON, TCOP, TCP, TCR, TDA, TDAT, TDEN,
TDES, TDLY, TDOR, TDRC, TDRL, TDTG, TDY, TEN, TENC, TEXT, TFLT, TFT, TGID,
TIM, TIME, TIPL, TIT1, TIT2, TIT3, TKE, TKEY, TKWD, TLA, TLAN, TLE, TLEN,
TMCL, TMED, TMOO, TMT, TOA, TOAL, TOF, TOFN, TOL, TOLY, TOPE, TOR, TORY, TOT,
TOWN, TP1, TP2, TP3, TP4, TPA, TPB, TPE1, TPE2, TPE3, TPE4, TPOS, TPRO, TPUB,
TRC, TRCK, TRD, TRDA, TRK, TRSN, TRSO, TS2, TSA, TSC, TSI, TSIZ, TSO2, TSOA,
TSOC, TSOP, TSOT, TSP, TSRC, TSS, TSSE, TSST, TST, TT1, TT2, TT3, TXT, TXX,
TXXX, TYE, TYER, UFI, UFID, ULT, USER, USLT, WAF, WAR, WAS, WCM, WCOM, WCOP,
WCP, WFED, WOAF, WOAR, WOAS, WORS, WPAY, WPB, WPUB, WXX, WXXX
# Workaround for http://tickets.musicbrainz.org/browse/PICARD-833
class _DummySpecForPicard(object):
write = None
EncodedTextSpec = MultiSpec = _DummySpecForPicard
BitPaddedInt = _BitPaddedIntForPicard
__all__ = ['ID3', 'ID3FileType', 'Frames', 'Open', 'delete']
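# Illustrative usage sketch, not part of the original module ("song.mp3" is
# a placeholder path): a typical read-modify-write cycle with this package:
#
#     >>> from mutagen.id3 import ID3, TIT2
#     >>> tags = ID3("song.mp3")
#     >>> tags.add(TIT2(encoding=3, text=["New Title"]))
#     >>> tags.save()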
# ---- file: rembo10_headphones/lib/mutagen/id3/_file.py ----
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
import mutagen
from mutagen._util import insert_bytes, delete_bytes, enum, \
loadfile, convert_error, read_full
from mutagen._tags import PaddingInfo
from ._util import error, ID3NoHeaderError, ID3UnsupportedVersionError, \
BitPaddedInt
from ._tags import ID3Tags, ID3Header, ID3SaveConfig
from ._id3v1 import MakeID3v1, find_id3v1
@enum
class ID3v1SaveOptions(object):
REMOVE = 0
"""ID3v1 tags will be removed"""
UPDATE = 1
"""ID3v1 tags will be updated but not added"""
CREATE = 2
"""ID3v1 tags will be created and/or updated"""
class ID3(ID3Tags, mutagen.Metadata):
"""ID3(filething=None)
A file with an ID3v2 tag.
If any arguments are given, the :meth:`load` is called with them. If no
arguments are given then an empty `ID3` object is created.
::
ID3("foo.mp3")
# same as
t = ID3()
t.load("foo.mp3")
Arguments:
filething (filething): or `None`
Attributes:
version (tuple[int]): ID3 tag version as a tuple
unknown_frames (list[bytes]): raw frame data of any unknown frames
found
size (int): the total size of the ID3 tag, including the header
"""
__module__ = "mutagen.id3"
PEDANTIC = True
"""`bool`:
.. deprecated:: 1.28
Doesn't have any effect
"""
filename = None
def __init__(self, *args, **kwargs):
self._header = None
self._version = (2, 4, 0)
super(ID3, self).__init__(*args, **kwargs)
@property
def version(self):
if self._header is not None:
return self._header.version
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def f_unsynch(self):
if self._header is not None:
return self._header.f_unsynch
return False
@property
def f_extended(self):
if self._header is not None:
return self._header.f_extended
return False
@property
def size(self):
if self._header is not None:
return self._header.size
return 0
def _pre_load_header(self, fileobj):
# XXX: for aiff to adjust the offset..
pass
@convert_error(IOError, error)
@loadfile()
def load(self, filething, known_frames=None, translate=True, v2_version=4,
load_v1=True):
"""Load tags from a filename.
Args:
            filething (filething): filename or file object to load tag data
                from
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
translate (bool): Update all tags to ID3v2.3/4 internally. If you
intend to save, this must be true or you have to
call update_to_v23() / update_to_v24() manually.
v2_version (int): if update_to_v23 or update_to_v24 get called
(3 or 4)
load_v1 (bool): Load tags from ID3v1 header if present. If both
ID3v1 and ID3v2 headers are present, combine the tags from
the two, with ID3v2 having precedence.
.. versionadded:: 1.42
Example of loading a custom frame::
my_frames = dict(mutagen.id3.Frames)
class XMYF(Frame): ...
my_frames["XMYF"] = XMYF
mutagen.id3.ID3(filename, known_frames=my_frames)
"""
fileobj = filething.fileobj
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
self.unknown_frames = []
self._header = None
self._padding = 0
self._pre_load_header(fileobj)
try:
self._header = ID3Header(fileobj)
except (ID3NoHeaderError, ID3UnsupportedVersionError):
if not load_v1:
raise
frames, offset = find_id3v1(fileobj, v2_version, known_frames)
if frames is None:
raise
self.version = ID3Header._V11
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
else:
# XXX: attach to the header object so we have it in spec parsing..
if known_frames is not None:
self._header._known_frames = known_frames
data = read_full(fileobj, self.size - 10)
remaining_data = self._read(self._header, data)
self._padding = len(remaining_data)
if load_v1:
v1v2_ver = 4 if self.version[1] == 4 else 3
frames, offset = find_id3v1(fileobj, v1v2_ver, known_frames)
if frames:
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
if translate:
if v2_version == 3:
self.update_to_v23()
else:
self.update_to_v24()
def _prepare_data(self, fileobj, start, available, v2_version, v23_sep,
pad_func):
if v2_version not in (3, 4):
raise ValueError("Only 3 or 4 allowed for v2_version")
config = ID3SaveConfig(v2_version, v23_sep)
framedata = self._write(config)
needed = len(framedata) + 10
fileobj.seek(0, 2)
trailing_size = fileobj.tell() - start
info = PaddingInfo(available - needed, trailing_size)
new_padding = info._get_padding(pad_func)
if new_padding < 0:
raise error("invalid padding")
new_size = needed + new_padding
new_framesize = BitPaddedInt.to_str(new_size - 10, width=4)
header = struct.pack(
'>3sBBB4s', b'ID3', v2_version, 0, 0, new_framesize)
data = header + framedata
assert new_size >= len(data)
data += (new_size - len(data)) * b'\x00'
assert new_size == len(data)
return data
@convert_error(IOError, error)
@loadfile(writable=True, create=True)
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
Args:
filething (filething):
Filename to save the tag to. If no filename is given,
the one most recently loaded is used.
v1 (ID3v1SaveOptions):
if 0, ID3v1 tags will be removed.
if 1, ID3v1 tags will be updated but not added.
if 2, ID3v1 tags will be created and/or updated
            v2_version (int):
                version of ID3v2 tags (3 or 4).
            v23_sep (text):
                the separator used to join multiple text values
                if v2_version == 3. Defaults to '/' but if it's None
                will be the ID3v2.4 null separator.
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
tags, you must call method update_to_v23 before saving the file.
The lack of a way to update only an ID3v1 tag is intentional.
"""
f = filething.fileobj
try:
header = ID3Header(filething.fileobj)
except ID3NoHeaderError:
old_size = 0
else:
old_size = header.size
data = self._prepare_data(
f, 0, old_size, v2_version, v23_sep, padding)
new_size = len(data)
if (old_size < new_size):
insert_bytes(f, new_size - old_size, old_size)
elif (old_size > new_size):
delete_bytes(f, old_size - new_size, new_size)
f.seek(0)
f.write(data)
self.__save_v1(f, v1)
def __save_v1(self, f, v1):
tag, offset = find_id3v1(f)
has_v1 = tag is not None
f.seek(offset, 2)
if v1 == ID3v1SaveOptions.UPDATE and has_v1 or \
v1 == ID3v1SaveOptions.CREATE:
f.write(MakeID3v1(self))
else:
f.truncate()
@loadfile(writable=True)
def delete(self, filething=None, delete_v1=True, delete_v2=True):
"""delete(filething=None, delete_v1=True, delete_v2=True)
Remove tags from a file.
Args:
filething (filething): A filename or `None` to use the one used
when loading.
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
If no filename is given, the one most recently loaded is used.
"""
delete(filething, delete_v1, delete_v2)
self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething, delete_v1=True, delete_v2=True):
"""Remove tags from a file.
Args:
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
Raises:
mutagen.MutagenError: In case deleting failed
"""
f = filething.fileobj
if delete_v1:
tag, offset = find_id3v1(f)
if tag is not None:
f.seek(offset, 2)
f.truncate()
# technically an insize=0 tag is invalid, but we delete it anyway
# (primarily because we used to write it)
if delete_v2:
f.seek(0, 0)
idata = f.read(10)
try:
id3, vmaj, vrev, flags, insize = struct.unpack('>3sBBB4s', idata)
except struct.error:
pass
else:
insize = BitPaddedInt(insize)
if id3 == b'ID3' and insize >= 0:
delete_bytes(f, insize + 10, 0)
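# Illustrative sketch, not part of the original module ("song.mp3" is a
# placeholder path): module-level delete() strips tags without loading them:
#
#     >>> import mutagen.id3
#     >>> mutagen.id3.delete("song.mp3")                   # drop v1 and v2
#     >>> mutagen.id3.delete("song.mp3", delete_v1=False)  # keep any v1 tag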
class ID3FileType(mutagen.FileType):
"""ID3FileType(filething, ID3=None, **kwargs)
An unknown type of file with ID3 tags.
Args:
filething (filething): A filename or file-like object
ID3 (ID3): An ID3 subclass to use for tags.
Raises:
mutagen.MutagenError: In case loading the file failed
Load stream and tag information from a file.
    A custom tag reader may be used instead of the default
mutagen.id3.ID3 object, e.g. an EasyID3 reader.
"""
__module__ = "mutagen.id3"
ID3 = ID3
class _Info(mutagen.StreamInfo):
length = 0
def __init__(self, fileobj, offset):
pass
@staticmethod
def pprint():
return u"Unknown format with ID3 tag"
@staticmethod
def score(filename, fileobj, header_data):
return header_data.startswith(b"ID3")
def add_tags(self, ID3=None):
"""Add an empty ID3 tag to the file.
Args:
            ID3 (ID3): An ID3 subclass to use or `None` to use the one
                that was used when loading.
        A custom tag reader may be used instead of the default
        `ID3` object, e.g. a `mutagen.easyid3.EasyID3` reader.
"""
if ID3 is None:
ID3 = self.ID3
if self.tags is None:
self.ID3 = ID3
self.tags = ID3()
else:
raise error("an ID3 tag already exists")
@loadfile()
def load(self, filething, ID3=None, **kwargs):
# see __init__ for docs
fileobj = filething.fileobj
if ID3 is None:
ID3 = self.ID3
else:
# If this was initialized with EasyID3, remember that for
# when tags are auto-instantiated in add_tags.
self.ID3 = ID3
try:
self.tags = ID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
if self.tags is not None:
try:
offset = self.tags.size
except AttributeError:
offset = None
else:
offset = None
self.info = self._Info(fileobj, offset)
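# Illustrative sketch, not part of the original module ("some.file" is a
# placeholder path): ID3FileType is mainly a base class for concrete
# formats, but a file that begins with an ID3 header can be opened directly:
#
#     >>> f = ID3FileType("some.file")
#     >>> if f.tags is None:
#     ...     f.add_tags()      # raises if an ID3 tag already exists
#     >>> f.tags.add(TIT2(encoding=3, text=["Title"]))   # TIT2 from ._frames
#     >>> f.save()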
# ---- file: rembo10_headphones/lib/mutagen/id3/_id3v1.py ----
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
# 2006 Lukas Lalinsky
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import errno
from struct import error as StructError, unpack
from mutagen._util import bchr
from ._frames import TCON, TRCK, COMM, TDRC, TYER, TALB, TPE1, TIT2
def find_id3v1(fileobj, v2_version=4, known_frames=None):
"""Returns a tuple of (id3tag, offset_to_end) or (None, 0)
offset mainly because we used to write too short tags in some cases and
we need the offset to delete them.
v2_version: Decides whether ID3v2.3 or ID3v2.4 tags
should be returned. Must be 3 or 4.
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
"""
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
# id3v1 is always at the end (after apev2)
extra_read = b"APETAGEX".index(b"TAG")
old_pos = fileobj.tell()
try:
fileobj.seek(-128 - extra_read, 2)
except IOError as e:
if e.errno == errno.EINVAL:
# If the file is too small, might be ok since we wrote too small
# tags at some point. let's see how the parsing goes..
fileobj.seek(0, 0)
else:
raise
data = fileobj.read(128 + extra_read)
fileobj.seek(old_pos, 0)
try:
idx = data.index(b"TAG")
except ValueError:
return (None, 0)
else:
# FIXME: make use of the apev2 parser here
# if TAG is part of APETAGEX assume this is an APEv2 tag
try:
ape_idx = data.index(b"APETAGEX")
except ValueError:
pass
else:
if idx == ape_idx + extra_read:
return (None, 0)
tag = ParseID3v1(data[idx:], v2_version, known_frames)
if tag is None:
return (None, 0)
offset = idx - len(data)
return (tag, offset)
# ID3v1.1 support.
def ParseID3v1(data, v2_version=4, known_frames=None):
"""Parse an ID3v1 tag, returning a list of ID3v2 frames
Returns a {frame_name: frame} dict or None.
v2_version: Decides whether ID3v2.3 or ID3v2.4 tags
should be returned. Must be 3 or 4.
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
"""
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
try:
data = data[data.index(b"TAG"):]
except ValueError:
return None
if 128 < len(data) or len(data) < 124:
return None
# Issue #69 - Previous versions of Mutagen, when encountering
# out-of-spec TDRC and TYER frames of less than four characters,
# wrote only the characters available - e.g. "1" or "" - into the
# year field. To parse those, reduce the size of the year field.
# Amazingly, "0s" works as a struct format string.
unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124)
try:
tag, title, artist, album, year, comment, track, genre = unpack(
unpack_fmt, data)
except StructError:
return None
if tag != b"TAG":
return None
def fix(data):
return data.split(b"\x00")[0].strip().decode('latin1')
title, artist, album, year, comment = map(
fix, [title, artist, album, year, comment])
frame_class = {
"TIT2": TIT2,
"TPE1": TPE1,
"TALB": TALB,
"TYER": TYER,
"TDRC": TDRC,
"COMM": COMM,
"TRCK": TRCK,
"TCON": TCON,
}
for key in frame_class:
if known_frames is not None:
if key in known_frames:
frame_class[key] = known_frames[key]
else:
frame_class[key] = None
frames = {}
if title and frame_class["TIT2"]:
frames["TIT2"] = frame_class["TIT2"](encoding=0, text=title)
if artist and frame_class["TPE1"]:
frames["TPE1"] = frame_class["TPE1"](encoding=0, text=[artist])
if album and frame_class["TALB"]:
frames["TALB"] = frame_class["TALB"](encoding=0, text=album)
if year:
if v2_version == 3 and frame_class["TYER"]:
frames["TYER"] = frame_class["TYER"](encoding=0, text=year)
elif frame_class["TDRC"]:
frames["TDRC"] = frame_class["TDRC"](encoding=0, text=year)
if comment and frame_class["COMM"]:
frames["COMM"] = frame_class["COMM"](
encoding=0, lang="eng", desc="ID3v1 Comment", text=comment)
# Don't read a track number if it looks like the comment was
# padded with spaces instead of nulls (thanks, WinAmp).
if (track and frame_class["TRCK"] and
((track != 32) or (data[-3] == b'\x00'[0]))):
frames["TRCK"] = TRCK(encoding=0, text=str(track))
if genre != 255 and frame_class["TCON"]:
frames["TCON"] = TCON(encoding=0, text=str(genre))
return frames
def MakeID3v1(id3):
"""Return an ID3v1.1 tag string from a dict of ID3v2.4 frames."""
v1 = {}
for v2id, name in {"TIT2": "title", "TPE1": "artist",
"TALB": "album"}.items():
if v2id in id3:
text = id3[v2id].text[0].encode('latin1', 'replace')[:30]
else:
text = b""
v1[name] = text + (b"\x00" * (30 - len(text)))
if "COMM" in id3:
cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28]
else:
cmnt = b""
v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt)))
if "TRCK" in id3:
try:
v1["track"] = bchr(+id3["TRCK"])
except ValueError:
v1["track"] = b"\x00"
else:
v1["track"] = b"\x00"
if "TCON" in id3:
try:
genre = id3["TCON"].genres[0]
except IndexError:
pass
else:
if genre in TCON.GENRES:
v1["genre"] = bchr(TCON.GENRES.index(genre))
if "genre" not in v1:
v1["genre"] = b"\xff"
if "TDRC" in id3:
year = str(id3["TDRC"]).encode('ascii')
elif "TYER" in id3:
year = str(id3["TYER"]).encode('ascii')
else:
year = b""
v1["year"] = (year + b"\x00\x00\x00\x00")[:4]
return (
b"TAG" +
v1["title"] +
v1["artist"] +
v1["album"] +
v1["year"] +
v1["comment"] +
v1["track"] +
v1["genre"]
)
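# Illustrative round-trip sketch, not part of the original module: an empty
# mapping yields a blank 128-byte ID3v1.1 block, which parses back to an
# empty frame dict:
#
#     >>> raw = MakeID3v1({})
#     >>> len(raw)
#     128
#     >>> ParseID3v1(raw)
#     {}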
# ---- file: rembo10_headphones/lib/mutagen/id3/_frames.py ----
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import zlib
from struct import unpack
from ._util import ID3JunkFrameError, ID3EncryptionUnsupportedError, unsynch, \
ID3SaveConfig, error
from ._specs import BinaryDataSpec, StringSpec, Latin1TextSpec, \
EncodedTextSpec, ByteSpec, EncodingSpec, ASPIIndexSpec, SizedIntegerSpec, \
IntegerSpec, Encoding, VolumeAdjustmentsSpec, VolumePeakSpec, \
VolumeAdjustmentSpec, ChannelSpec, MultiSpec, SynchronizedTextSpec, \
KeyEventSpec, TimeStampSpec, EncodedNumericPartTextSpec, \
EncodedNumericTextSpec, SpecError, PictureTypeSpec, ID3FramesSpec, \
Latin1TextListSpec, CTOCFlagsSpec, FrameIDSpec, RVASpec
def _bytes2key(b):
assert isinstance(b, bytes)
return b.decode("latin1")
class Frame(object):
"""Fundamental unit of ID3 data.
ID3 tags are split into frames. Each frame has a potentially
different structure, and so this base class is not very featureful.
"""
FLAG23_ALTERTAG = 0x8000
FLAG23_ALTERFILE = 0x4000
FLAG23_READONLY = 0x2000
FLAG23_COMPRESS = 0x0080
FLAG23_ENCRYPT = 0x0040
FLAG23_GROUP = 0x0020
FLAG24_ALTERTAG = 0x4000
FLAG24_ALTERFILE = 0x2000
FLAG24_READONLY = 0x1000
FLAG24_GROUPID = 0x0040
FLAG24_COMPRESS = 0x0008
FLAG24_ENCRYPT = 0x0004
FLAG24_UNSYNCH = 0x0002
FLAG24_DATALEN = 0x0001
_framespec = []
_optionalspec = []
def __init__(self, *args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and \
isinstance(args[0], type(self)):
other = args[0]
# ask the sub class to fill in our data
other._to_other(self)
else:
for checker, val in zip(self._framespec, args):
setattr(self, checker.name, val)
for checker in self._framespec[len(args):]:
setattr(self, checker.name,
kwargs.get(checker.name, checker.default))
for spec in self._optionalspec:
if spec.name in kwargs:
setattr(self, spec.name, kwargs[spec.name])
else:
break
def __setattr__(self, name, value):
for checker in self._framespec:
if checker.name == name:
self._setattr(name, checker.validate(self, value))
return
for checker in self._optionalspec:
if checker.name == name:
self._setattr(name, checker.validate(self, value))
return
super(Frame, self).__setattr__(name, value)
def _setattr(self, name, value):
self.__dict__[name] = value
def _to_other(self, other):
# this impl covers subclasses with the same framespec
if other._framespec is not self._framespec:
raise ValueError
for checker in other._framespec:
other._setattr(checker.name, getattr(self, checker.name))
# this impl covers subclasses with the same optionalspec
if other._optionalspec is not self._optionalspec:
raise ValueError
for checker in other._optionalspec:
if hasattr(self, checker.name):
other._setattr(checker.name, getattr(self, checker.name))
def _merge_frame(self, other):
# default impl, use the new tag over the old one
return other
def _upgrade_frame(self):
"""Returns either this instance or a new instance if this is a v2.2
frame and an upgrade to a v2.3/4 equivalent is viable.
If this is a v2.2 instance and there is no upgrade path, returns None.
"""
# turn 2.2 into 2.3/2.4 tags
if len(type(self).__name__) == 3:
base = type(self).__base__
if base is Frame:
return
return base(self)
else:
return self
def _get_v23_frame(self, **kwargs):
"""Returns a frame copy which is suitable for writing into a v2.3 tag.
kwargs get passed to the specs.
"""
new_kwargs = {}
for checker in self._framespec:
name = checker.name
value = getattr(self, name)
new_kwargs[name] = checker._validate23(self, value, **kwargs)
for checker in self._optionalspec:
name = checker.name
if hasattr(self, name):
value = getattr(self, name)
new_kwargs[name] = checker._validate23(self, value, **kwargs)
return type(self)(**new_kwargs)
@property
def HashKey(self):
"""An internal key used to ensure frame uniqueness in a tag"""
return self.FrameID
@property
def FrameID(self):
"""ID3v2 three or four character frame ID"""
return type(self).__name__
def __repr__(self):
"""Python representation of a frame.
The string returned is a valid Python expression to construct
a copy of this frame.
"""
kw = []
for attr in self._framespec:
# so repr works during __init__
if hasattr(self, attr.name):
kw.append('%s=%r' % (attr.name, getattr(self, attr.name)))
for attr in self._optionalspec:
if hasattr(self, attr.name):
kw.append('%s=%r' % (attr.name, getattr(self, attr.name)))
return '%s(%s)' % (type(self).__name__, ', '.join(kw))
def _readData(self, id3, data):
"""Raises ID3JunkFrameError; Returns leftover data"""
for reader in self._framespec:
if len(data) or reader.handle_nodata:
try:
value, data = reader.read(id3, self, data)
except SpecError as e:
raise ID3JunkFrameError(e)
else:
raise ID3JunkFrameError("no data left")
self._setattr(reader.name, value)
for reader in self._optionalspec:
if len(data) or reader.handle_nodata:
try:
value, data = reader.read(id3, self, data)
except SpecError as e:
raise ID3JunkFrameError(e)
else:
break
self._setattr(reader.name, value)
return data
def _writeData(self, config=None):
"""Raises error"""
if config is None:
config = ID3SaveConfig()
if config.v2_version == 3:
frame = self._get_v23_frame(sep=config.v23_separator)
else:
frame = self
data = []
for writer in self._framespec:
try:
data.append(
writer.write(config, frame, getattr(frame, writer.name)))
except SpecError as e:
raise error(e)
for writer in self._optionalspec:
try:
data.append(
writer.write(config, frame, getattr(frame, writer.name)))
except AttributeError:
break
except SpecError as e:
raise error(e)
return b''.join(data)
def pprint(self):
"""Return a human-readable representation of the frame."""
return "%s=%s" % (type(self).__name__, self._pprint())
def _pprint(self):
return "[unrepresentable data]"
@classmethod
def _fromData(cls, header, tflags, data):
"""Construct this ID3 frame from raw string data.
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted.
"""
if header.version >= header._V24:
if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN):
# The data length int is syncsafe in 2.4 (but not 2.3).
# However, we don't actually need the data length int,
# except to work around a QL 0.12 bug, and in that case
# all we need are the raw bytes.
datalen_bytes = data[:4]
data = data[4:]
if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch:
try:
data = unsynch.decode(data)
except ValueError:
# Some things write synch-unsafe data with either the frame
# or global unsynch flag set. Try to load them as is.
# https://github.com/quodlibet/mutagen/issues/210
# https://github.com/quodlibet/mutagen/issues/223
pass
if tflags & Frame.FLAG24_ENCRYPT:
raise ID3EncryptionUnsupportedError
if tflags & Frame.FLAG24_COMPRESS:
try:
data = zlib.decompress(data)
except zlib.error:
# the initial mutagen that went out with QL 0.12 did not
# write the 4 bytes of uncompressed size. Compensate.
data = datalen_bytes + data
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError(
'zlib: %s: %r' % (err, data))
elif header.version >= header._V23:
if tflags & Frame.FLAG23_COMPRESS:
if len(data) < 4:
raise ID3JunkFrameError('frame too small: %r' % data)
usize, = unpack('>L', data[:4])
data = data[4:]
if tflags & Frame.FLAG23_ENCRYPT:
raise ID3EncryptionUnsupportedError
if tflags & Frame.FLAG23_COMPRESS:
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError('zlib: %s: %r' % (err, data))
frame = cls()
frame._readData(header, data)
return frame
def __hash__(self):
raise TypeError("Frame objects are unhashable")
class CHAP(Frame):
"""Chapter"""
_framespec = [
Latin1TextSpec("element_id"),
SizedIntegerSpec("start_time", 4, default=0),
SizedIntegerSpec("end_time", 4, default=0),
SizedIntegerSpec("start_offset", 4, default=0xffffffff),
SizedIntegerSpec("end_offset", 4, default=0xffffffff),
ID3FramesSpec("sub_frames"),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.element_id)
def __eq__(self, other):
if not isinstance(other, CHAP):
return False
self_frames = self.sub_frames or {}
other_frames = other.sub_frames or {}
if sorted(self_frames.values()) != sorted(other_frames.values()):
return False
return self.element_id == other.element_id and \
self.start_time == other.start_time and \
self.end_time == other.end_time and \
self.start_offset == other.start_offset and \
self.end_offset == other.end_offset
__hash__ = Frame.__hash__
def _pprint(self):
frame_pprint = u""
for frame in self.sub_frames.values():
for line in frame.pprint().splitlines():
frame_pprint += "\n" + " " * 4 + line
return u"%s time=%d..%d offset=%d..%d%s" % (
self.element_id, self.start_time, self.end_time,
self.start_offset, self.end_offset, frame_pprint)
class CTOC(Frame):
"""Table of contents"""
_framespec = [
Latin1TextSpec("element_id"),
CTOCFlagsSpec("flags", default=0),
Latin1TextListSpec("child_element_ids"),
ID3FramesSpec("sub_frames"),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.element_id)
__hash__ = Frame.__hash__
def __eq__(self, other):
if not isinstance(other, CTOC):
return False
self_frames = self.sub_frames or {}
other_frames = other.sub_frames or {}
if sorted(self_frames.values()) != sorted(other_frames.values()):
return False
return self.element_id == other.element_id and \
self.flags == other.flags and \
self.child_element_ids == other.child_element_ids
def _pprint(self):
frame_pprint = u""
if getattr(self, "sub_frames", None):
frame_pprint += "\n" + "\n".join(
[" " * 4 + f.pprint() for f in self.sub_frames.values()])
return u"%s flags=%d child_element_ids=%s%s" % (
self.element_id, int(self.flags),
u",".join(self.child_element_ids), frame_pprint)
class TextFrame(Frame):
"""Text strings.
Text frames support casts to unicode or str objects, as well as
list-like indexing, extend, and append.
Iterating over a TextFrame iterates over its strings, not its
characters.
Text frames have a 'text' attribute which is the list of strings,
    and an 'encoding' attribute; 0 for ISO-8859-1, 1 for UTF-16, 2 for
UTF-16BE, and 3 for UTF-8. If you don't want to worry about
encodings, just set it to 3.
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000', default=[]),
]
def __bytes__(self):
return str(self).encode('utf-8')
def __str__(self):
return u'\u0000'.join(self.text)
def __eq__(self, other):
if isinstance(other, bytes):
return bytes(self) == other
elif isinstance(other, str):
return str(self) == other
return self.text == other
__hash__ = Frame.__hash__
def __getitem__(self, item):
return self.text[item]
def __iter__(self):
return iter(self.text)
def append(self, value):
"""Append a string."""
return self.text.append(value)
def extend(self, value):
"""Extend the list by appending all strings from the given list."""
return self.text.extend(value)
def _merge_frame(self, other):
# merge in new values
for val in other[:]:
if val not in self:
self.append(val)
self.encoding = max(self.encoding, other.encoding)
return self
def _pprint(self):
return " / ".join(self.text)
class NumericTextFrame(TextFrame):
"""Numerical text strings.
The numeric value of these frames can be gotten with unary plus, e.g.::
frame = TLEN('12345')
length = +frame
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
MultiSpec('text', EncodedNumericTextSpec('text'), sep=u'\u0000',
default=[]),
]
def __pos__(self):
"""Return the numerical value of the string."""
return int(self.text[0])
class NumericPartTextFrame(TextFrame):
"""Multivalue numerical text strings.
These strings indicate 'part (e.g. track) X of Y', and unary plus
returns the first value::
frame = TRCK('4/15')
track = +frame # track == 4
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
MultiSpec('text', EncodedNumericPartTextSpec('text'), sep=u'\u0000',
default=[]),
]
def __pos__(self):
return int(self.text[0].split("/")[0])
class TimeStampTextFrame(TextFrame):
"""A list of time stamps.
The 'text' attribute in this frame is a list of ID3TimeStamp
objects, not a list of strings.
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
MultiSpec('text', TimeStampSpec('stamp'), sep=u',', default=[]),
]
def __bytes__(self):
return str(self).encode('utf-8')
def __str__(self):
return u','.join([stamp.text for stamp in self.text])
def _pprint(self):
return u" / ".join([stamp.text for stamp in self.text])
class UrlFrame(Frame):
"""A frame containing a URL string.
The ID3 specification is silent about IRIs and normalized URL
forms. Mutagen assumes all URLs in files are encoded as Latin 1,
but string conversion of this frame returns a UTF-8 representation
for compatibility with other string conversions.
The only sane way to handle URLs in MP3s is to restrict them to
ASCII.
"""
_framespec = [
Latin1TextSpec('url'),
]
def __bytes__(self):
return self.url.encode('utf-8')
def __str__(self):
return self.url
def __eq__(self, other):
return self.url == other
__hash__ = Frame.__hash__
def _pprint(self):
return self.url
class UrlFrameU(UrlFrame):
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.url)
class TALB(TextFrame):
"Album"
class TBPM(NumericTextFrame):
"Beats per minute"
class TCOM(TextFrame):
"Composer"
class TCON(TextFrame):
"""Content type (Genre)
ID3 has several ways genres can be represented; for convenience,
use the 'genres' property rather than the 'text' attribute.
"""
from mutagen._constants import GENRES
GENRES = GENRES
def __get_genres(self):
genres = []
import re
genre_re = re.compile(r"((?:\((?P<id>[0-9]+|RX|CR)\))*)(?P<str>.+)?")
for value in self.text:
# 255 possible entries in id3v1
if value.isdigit() and int(value) < 256:
try:
genres.append(self.GENRES[int(value)])
except IndexError:
genres.append(u"Unknown")
elif value == "CR":
genres.append(u"Cover")
elif value == "RX":
genres.append(u"Remix")
elif value:
newgenres = []
genreid, dummy, genrename = genre_re.match(value).groups()
if genreid:
for gid in genreid[1:-1].split(")("):
if gid.isdigit() and int(gid) < len(self.GENRES):
gid = str(self.GENRES[int(gid)])
newgenres.append(gid)
elif gid == "CR":
newgenres.append(u"Cover")
elif gid == "RX":
newgenres.append(u"Remix")
else:
newgenres.append(u"Unknown")
if genrename:
# "Unescaping" the first parenthesis
if genrename.startswith("(("):
genrename = genrename[1:]
if genrename not in newgenres:
newgenres.append(genrename)
genres.extend(newgenres)
return genres
def __set_genres(self, genres):
if isinstance(genres, str):
genres = [genres]
self.text = [self.__decode(g) for g in genres]
def __decode(self, value):
if isinstance(value, bytes):
enc = EncodedTextSpec._encodings[self.encoding][0]
return value.decode(enc)
else:
return value
genres = property(__get_genres, __set_genres, None,
"A list of genres parsed from the raw text data.")
def _pprint(self):
return " / ".join(self.genres)
class TCOP(TextFrame):
"Copyright (c)"
class TCMP(NumericTextFrame):
"iTunes Compilation Flag"
class TDAT(TextFrame):
"Date of recording (DDMM)"
class TDEN(TimeStampTextFrame):
"Encoding Time"
class TDES(TextFrame):
"iTunes Podcast Description"
class TKWD(TextFrame):
"iTunes Podcast Keywords"
class TCAT(TextFrame):
"iTunes Podcast Category"
class MVNM(TextFrame):
"iTunes Movement Name"
class MVN(MVNM):
"iTunes Movement Name"
class MVIN(NumericPartTextFrame):
"iTunes Movement Number/Count"
class MVI(MVIN):
"iTunes Movement Number/Count"
class GRP1(TextFrame):
"iTunes Grouping"
class GP1(GRP1):
"iTunes Grouping"
class TDOR(TimeStampTextFrame):
"Original Release Time"
class TDLY(NumericTextFrame):
"Audio Delay (ms)"
class TDRC(TimeStampTextFrame):
"Recording Time"
class TDRL(TimeStampTextFrame):
"Release Time"
class TDTG(TimeStampTextFrame):
"Tagging Time"
class TENC(TextFrame):
"Encoder"
class TEXT(TextFrame):
"Lyricist"
class TFLT(TextFrame):
"File type"
class TGID(TextFrame):
"iTunes Podcast Identifier"
class TIME(TextFrame):
"Time of recording (HHMM)"
class TIT1(TextFrame):
"Content group description"
class TIT2(TextFrame):
"Title"
class TIT3(TextFrame):
"Subtitle/Description refinement"
class TKEY(TextFrame):
"Starting Key"
class TLAN(TextFrame):
"Audio Languages"
class TLEN(NumericTextFrame):
"Audio Length (ms)"
class TMED(TextFrame):
"Source Media Type"
class TMOO(TextFrame):
"Mood"
class TOAL(TextFrame):
"Original Album"
class TOFN(TextFrame):
"Original Filename"
class TOLY(TextFrame):
"Original Lyricist"
class TOPE(TextFrame):
"Original Artist/Performer"
class TORY(NumericTextFrame):
"Original Release Year"
class TOWN(TextFrame):
"Owner/Licensee"
class TPE1(TextFrame):
"Lead Artist/Performer/Soloist/Group"
class TPE2(TextFrame):
"Band/Orchestra/Accompaniment"
class TPE3(TextFrame):
"Conductor"
class TPE4(TextFrame):
"Interpreter/Remixer/Modifier"
class TPOS(NumericPartTextFrame):
"Part of set"
class TPRO(TextFrame):
"Produced (P)"
class TPUB(TextFrame):
"Publisher"
class TRCK(NumericPartTextFrame):
"Track Number"
class TRDA(TextFrame):
"Recording Dates"
class TRSN(TextFrame):
"Internet Radio Station Name"
class TRSO(TextFrame):
"Internet Radio Station Owner"
class TSIZ(NumericTextFrame):
"Size of audio data (bytes)"
class TSO2(TextFrame):
"iTunes Album Artist Sort"
class TSOA(TextFrame):
"Album Sort Order key"
class TSOC(TextFrame):
"iTunes Composer Sort"
class TSOP(TextFrame):
"Perfomer Sort Order key"
class TSOT(TextFrame):
"Title Sort Order key"
class TSRC(TextFrame):
"International Standard Recording Code (ISRC)"
class TSSE(TextFrame):
"Encoder settings"
class TSST(TextFrame):
"Set Subtitle"
class TYER(NumericTextFrame):
"Year of recording"
class TXXX(TextFrame):
"""User-defined text data.
TXXX frames have a 'desc' attribute which is set to any Unicode
value (though the encoding of the text and the description must be
the same). Many taggers use this frame to store freeform keys.
"""
_framespec = [
EncodingSpec('encoding'),
EncodedTextSpec('desc'),
MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000', default=[]),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.desc)
def _pprint(self):
return "%s=%s" % (self.desc, " / ".join(self.text))
class WCOM(UrlFrameU):
"Commercial Information"
class WCOP(UrlFrame):
"Copyright Information"
class WFED(UrlFrame):
"iTunes Podcast Feed"
class WOAF(UrlFrame):
"Official File Information"
class WOAR(UrlFrameU):
"Official Artist/Performer Information"
class WOAS(UrlFrame):
"Official Source Information"
class WORS(UrlFrame):
"Official Internet Radio Information"
class WPAY(UrlFrame):
"Payment Information"
class WPUB(UrlFrame):
"Official Publisher Information"
class WXXX(UrlFrame):
"""User-defined URL data.
Like TXXX, this has a freeform description associated with it.
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
EncodedTextSpec('desc'),
Latin1TextSpec('url'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.desc)
class PairedTextFrame(Frame):
"""Paired text strings.
Some ID3 frames pair text strings, to associate names with a more
specific involvement in the song. The 'people' attribute of these
frames contains a list of pairs::
[['trumpet', 'Miles Davis'], ['bass', 'Paul Chambers']]
Like text frames, these frames also have an encoding attribute.
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
MultiSpec('people',
EncodedTextSpec('involvement'),
EncodedTextSpec('person'), default=[])
]
def __eq__(self, other):
return self.people == other
__hash__ = Frame.__hash__
class TIPL(PairedTextFrame):
"Involved People List"
class TMCL(PairedTextFrame):
"Musicians Credits List"
class IPLS(TIPL):
"Involved People List"
class BinaryFrame(Frame):
"""Binary data
The 'data' attribute contains the raw byte string.
"""
_framespec = [
BinaryDataSpec('data'),
]
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
class MCDI(BinaryFrame):
"Binary dump of CD's TOC"
class ETCO(Frame):
"""Event timing codes."""
_framespec = [
ByteSpec("format", default=1),
KeyEventSpec("events", default=[]),
]
def __eq__(self, other):
return self.events == other
__hash__ = Frame.__hash__
class MLLT(Frame):
"""MPEG location lookup table.
This frame's attributes may be changed in the future based on
feedback from real-world use.
"""
_framespec = [
SizedIntegerSpec('frames', size=2, default=0),
SizedIntegerSpec('bytes', size=3, default=0),
SizedIntegerSpec('milliseconds', size=3, default=0),
ByteSpec('bits_for_bytes', default=0),
ByteSpec('bits_for_milliseconds', default=0),
BinaryDataSpec('data'),
]
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
class SYTC(Frame):
"""Synchronised tempo codes.
This frame's attributes may be changed in the future based on
feedback from real-world use.
"""
_framespec = [
ByteSpec("format", default=1),
BinaryDataSpec("data"),
]
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
class USLT(Frame):
"""Unsynchronised lyrics/text transcription.
Lyrics have a three letter ISO language code ('lang'), a
description ('desc'), and a block of plain text ('text').
"""
_framespec = [
EncodingSpec('encoding', default=Encoding.UTF16),
StringSpec('lang', length=3, default=u"XXX"),
EncodedTextSpec('desc'),
EncodedTextSpec('text'),
]
@property
def HashKey(self):
return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
def __bytes__(self):
return self.text.encode('utf-8')
def __str__(self):
return self.text
def __eq__(self, other):
return self.text == other
__hash__ = Frame.__hash__
def _pprint(self):
return "%s=%s=%s" % (self.desc, self.lang, self.text)
class SYLT(Frame):
"""Synchronised lyrics/text."""
_framespec = [
EncodingSpec('encoding'),
StringSpec('lang', length=3, default=u"XXX"),
ByteSpec('format', default=1),
ByteSpec('type', default=0),
EncodedTextSpec('desc'),
SynchronizedTextSpec('text'),
]
@property
def HashKey(self):
return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
def _pprint(self):
return str(self)
def __eq__(self, other):
return str(self) == other
__hash__ = Frame.__hash__
def __str__(self):
unit = 'fr' if self.format == 1 else 'ms'
return u"\n".join("[{0}{1}]: {2}".format(time, unit, text)
for (text, time) in self.text)
def __bytes__(self):
return str(self).encode("utf-8")
class COMM(TextFrame):
"""User comment.
    User comment frames have a description, like TXXX, and also a three
letter ISO language code in the 'lang' attribute.
"""
_framespec = [
EncodingSpec('encoding'),
StringSpec('lang', length=3, default="XXX"),
EncodedTextSpec('desc'),
MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000', default=[]),
]
@property
def HashKey(self):
return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
def _pprint(self):
return "%s=%s=%s" % (self.desc, self.lang, " / ".join(self.text))
class RVA2(Frame):
"""Relative volume adjustment (2).
    This frame is used to implement volume scaling, and in
particular, normalization using ReplayGain.
Attributes:
* desc -- description or context of this adjustment
* channel -- audio channel to adjust (master is 1)
* gain -- a + or - dB gain relative to some reference level
* peak -- peak of the audio as a floating point number, [0, 1]
When storing ReplayGain tags, use descriptions of 'album' and
'track' on channel 1.
"""
_framespec = [
Latin1TextSpec('desc'),
ChannelSpec('channel', default=1),
VolumeAdjustmentSpec('gain', default=1),
VolumePeakSpec('peak', default=1),
]
_channels = ["Other", "Master volume", "Front right", "Front left",
"Back right", "Back left", "Front centre", "Back centre",
"Subwoofer"]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.desc)
def __eq__(self, other):
try:
return ((str(self) == other) or
(self.desc == other.desc and
self.channel == other.channel and
self.gain == other.gain and
self.peak == other.peak))
except AttributeError:
return False
__hash__ = Frame.__hash__
def __str__(self):
return "%s: %+0.4f dB/%0.4f" % (
self._channels[self.channel], self.gain, self.peak)
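# Illustrative sketch, not part of the original module: a typical ReplayGain
# track adjustment stored on the master volume channel:
#
#     >>> rva2 = RVA2(desc=u"track", channel=1, gain=-3.21, peak=0.98)
#     >>> str(rva2)
#     'Master volume: -3.2100 dB/0.9800'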
class EQU2(Frame):
"""Equalisation (2).
Attributes:
method -- interpolation method (0 = band, 1 = linear)
desc -- identifying description
adjustments -- list of (frequency, vol_adjustment) pairs
"""
_framespec = [
ByteSpec("method", default=0),
Latin1TextSpec("desc"),
VolumeAdjustmentsSpec("adjustments", default=[]),
]
def __eq__(self, other):
return self.adjustments == other
__hash__ = Frame.__hash__
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.desc)
class RVAD(Frame):
"""Relative volume adjustment"""
_framespec = [
RVASpec("adjustments", stereo_only=False),
]
__hash__ = Frame.__hash__
def __eq__(self, other):
if not isinstance(other, RVAD):
return False
return self.adjustments == other.adjustments
# class EQUA: unsupported
class RVRB(Frame):
"""Reverb."""
_framespec = [
SizedIntegerSpec('left', size=2, default=0),
SizedIntegerSpec('right', size=2, default=0),
ByteSpec('bounce_left', default=0),
ByteSpec('bounce_right', default=0),
ByteSpec('feedback_ltl', default=0),
ByteSpec('feedback_ltr', default=0),
ByteSpec('feedback_rtr', default=0),
ByteSpec('feedback_rtl', default=0),
ByteSpec('premix_ltr', default=0),
ByteSpec('premix_rtl', default=0),
]
def __eq__(self, other):
return (self.left, self.right) == other
__hash__ = Frame.__hash__
class APIC(Frame):
"""Attached (or linked) Picture.
Attributes:
* encoding -- text encoding for the description
* mime -- a MIME type (e.g. image/jpeg) or '-->' if the data is a URI
* type -- the source of the image (3 is the album front cover)
* desc -- a text description of the image
* data -- raw image data, as a byte string
Mutagen will automatically compress large images when saving tags.
"""
_framespec = [
EncodingSpec('encoding'),
Latin1TextSpec('mime'),
PictureTypeSpec('type'),
EncodedTextSpec('desc'),
BinaryDataSpec('data'),
]
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.desc)
def _merge_frame(self, other):
other.desc += u" "
return other
def _pprint(self):
type_desc = str(self.type)
if hasattr(self.type, "_pprint"):
type_desc = self.type._pprint()
return "%s, %s (%s, %d bytes)" % (
type_desc, self.desc, self.mime, len(self.data))
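# Illustrative sketch, not part of the original module ("cover.jpg" is a
# placeholder path; PictureType comes from ._specs): embedding front-cover
# art into an ID3 instance named "tags":
#
#     >>> with open("cover.jpg", "rb") as h:
#     ...     tags.add(APIC(encoding=3, mime=u"image/jpeg",
#     ...                   type=PictureType.COVER_FRONT, desc=u"cover",
#     ...                   data=h.read()))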
class PCNT(Frame):
"""Play counter.
The 'count' attribute contains the (recorded) number of times this
file has been played.
This frame is basically obsoleted by POPM.
"""
_framespec = [
IntegerSpec('count', default=0),
]
def __eq__(self, other):
return self.count == other
__hash__ = Frame.__hash__
def __pos__(self):
return self.count
def _pprint(self):
return str(self.count)
class PCST(Frame):
"""iTunes Podcast Flag"""
_framespec = [
IntegerSpec('value', default=0),
]
def __eq__(self, other):
return self.value == other
__hash__ = Frame.__hash__
def __pos__(self):
return self.value
def _pprint(self):
return str(self.value)
class POPM(Frame):
"""Popularimeter.
This frame keys a rating (out of 255) and a play count to an email
address.
Attributes:
* email -- email this POPM frame is for
* rating -- rating from 0 to 255
    * count -- number of times the file has been played (optional)
"""
_framespec = [
Latin1TextSpec('email'),
ByteSpec('rating', default=0),
]
_optionalspec = [
IntegerSpec('count', default=0),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.email)
def __eq__(self, other):
return self.rating == other
__hash__ = Frame.__hash__
def __pos__(self):
return self.rating
def _pprint(self):
return "%s=%r %r/255" % (
self.email, getattr(self, 'count', None), self.rating)
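# Illustrative sketch, not part of the original module ("user@example.com"
# is a placeholder): the rating is keyed to an email address, and unary plus
# returns it:
#
#     >>> popm = POPM(email=u"user@example.com", rating=196, count=12)
#     >>> +popm
#     196
#     >>> popm.HashKey
#     'POPM:user@example.com'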
class GEOB(Frame):
"""General Encapsulated Object.
A blob of binary data, that is not a picture (those go in APIC).
Attributes:
* encoding -- encoding of the description
* mime -- MIME type of the data or '-->' if the data is a URI
* filename -- suggested filename if extracted
* desc -- text description of the data
* data -- raw data, as a byte string
"""
_framespec = [
EncodingSpec('encoding'),
Latin1TextSpec('mime'),
EncodedTextSpec('filename'),
EncodedTextSpec('desc'),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.desc)
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
class RBUF(Frame):
"""Recommended buffer size.
Attributes:
* size -- recommended buffer size in bytes
* info -- if ID3 tags may be elsewhere in the file (optional)
* offset -- the location of the next ID3 tag, if any
Mutagen will not find the next tag itself.
"""
_framespec = [
SizedIntegerSpec('size', size=3, default=0),
]
_optionalspec = [
ByteSpec('info', default=0),
SizedIntegerSpec('offset', size=4, default=0),
]
def __eq__(self, other):
return self.size == other
__hash__ = Frame.__hash__
def __pos__(self):
return self.size
class AENC(Frame):
"""Audio encryption.
Attributes:
* owner -- key identifying this encryption type
* preview_start -- unencrypted data block offset
* preview_length -- number of unencrypted blocks
* data -- data required for decryption (optional)
Mutagen cannot decrypt files.
"""
_framespec = [
Latin1TextSpec('owner'),
SizedIntegerSpec('preview_start', size=2, default=0),
SizedIntegerSpec('preview_length', size=2, default=0),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.owner)
def __bytes__(self):
return self.owner.encode('utf-8')
def __str__(self):
return self.owner
def __eq__(self, other):
return self.owner == other
__hash__ = Frame.__hash__
class LINK(Frame):
"""Linked information.
Attributes:
* frameid -- the ID of the linked frame
* url -- the location of the linked frame
* data -- further ID information for the frame
"""
_framespec = [
FrameIDSpec('frameid', length=4),
Latin1TextSpec('url'),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return "%s:%s:%s:%s" % (
self.FrameID, self.frameid, self.url, _bytes2key(self.data))
def __eq__(self, other):
return (self.frameid, self.url, self.data) == other
__hash__ = Frame.__hash__
class POSS(Frame):
"""Position synchronisation frame
Attribute:
* format -- format of the position attribute (frames or milliseconds)
* position -- current position of the file
"""
_framespec = [
ByteSpec('format', default=1),
IntegerSpec('position', default=0),
]
def __pos__(self):
return self.position
def __eq__(self, other):
return self.position == other
__hash__ = Frame.__hash__
class UFID(Frame):
"""Unique file identifier.
Attributes:
* owner -- format/type of identifier
* data -- identifier
"""
_framespec = [
Latin1TextSpec('owner'),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.owner)
def __eq__(s, o):
if isinstance(o, UFI):
return s.owner == o.owner and s.data == o.data
else:
return s.data == o
__hash__ = Frame.__hash__
def _pprint(self):
return "%s=%r" % (self.owner, self.data)
class USER(Frame):
"""Terms of use.
Attributes:
* encoding -- text encoding
* lang -- ISO three letter language code
* text -- licensing terms for the audio
"""
_framespec = [
EncodingSpec('encoding'),
StringSpec('lang', length=3, default=u"XXX"),
EncodedTextSpec('text'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.lang)
def __bytes__(self):
return self.text.encode('utf-8')
def __str__(self):
return self.text
def __eq__(self, other):
return self.text == other
__hash__ = Frame.__hash__
def _pprint(self):
return "%r=%s" % (self.lang, self.text)
class OWNE(Frame):
"""Ownership frame."""
_framespec = [
EncodingSpec('encoding'),
Latin1TextSpec('price'),
StringSpec('date', length=8, default=u"19700101"),
EncodedTextSpec('seller'),
]
def __bytes__(self):
return self.seller.encode('utf-8')
def __str__(self):
return self.seller
def __eq__(self, other):
return self.seller == other
__hash__ = Frame.__hash__
class COMR(Frame):
"""Commercial frame."""
_framespec = [
EncodingSpec('encoding'),
Latin1TextSpec('price'),
StringSpec('valid_until', length=8, default=u"19700101"),
Latin1TextSpec('contact'),
ByteSpec('format', default=0),
EncodedTextSpec('seller'),
EncodedTextSpec('desc'),
]
_optionalspec = [
Latin1TextSpec('mime'),
BinaryDataSpec('logo'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, _bytes2key(self._writeData()))
def __eq__(self, other):
return self._writeData() == other._writeData()
__hash__ = Frame.__hash__
class ENCR(Frame):
"""Encryption method registration.
The standard does not allow multiple ENCR frames with the same owner
or the same method. Mutagen only verifies that the owner is unique.
"""
_framespec = [
Latin1TextSpec('owner'),
ByteSpec('method', default=0x80),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return "%s:%s" % (self.FrameID, self.owner)
def __bytes__(self):
return self.data
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
class GRID(Frame):
"""Group identification registration."""
_framespec = [
Latin1TextSpec('owner'),
ByteSpec('group', default=0x80),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return '%s:%s' % (self.FrameID, self.group)
def __pos__(self):
return self.group
def __bytes__(self):
return self.owner.encode('utf-8')
def __str__(self):
return self.owner
def __eq__(self, other):
return self.owner == other or self.group == other
__hash__ = Frame.__hash__
class PRIV(Frame):
"""Private frame."""
_framespec = [
Latin1TextSpec('owner'),
BinaryDataSpec('data'),
]
@property
def HashKey(self):
return '%s:%s:%s' % (
self.FrameID, self.owner, _bytes2key(self.data))
def __bytes__(self):
return self.data
def __eq__(self, other):
return self.data == other
def _pprint(self):
return "%s=%r" % (self.owner, self.data)
__hash__ = Frame.__hash__
class SIGN(Frame):
"""Signature frame."""
_framespec = [
ByteSpec('group', default=0x80),
BinaryDataSpec('sig'),
]
@property
def HashKey(self):
return '%s:%s:%s' % (self.FrameID, self.group, _bytes2key(self.sig))
def __bytes__(self):
return self.sig
def __eq__(self, other):
return self.sig == other
__hash__ = Frame.__hash__
class SEEK(Frame):
"""Seek frame.
Mutagen does not find tags at seek offsets.
"""
_framespec = [
IntegerSpec('offset', default=0),
]
def __pos__(self):
return self.offset
def __eq__(self, other):
return self.offset == other
__hash__ = Frame.__hash__
class ASPI(Frame):
"""Audio seek point index.
Attributes: S, L, N, b, and Fi. For the meaning of these, see
the ID3v2.4 specification. Fi is a list of integers.
"""
_framespec = [
SizedIntegerSpec("S", size=4, default=0),
SizedIntegerSpec("L", size=4, default=0),
SizedIntegerSpec("N", size=2, default=0),
ByteSpec("b", default=0),
ASPIIndexSpec("Fi", default=[]),
]
def __eq__(self, other):
return self.Fi == other
__hash__ = Frame.__hash__
# ID3v2.2 frames
class UFI(UFID):
"Unique File Identifier"
class TT1(TIT1):
"Content group description"
class TT2(TIT2):
"Title"
class TT3(TIT3):
"Subtitle/Description refinement"
class TP1(TPE1):
"Lead Artist/Performer/Soloist/Group"
class TP2(TPE2):
"Band/Orchestra/Accompaniment"
class TP3(TPE3):
"Conductor"
class TP4(TPE4):
"Interpreter/Remixer/Modifier"
class TCM(TCOM):
"Composer"
class TXT(TEXT):
"Lyricist"
class TLA(TLAN):
"Audio Language(s)"
class TCO(TCON):
"Content Type (Genre)"
class TAL(TALB):
"Album"
class TPA(TPOS):
"Part of set"
class TRK(TRCK):
"Track Number"
class TRC(TSRC):
"International Standard Recording Code (ISRC)"
class TYE(TYER):
"Year of recording"
class TDA(TDAT):
"Date of recording (DDMM)"
class TIM(TIME):
"Time of recording (HHMM)"
class TRD(TRDA):
"Recording Dates"
class TMT(TMED):
"Source Media Type"
class TFT(TFLT):
"File Type"
class TBP(TBPM):
"Beats per minute"
class TCP(TCMP):
"iTunes Compilation Flag"
class TCR(TCOP):
"Copyright (C)"
class TPB(TPUB):
"Publisher"
class TEN(TENC):
"Encoder"
class TST(TSOT):
"Title Sort Order key"
class TSA(TSOA):
"Album Sort Order key"
class TS2(TSO2):
"iTunes Album Artist Sort"
class TSP(TSOP):
"Perfomer Sort Order key"
class TSC(TSOC):
"iTunes Composer Sort"
class TSS(TSSE):
"Encoder settings"
class TOF(TOFN):
"Original Filename"
class TLE(TLEN):
"Audio Length (ms)"
class TSI(TSIZ):
"Audio Data size (bytes)"
class TDY(TDLY):
"Audio Delay (ms)"
class TKE(TKEY):
"Starting Key"
class TOT(TOAL):
"Original Album"
class TOA(TOPE):
"Original Artist/Perfomer"
class TOL(TOLY):
"Original Lyricist"
class TOR(TORY):
"Original Release Year"
class TXX(TXXX):
"User-defined Text"
class WAF(WOAF):
"Official File Information"
class WAR(WOAR):
"Official Artist/Performer Information"
class WAS(WOAS):
"Official Source Information"
class WCM(WCOM):
"Commercial Information"
class WCP(WCOP):
"Copyright Information"
class WPB(WPUB):
"Official Publisher Information"
class WXX(WXXX):
"User-defined URL"
class IPL(IPLS):
"Involved people list"
class MCI(MCDI):
"Binary dump of CD's TOC"
class ETC(ETCO):
"Event timing codes"
class MLL(MLLT):
"MPEG location lookup table"
class STC(SYTC):
"Synced tempo codes"
class ULT(USLT):
"Unsychronised lyrics/text transcription"
class SLT(SYLT):
"Synchronised lyrics/text"
class COM(COMM):
"Comment"
class RVA(RVAD):
"Relative volume adjustment"
_framespec = [
RVASpec("adjustments", stereo_only=True),
]
def _to_other(self, other):
if not isinstance(other, RVAD):
raise TypeError
other.adjustments = list(self.adjustments)
# class EQU(EQUA)
class REV(RVRB):
"Reverb"
class PIC(APIC):
"""Attached Picture.
The 'mime' attribute of an ID3v2.2 attached picture must be either
'PNG' or 'JPG'.
"""
_framespec = [
EncodingSpec('encoding'),
StringSpec('mime', length=3, default="JPG"),
PictureTypeSpec('type'),
EncodedTextSpec('desc'),
BinaryDataSpec('data'),
]
def _to_other(self, other):
if not isinstance(other, APIC):
raise TypeError
other.encoding = self.encoding
other.mime = self.mime
other.type = self.type
other.desc = self.desc
other.data = self.data
class GEO(GEOB):
"General Encapsulated Object"
class CNT(PCNT):
"Play counter"
class POP(POPM):
"Popularimeter"
class BUF(RBUF):
"Recommended buffer size"
class CRM(Frame):
"""Encrypted meta frame"""
_framespec = [
Latin1TextSpec('owner'),
Latin1TextSpec('desc'),
BinaryDataSpec('data'),
]
def __eq__(self, other):
return self.data == other
__hash__ = Frame.__hash__
class CRA(AENC):
"Audio encryption"
class LNK(LINK):
"""Linked information"""
_framespec = [
FrameIDSpec('frameid', length=3),
Latin1TextSpec('url'),
BinaryDataSpec('data'),
]
def _to_other(self, other):
if not isinstance(other, LINK):
raise TypeError
if isinstance(other, LNK):
new_frameid = self.frameid
else:
try:
new_frameid = Frames_2_2[self.frameid].__bases__[0].__name__
except KeyError:
new_frameid = self.frameid.ljust(4)
# we could end up with invalid IDs here, so bypass the validation
other._setattr("frameid", new_frameid)
other.url = self.url
other.data = self.data
Frames = {}
"""All supported ID3v2.3/4 frames, keyed by frame name."""
Frames_2_2 = {}
"""All supported ID3v2.2 frames, keyed by frame name."""
k, v = None, None
for k, v in globals().items():
if isinstance(v, type) and issubclass(v, Frame):
v.__module__ = "mutagen.id3"
if len(k) == 3:
Frames_2_2[k] = v
elif len(k) == 4:
Frames[k] = v
try:
del k
del v
except NameError:
pass
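# Illustrative sketch, not part of the original module: the registries built
# above map frame IDs to classes, which is how raw frame data is dispatched
# during parsing:
#
#     >>> Frames["TIT2"]
#     <class 'mutagen.id3.TIT2'>
#     >>> Frames_2_2["TT2"].__bases__[0].__name__   # v2.2 -> v2.3/4 upgrade
#     'TIT2'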
# ---- file: rembo10_headphones/lib/mutagen/id3/_tags.py ----
# -*- coding: utf-8 -*-
# Copyright 2005 Michael Urman
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import re
import struct
from itertools import zip_longest
from mutagen._tags import Tags
from mutagen._util import DictProxy, convert_error, read_full
from ._util import BitPaddedInt, unsynch, ID3JunkFrameError, \
ID3EncryptionUnsupportedError, is_valid_frame_id, error, \
ID3NoHeaderError, ID3UnsupportedVersionError, ID3SaveConfig
from ._frames import TDRC, APIC, TDOR, TIME, TIPL, TORY, TDAT, Frames_2_2, \
TextFrame, TYER, Frame, IPLS, Frames
class ID3Header(object):
_V24 = (2, 4, 0)
_V23 = (2, 3, 0)
_V22 = (2, 2, 0)
_V11 = (1, 1)
f_unsynch = property(lambda s: bool(s._flags & 0x80))
f_extended = property(lambda s: bool(s._flags & 0x40))
f_experimental = property(lambda s: bool(s._flags & 0x20))
f_footer = property(lambda s: bool(s._flags & 0x10))
_known_frames = None
@property
def known_frames(self):
if self._known_frames is not None:
return self._known_frames
elif self.version >= ID3Header._V23:
return Frames
elif self.version >= ID3Header._V22:
return Frames_2_2
@convert_error(IOError, error)
def __init__(self, fileobj=None):
"""Raises ID3NoHeaderError, ID3UnsupportedVersionError or error"""
if fileobj is None:
# for testing
self._flags = 0
return
fn = getattr(fileobj, "name", "<unknown>")
data = fileobj.read(10)
if len(data) != 10:
raise ID3NoHeaderError("%s: too small" % fn)
id3, vmaj, vrev, flags, size = struct.unpack('>3sBBB4s', data)
self._flags = flags
self.size = BitPaddedInt(size) + 10
self.version = (2, vmaj, vrev)
if id3 != b'ID3':
raise ID3NoHeaderError("%r doesn't start with an ID3 tag" % fn)
if vmaj not in [2, 3, 4]:
raise ID3UnsupportedVersionError("%r ID3v2.%d not supported"
% (fn, vmaj))
if not BitPaddedInt.has_valid_padding(size):
raise error("Header size not synchsafe")
if (self.version >= self._V24) and (flags & 0x0f):
raise error(
"%r has invalid flags %#02x" % (fn, flags))
elif (self._V23 <= self.version < self._V24) and (flags & 0x1f):
raise error(
"%r has invalid flags %#02x" % (fn, flags))
if self.f_extended:
extsize_data = read_full(fileobj, 4)
frame_id = extsize_data.decode("ascii", "replace")
if frame_id in Frames:
# Some taggers set the extended header flag but
# don't write an extended header; in this case, the
# ID3 data follows immediately. Since no extended
# header is going to be long enough to actually match
# a frame, and if it's *not* a frame we're going to be
# completely lost anyway, this seems to be the most
# correct check.
# https://github.com/quodlibet/quodlibet/issues/126
self._flags ^= 0x40
extsize = 0
fileobj.seek(-4, 1)
elif self.version >= self._V24:
# "Where the 'Extended header size' is the size of the whole
# extended header, stored as a 32 bit synchsafe integer."
extsize = BitPaddedInt(extsize_data) - 4
if not BitPaddedInt.has_valid_padding(extsize_data):
raise error(
"Extended header size not synchsafe")
else:
# "Where the 'Extended header size', currently 6 or 10 bytes,
# excludes itself."
extsize = struct.unpack('>L', extsize_data)[0]
self._extdata = read_full(fileobj, extsize)
def determine_bpi(data, frames, EMPTY=b"\x00" * 10):
"""Takes id3v2.4 frame data and determines if ints or bitpaddedints
should be used for parsing. Needed because iTunes used to write
normal ints for frame sizes.
"""
# count number of tags found as BitPaddedInt and how far past
o = 0
asbpi = 0
while o < len(data) - 10:
part = data[o:o + 10]
if part == EMPTY:
bpioff = -((len(data) - o) % 10)
break
name, size, flags = struct.unpack('>4sLH', part)
size = BitPaddedInt(size)
o += 10 + size
try:
name = name.decode("ascii")
except UnicodeDecodeError:
continue
if name in frames:
asbpi += 1
else:
bpioff = o - len(data)
# count number of tags found as int and how far past
o = 0
asint = 0
while o < len(data) - 10:
part = data[o:o + 10]
if part == EMPTY:
intoff = -((len(data) - o) % 10)
break
name, size, flags = struct.unpack('>4sLH', part)
o += 10 + size
try:
name = name.decode("ascii")
except UnicodeDecodeError:
continue
if name in frames:
asint += 1
else:
intoff = o - len(data)
# prefer int if more frames parsed as int, or if tied and the
# synchsafe pass overran the data while the int pass did not
if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)):
return int
return BitPaddedInt
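# Hedged example (editor's sketch): for frame sizes below 128 the synchsafe
# and plain encodings coincide, so determine_bpi keeps the spec default.
# A single well-formed TIT2 frame with a 5-byte body (b"\x00\x00" is the
# flags field):
# data = b"TIT2" + BitPaddedInt.to_str(5, width=4) + b"\x00\x00" + b"\x03abcd"
# assert determine_bpi(data, Frames) is BitPaddedInt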
class ID3Tags(DictProxy, Tags):
__module__ = "mutagen.id3"
def __init__(self, *args, **kwargs):
self.unknown_frames = []
self._unknown_v2_version = 4
super(ID3Tags, self).__init__(*args, **kwargs)
def _read(self, header, data):
frames, unknown_frames, data = read_frames(
header, data, header.known_frames)
for frame in frames:
self._add(frame, False)
self.unknown_frames = unknown_frames
self._unknown_v2_version = header.version[1]
return data
def _write(self, config):
# Sort frames by 'importance', then by frame size and frame
# hash to get a stable result
order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"]
framedata = [
(f, save_frame(f, config=config)) for f in self.values()]
def get_prio(frame):
try:
return order.index(frame.FrameID)
except ValueError:
return len(order)
def sort_key(items):
frame, data = items
return (get_prio(frame), len(data), frame.HashKey)
framedata = [d for (f, d) in sorted(framedata, key=sort_key)]
# only write unknown frames if they were loaded from the version
# we are saving with. Theoretically we could upgrade frames
# but some frames can be nested like CHAP, so there is a chance
# we create a mixed frame mess.
if self._unknown_v2_version == config.v2_version:
framedata.extend(data for data in self.unknown_frames
if len(data) > 10)
return bytearray().join(framedata)
def getall(self, key):
"""Return all frames with a given name (the list may be empty).
Args:
key (text): key for frames to get
This is best explained by examples::
id3.getall('TIT2') == [id3['TIT2']]
id3.getall('TTTT') == []
id3.getall('TXXX') == [TXXX(desc='woo', text='bar'),
TXXX(desc='baz', text='quuuux'), ...]
Since this is based on the frame's HashKey, which is
colon-separated, you can use it to do things like
``getall('COMM:MusicMatch')`` or ``getall('TXXX:QuodLibet:')``.
"""
if key in self:
return [self[key]]
else:
key = key + ":"
return [v for s, v in self.items() if s.startswith(key)]
def setall(self, key, values):
"""Delete frames of the given type and add frames in 'values'.
Args:
key (text): key for frames to delete
values (list[Frame]): frames to add
"""
self.delall(key)
for tag in values:
self[tag.HashKey] = tag
def delall(self, key):
"""Delete all tags of a given kind; see getall.
Args:
key (text): key for frames to delete
"""
if key in self:
del(self[key])
else:
key = key + ":"
for k in list(self.keys()):
if k.startswith(key):
del(self[k])
def pprint(self):
"""
Returns:
text: tags in a human-readable format.
"Human-readable" is used loosely here. The format is intended
to mirror that used for Vorbis or APEv2 output, e.g.
``TIT2=My Title``
However, ID3 frames can have multiple keys:
``POPM=user@example.org=3 128/255``
"""
frames = sorted(Frame.pprint(s) for s in self.values())
return "\n".join(frames)
def _add(self, frame, strict):
"""Add a frame.
Args:
frame (Frame): the frame to add
strict (bool): if this should raise in case it can't be added
and frames shouldn't be merged.
"""
if not isinstance(frame, Frame):
raise TypeError("%r not a Frame instance" % frame)
orig_frame = frame
frame = frame._upgrade_frame()
if frame is None:
if not strict:
return
raise TypeError(
"Can't upgrade %r frame" % type(orig_frame).__name__)
hash_key = frame.HashKey
if strict or hash_key not in self:
self[hash_key] = frame
return
# Try to merge frames, or change the new one. Since changing
# the new one can lead to new conflicts, try until everything is
# either merged or added.
while True:
old_frame = self[hash_key]
new_frame = old_frame._merge_frame(frame)
new_hash = new_frame.HashKey
if new_hash == hash_key:
self[hash_key] = new_frame
break
else:
assert new_frame is frame
if new_hash not in self:
self[new_hash] = new_frame
break
hash_key = new_hash
def loaded_frame(self, tag):
"""Deprecated; use the add method."""
self._add(tag, True)
def add(self, frame):
"""Add a frame to the tag."""
# add = loaded_frame (and vice versa) break applications that
# expect to be able to override loaded_frame (e.g. Quod Libet),
# as does making loaded_frame call add.
self.loaded_frame(frame)
def __setitem__(self, key, tag):
if not isinstance(tag, Frame):
raise TypeError("%r not a Frame instance" % tag)
super(ID3Tags, self).__setitem__(key, tag)
def __update_common(self):
"""Updates done by both v23 and v24 update"""
if "TCON" in self:
# Get rid of "(xx)Foobr" format.
self["TCON"].genres = self["TCON"].genres
mimes = {"PNG": "image/png", "JPG": "image/jpeg"}
for pic in self.getall("APIC"):
if pic.mime in mimes:
newpic = APIC(
encoding=pic.encoding, mime=mimes[pic.mime],
type=pic.type, desc=pic.desc, data=pic.data)
self.add(newpic)
def update_to_v24(self):
"""Convert older tags into an ID3v2.4 tag.
This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
TDRC). If you intend to save tags, you must call this function
at some point; it is called by default when loading the tag.
"""
self.__update_common()
# TDAT, TYER, and TIME have been turned into TDRC.
timestamps = []
old_frames = [self.pop(n, []) for n in ["TYER", "TDAT", "TIME"]]
for y, d, t in zip_longest(*old_frames, fillvalue=u""):
ym = re.match(r"([0-9]+)\Z", y)
dm = re.match(r"([0-9]{2})([0-9]{2})\Z", d)
tm = re.match(r"([0-9]{2})([0-9]{2})\Z", t)
timestamp = ""
if ym:
timestamp += u"%s" % ym.groups()
if dm:
timestamp += u"-%s-%s" % dm.groups()[::-1]
if tm:
timestamp += u"T%s:%s:00" % tm.groups()
if timestamp:
timestamps.append(timestamp)
if timestamps and "TDRC" not in self:
self.add(TDRC(encoding=0, text=timestamps))
# TORY can be the first part of a TDOR.
if "TORY" in self:
f = self.pop("TORY")
if "TDOR" not in self:
try:
self.add(TDOR(encoding=0, text=str(f)))
except UnicodeDecodeError:
pass
# IPLS is now TIPL.
if "IPLS" in self:
f = self.pop("IPLS")
if "TIPL" not in self:
self.add(TIPL(encoding=f.encoding, people=f.people))
# These can't be trivially translated to any ID3v2.4 tags, or
# should have been removed already.
for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]:
if key in self:
del(self[key])
# Recurse into chapters
for f in self.getall("CHAP"):
f.sub_frames.update_to_v24()
for f in self.getall("CTOC"):
f.sub_frames.update_to_v24()
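# Hedged example (editor's sketch): a v2.3-style TYER plus TDAT ("DDMM")
# collapse into a single v2.4 TDRC timestamp on upgrade:
# tags = ID3Tags()
# tags.add(TYER(encoding=0, text=["2004"]))
# tags.add(TDAT(encoding=0, text=["3112"]))
# tags.update_to_v24()
# assert str(tags["TDRC"]) == "2004-12-31"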
def update_to_v23(self):
"""Convert older (and newer) tags into an ID3v2.3 tag.
This updates incompatible ID3v2 frames to ID3v2.3 ones. If you
intend to save tags as ID3v2.3, you must call this function
at some point.
If you want to go off spec and include some v2.4 frames
in v2.3, remove them before calling this and add them back afterwards.
"""
self.__update_common()
# TMCL, TIPL -> TIPL
if "TIPL" in self or "TMCL" in self:
people = []
if "TIPL" in self:
f = self.pop("TIPL")
people.extend(f.people)
if "TMCL" in self:
f = self.pop("TMCL")
people.extend(f.people)
if "IPLS" not in self:
self.add(IPLS(encoding=f.encoding, people=people))
# TDOR -> TORY
if "TDOR" in self:
f = self.pop("TDOR")
if f.text:
d = f.text[0]
if d.year and "TORY" not in self:
self.add(TORY(encoding=f.encoding, text="%04d" % d.year))
# TDRC -> TYER, TDAT, TIME
if "TDRC" in self:
f = self.pop("TDRC")
if f.text:
d = f.text[0]
if d.year and "TYER" not in self:
self.add(TYER(encoding=f.encoding, text="%04d" % d.year))
if d.month and d.day and "TDAT" not in self:
self.add(TDAT(encoding=f.encoding,
text="%02d%02d" % (d.day, d.month)))
if d.hour and d.minute and "TIME" not in self:
self.add(TIME(encoding=f.encoding,
text="%02d%02d" % (d.hour, d.minute)))
# New frames added in v2.4
v24_frames = [
'ASPI', 'EQU2', 'RVA2', 'SEEK', 'SIGN', 'TDEN', 'TDOR',
'TDRC', 'TDRL', 'TDTG', 'TIPL', 'TMCL', 'TMOO', 'TPRO',
'TSOA', 'TSOP', 'TSOT', 'TSST',
]
for key in v24_frames:
if key in self:
del(self[key])
# Recurse into chapters
for f in self.getall("CHAP"):
f.sub_frames.update_to_v23()
for f in self.getall("CTOC"):
f.sub_frames.update_to_v23()
def _copy(self):
"""Creates a shallow copy of all tags"""
items = self.items()
subs = {}
for f in (self.getall("CHAP") + self.getall("CTOC")):
subs[f.HashKey] = f.sub_frames._copy()
return (items, subs)
def _restore(self, value):
"""Restores the state copied with _copy()"""
items, subs = value
self.clear()
for key, value in items:
self[key] = value
if key in subs:
value.sub_frames._restore(subs[key])
def save_frame(frame, name=None, config=None):
if config is None:
config = ID3SaveConfig()
flags = 0
if isinstance(frame, TextFrame):
if len(str(frame)) == 0:
return b''
framedata = frame._writeData(config)
usize = len(framedata)
if usize > 2048:
# Disabled as this causes iTunes and other programs
# to fail to find these frames, which usually includes
# e.g. APIC.
# framedata = BitPaddedInt.to_str(usize) + framedata.encode('zlib')
# flags |= Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN
pass
if config.v2_version == 4:
bits = 7
elif config.v2_version == 3:
bits = 8
else:
raise ValueError
datasize = BitPaddedInt.to_str(len(framedata), width=4, bits=bits)
if name is not None:
assert isinstance(name, bytes)
frame_name = name
else:
frame_name = type(frame).__name__
frame_name = frame_name.encode("ascii")
header = struct.pack('>4s4sH', frame_name, datasize, flags)
return header + framedata
def read_frames(id3, data, frames):
"""Does not error out"""
assert id3.version >= ID3Header._V22
result = []
unsupported_frames = []
if id3.version < ID3Header._V24 and id3.f_unsynch:
try:
data = unsynch.decode(data)
except ValueError:
pass
if id3.version >= ID3Header._V23:
if id3.version < ID3Header._V24:
bpi = int
else:
bpi = determine_bpi(data, frames)
while data:
header = data[:10]
try:
name, size, flags = struct.unpack('>4sLH', header)
except struct.error:
break # not enough header
if name.strip(b'\x00') == b'':
break
size = bpi(size)
framedata = data[10:10 + size]
data = data[10 + size:]
if size == 0:
continue # drop empty frames
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
try:
# someone writes 2.3 frames with 2.2 names
if name[-1] == "\x00":
tag = Frames_2_2[name[:-1]]
name = tag.__base__.__name__
tag = frames[name]
except KeyError:
if is_valid_frame_id(name):
unsupported_frames.append(header + framedata)
else:
try:
result.append(tag._fromData(id3, flags, framedata))
except NotImplementedError:
unsupported_frames.append(header + framedata)
except ID3JunkFrameError:
pass
elif id3.version >= ID3Header._V22:
while data:
header = data[0:6]
try:
name, size = struct.unpack('>3s3s', header)
except struct.error:
break # not enough header
size, = struct.unpack('>L', b'\x00' + size)
if name.strip(b'\x00') == b'':
break
framedata = data[6:6 + size]
data = data[6 + size:]
if size == 0:
continue # drop empty frames
try:
name = name.decode('ascii')
except UnicodeDecodeError:
continue
try:
tag = frames[name]
except KeyError:
if is_valid_frame_id(name):
unsupported_frames.append(header + framedata)
else:
try:
result.append(
tag._fromData(id3, 0, framedata))
except (ID3EncryptionUnsupportedError,
NotImplementedError):
unsupported_frames.append(header + framedata)
except ID3JunkFrameError:
pass
return result, unsupported_frames, data
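# Hedged round-trip sketch (editor's example): a frame serialized by
# save_frame parses back via read_frames. ID3Header() without a file is the
# testing constructor used above, so the version is filled in by hand:
# header = ID3Header()
# header.version = ID3Header._V24
# data = save_frame(TDRC(encoding=0, text=["2004"]))
# frames, unknown, rest = read_frames(header, data, Frames)
# assert frames[0].HashKey == "TDRC" and not unknown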
# ==== end of _tags.py (Python, 515 lines, 20,827 bytes) ====

# ==== file 8,532: rembo10_headphones/lib/mutagen/_tools/moggsplit.py ====
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Split a multiplex/chained Ogg file into its component parts."""
import os
import mutagen.ogg
from mutagen._senf import argv
from ._util import SignalHandler, OptionParser
_sig = SignalHandler()
def main(argv):
from mutagen.ogg import OggPage
parser = OptionParser(
usage="%prog [options] filename.ogg ...",
description="Split Ogg logical streams using Mutagen.",
version="Mutagen %s" % ".".join(map(str, mutagen.version))
)
parser.add_option(
"--extension", dest="extension", default="ogg", metavar='ext',
help="use this extension (default 'ogg')")
parser.add_option(
"--pattern", dest="pattern", default="%(base)s-%(stream)d.%(ext)s",
metavar='pattern', help="name files using this pattern")
parser.add_option(
"--m3u", dest="m3u", action="store_true", default=False,
help="generate an m3u (playlist) file")
(options, args) = parser.parse_args(argv[1:])
if not args:
raise SystemExit(parser.print_help() or 1)
format = {'ext': options.extension}
for filename in args:
with _sig.block():
fileobjs = {}
format["base"] = os.path.splitext(os.path.basename(filename))[0]
with open(filename, "rb") as fileobj:
if options.m3u:
m3u = open(format["base"] + ".m3u", "w")
fileobjs["m3u"] = m3u
else:
m3u = None
while True:
try:
page = OggPage(fileobj)
except EOFError:
break
else:
format["stream"] = page.serial
if page.serial not in fileobjs:
new_filename = options.pattern % format
new_fileobj = open(new_filename, "wb")
fileobjs[page.serial] = new_fileobj
if m3u:
m3u.write(new_filename + "\r\n")
fileobjs[page.serial].write(page.write())
for f in fileobjs.values():
f.close()
def entry_point():
_sig.init()
return main(argv)
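# Usage sketch (editor's note; assumes the console script is installed as
# "moggsplit", which is how mutagen ships it):
#   moggsplit --m3u multiplexed.ogg
# writes one "multiplexed-<serial>.ogg" per logical stream, following the
# default --pattern above, plus an m3u playlist listing the new files.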
# ==== end of moggsplit.py (Python, 62 lines, 2,580 bytes) ====

# ==== file 8,533: rembo10_headphones/lib/mutagen/_tools/mutagen_inspect.py ====
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Full tag list for any given file."""
from mutagen._senf import print_, argv
from ._util import SignalHandler, OptionParser
_sig = SignalHandler()
def main(argv):
from mutagen import File
parser = OptionParser()
parser.add_option("--no-flac", help="Compatibility; does nothing.")
parser.add_option("--no-mp3", help="Compatibility; does nothing.")
parser.add_option("--no-apev2", help="Compatibility; does nothing.")
(options, args) = parser.parse_args(argv[1:])
if not args:
raise SystemExit(parser.print_help() or 1)
for filename in args:
print_(u"--", filename)
try:
print_(u"-", File(filename).pprint())
except AttributeError:
print_(u"- Unknown file type")
except Exception as err:
print_(str(err))
print_(u"")
def entry_point():
_sig.init()
return main(argv)
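# Usage sketch (editor's note; assumes the console script is installed as
# "mutagen-inspect"):
#   mutagen-inspect song.mp3 song.flac
# prints the full tag list for each file, or "Unknown file type" when
# mutagen.File cannot identify it.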
# ==== end of mutagen_inspect.py (Python, 32 lines, 1,207 bytes) ====

# ==== file 8,534: rembo10_headphones/lib/mutagen/_tools/_util.py ====
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import signal
import contextlib
import optparse
from mutagen._senf import print_
from mutagen._util import iterbytes
def split_escape(string, sep, maxsplit=None, escape_char="\\"):
"""Like unicode/str/bytes.split but allows for the separator to be escaped
If passed unicode/str/bytes will only return list of unicode/str/bytes.
"""
assert len(sep) == 1
assert len(escape_char) == 1
if isinstance(string, bytes):
if isinstance(escape_char, str):
escape_char = escape_char.encode("ascii")
iter_ = iterbytes
else:
iter_ = iter
if maxsplit is None:
maxsplit = len(string)
empty = string[:0]
result = []
current = empty
escaped = False
for char in iter_(string):
if escaped:
if char != escape_char and char != sep:
current += escape_char
current += char
escaped = False
else:
if char == escape_char:
escaped = True
elif char == sep and len(result) < maxsplit:
result.append(current)
current = empty
else:
current += char
result.append(current)
return result
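# Illustrative checks (editor's sketch) for the escaping behaviour above:
# split_escape("a\\:b:c", ":") == ["a:b", "c"]        # escaped sep is kept
# split_escape("a:b:c", ":", maxsplit=1) == ["a", "b:c"]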
class SignalHandler(object):
def __init__(self):
self._interrupted = False
self._nosig = False
self._init = False
def init(self):
signal.signal(signal.SIGINT, self._handler)
signal.signal(signal.SIGTERM, self._handler)
if os.name != "nt":
signal.signal(signal.SIGHUP, self._handler)
def _handler(self, signum, frame):
self._interrupted = True
if not self._nosig:
raise SystemExit("Aborted...")
@contextlib.contextmanager
def block(self):
"""While this context manager is active any signals for aborting
the process will be queued and exit the program once the context
is left.
"""
self._nosig = True
yield
self._nosig = False
if self._interrupted:
raise SystemExit("Aborted...")
class OptionParser(optparse.OptionParser):
"""OptionParser subclass which supports printing Unicode under Windows"""
def print_help(self, file=None):
print_(self.format_help(), file=file)
# ==== end of _util.py (Python, 76 lines, 2,629 bytes) ====

# ==== file 8,535: rembo10_headphones/lib/mutagen/_tools/__init__.py ====
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# ==== end of __init__.py (Python, 7 lines, 308 bytes) ====

# ==== file 8,536: rembo10_headphones/lib/mutagen/_tools/mid3cp.py ====
# -*- coding: utf-8 -*-
# Copyright 2014 Marcus Sundman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""A program replicating the functionality of id3lib's id3cp, using mutagen for
tag loading and saving.
"""
import sys
import os.path
import mutagen
import mutagen.id3
from mutagen._senf import print_, argv
from ._util import SignalHandler, OptionParser
VERSION = (0, 1)
_sig = SignalHandler()
def printerr(*args, **kwargs):
kwargs.setdefault("file", sys.stderr)
print_(*args, **kwargs)
class ID3OptionParser(OptionParser):
def __init__(self):
mutagen_version = mutagen.version_string
my_version = ".".join(map(str, VERSION))
version = "mid3cp %s\nUses Mutagen %s" % (my_version, mutagen_version)
self.disable_interspersed_args()
OptionParser.__init__(
self, version=version,
usage="%prog [option(s)] <src> <dst>",
description=("Copies ID3 tags from <src> to <dst>. Mutagen-based "
"replacement for id3lib's id3cp."))
def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False):
"""Returns 0 on success"""
if excluded_tags is None:
excluded_tags = []
try:
id3 = mutagen.id3.ID3(src, translate=False)
except mutagen.id3.ID3NoHeaderError:
print_(u"No ID3 header found in ", src, file=sys.stderr)
return 1
except Exception as err:
print_(str(err), file=sys.stderr)
return 1
if verbose:
print_(u"File", src, u"contains:", file=sys.stderr)
print_(id3.pprint(), file=sys.stderr)
for tag in excluded_tags:
id3.delall(tag)
if merge:
try:
target = mutagen.id3.ID3(dst, translate=False)
except mutagen.id3.ID3NoHeaderError:
# no need to merge
pass
except Exception as err:
print_(str(err), file=sys.stderr)
return 1
else:
for frame in id3.values():
target.add(frame)
id3 = target
# if the source is 2.3 save it as 2.3
if id3.version < (2, 4, 0):
id3.update_to_v23()
v2_version = 3
else:
id3.update_to_v24()
v2_version = 4
try:
id3.save(dst, v1=(2 if write_v1 else 0), v2_version=v2_version)
except Exception as err:
print_(u"Error saving", dst, u":\n%s" % str(err),
file=sys.stderr)
return 1
else:
if verbose:
print_(u"Successfully saved", dst, file=sys.stderr)
return 0
def main(argv):
parser = ID3OptionParser()
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="print out saved tags", default=False)
parser.add_option("--write-v1", action="store_true", dest="write_v1",
default=False, help="write id3v1 tags")
parser.add_option("-x", "--exclude-tag", metavar="TAG", action="append",
dest="x", help="exclude the specified tag", default=[])
parser.add_option("--merge", action="store_true",
help="Copy over frames instead of the whole ID3 tag",
default=False)
(options, args) = parser.parse_args(argv[1:])
if len(args) != 2:
parser.print_help(file=sys.stderr)
return 1
(src, dst) = args
if not os.path.isfile(src):
print_(u"File not found:", src, file=sys.stderr)
parser.print_help(file=sys.stderr)
return 1
if not os.path.isfile(dst):
printerr(u"File not found:", dst, file=sys.stderr)
parser.print_help(file=sys.stderr)
return 1
# Strip tags - "-x FOO" adds whitespace at the beginning of the tag name
excluded_tags = [x.strip() for x in options.x]
with _sig.block():
return copy(src, dst, options.merge, options.write_v1, excluded_tags,
options.verbose)
def entry_point():
_sig.init()
return main(argv)
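# Usage sketch (editor's note; assumes the console script is installed as
# "mid3cp"):
#   mid3cp --merge -x APIC src.mp3 dst.mp3
# merges all frames except attached pictures from src.mp3 into dst.mp3's
# existing tag instead of replacing it outright.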
# ==== end of mid3cp.py (Python, 111 lines, 4,212 bytes) ====

# ==== file 8,537: rembo10_headphones/lib/mutagen/_tools/mid3iconv.py ====
# -*- coding: utf-8 -*-
# Copyright 2006 Emfox Zhou <EmfoxZhou@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
ID3iconv is a Java-based ID3 encoding converter; this is the Python version.
"""
import sys
import locale
import mutagen
import mutagen.id3
from mutagen._senf import argv, print_, fsnative
from ._util import SignalHandler, OptionParser
VERSION = (0, 3)
_sig = SignalHandler()
def getpreferredencoding():
return locale.getpreferredencoding() or "utf-8"
def isascii(string):
"""Checks whether a unicode string is non-empty and contains only ASCII
characters.
"""
if not string:
return False
try:
string.encode('ascii')
except UnicodeEncodeError:
return False
return True
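# Illustrative checks (editor's sketch): only non-empty pure-ASCII strings
# pass, which is what the encoding downgrade in update() relies on:
# isascii(u"title") is True
# isascii(u"t\xeftle") is False
# isascii(u"") is False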
class ID3OptionParser(OptionParser):
def __init__(self):
mutagen_version = ".".join(map(str, mutagen.version))
my_version = ".".join(map(str, VERSION))
version = "mid3iconv %s\nUses Mutagen %s" % (
my_version, mutagen_version)
OptionParser.__init__(
self, version=version,
usage="%prog [OPTION] [FILE]...",
description=("Mutagen-based replacement the id3iconv utility, "
"which converts ID3 tags from legacy encodings "
"to Unicode and stores them using the ID3v2 format."))
def format_help(self, *args, **kwargs):
text = OptionParser.format_help(self, *args, **kwargs)
return text + "\nFiles are updated in-place, so use --dry-run first.\n"
def update(options, filenames):
encoding = options.encoding or getpreferredencoding()
verbose = options.verbose
noupdate = options.noupdate
force_v1 = options.force_v1
remove_v1 = options.remove_v1
def conv(uni):
return uni.encode('iso-8859-1').decode(encoding)
for filename in filenames:
with _sig.block():
if verbose != "quiet":
print_(u"Updating", filename)
if has_id3v1(filename) and not noupdate and force_v1:
mutagen.id3.delete(filename, False, True)
try:
id3 = mutagen.id3.ID3(filename)
except mutagen.id3.ID3NoHeaderError:
if verbose != "quiet":
print_(u"No ID3 header found; skipping...")
continue
except Exception as err:
print_(str(err), file=sys.stderr)
continue
for tag in filter(lambda t: t.startswith(("T", "COMM")), id3):
frame = id3[tag]
if isinstance(frame, mutagen.id3.TimeStampTextFrame):
# non-unicode fields
continue
try:
text = frame.text
except AttributeError:
continue
try:
text = [conv(x) for x in frame.text]
except (UnicodeError, LookupError):
continue
else:
frame.text = text
if not text or min(map(isascii, text)):
frame.encoding = 3
else:
frame.encoding = 1
if verbose == "debug":
print_(id3.pprint())
if not noupdate:
if remove_v1:
id3.save(filename, v1=False)
else:
id3.save(filename)
def has_id3v1(filename):
try:
with open(filename, 'rb') as f:
f.seek(-128, 2)
return f.read(3) == b"TAG"
except IOError:
return False
def main(argv):
parser = ID3OptionParser()
parser.add_option(
"-e", "--encoding", metavar="ENCODING", action="store",
type="string", dest="encoding",
help=("Specify original tag encoding (default is %s)" % (
getpreferredencoding())))
parser.add_option(
"-p", "--dry-run", action="store_true", dest="noupdate",
help="Do not actually modify files")
parser.add_option(
"--force-v1", action="store_true", dest="force_v1",
help="Use an ID3v1 tag even if an ID3v2 tag is present")
parser.add_option(
"--remove-v1", action="store_true", dest="remove_v1",
help="Remove v1 tag after processing the files")
parser.add_option(
"-q", "--quiet", action="store_const", dest="verbose",
const="quiet", help="Only output errors")
parser.add_option(
"-d", "--debug", action="store_const", dest="verbose",
const="debug", help="Output updated tags")
for i, arg in enumerate(argv):
if arg == "-v1":
argv[i] = fsnative(u"--force-v1")
elif arg == "-removev1":
argv[i] = fsnative(u"--remove-v1")
(options, args) = parser.parse_args(argv[1:])
if args:
update(options, args)
else:
parser.print_help()
def entry_point():
_sig.init()
return main(argv)
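# Usage sketch (editor's note; assumes the console script is installed as
# "mid3iconv"; GBK is just an example encoding):
#   mid3iconv -e GBK --dry-run *.mp3
# previews re-decoding latin-1-stored text as GBK; drop --dry-run to save.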
# ==== end of mid3iconv.py (Python, 137 lines, 5,238 bytes) ====

# ==== file 8,538: rembo10_headphones/lib/mutagen/_tools/mutagen_pony.py ====
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig, Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
import traceback
from mutagen._senf import print_, argv
from ._util import SignalHandler
class Report(object):
def __init__(self, pathname):
self.name = pathname
self.files = 0
self.unsync = 0
self.missings = 0
self.errors = []
self.exceptions = {}
self.versions = {}
def missing(self, filename):
self.missings += 1
self.files += 1
def error(self, filename):
Ex, value, trace = sys.exc_info()
self.exceptions.setdefault(Ex, 0)
self.exceptions[Ex] += 1
self.errors.append((filename, Ex, value, trace))
self.files += 1
def success(self, id3):
self.versions.setdefault(id3.version, 0)
self.versions[id3.version] += 1
self.files += 1
if id3.f_unsynch:
self.unsync += 1
def __str__(self):
strings = ["-- Report for %s --" % self.name]
if self.files == 0:
return strings[0] + "\n" + "No MP3 files found.\n"
good = self.files - len(self.errors)
strings.append("Loaded %d/%d files (%d%%)" % (
good, self.files, (float(good) / self.files) * 100))
strings.append("%d files with unsynchronized frames." % self.unsync)
strings.append("%d files without tags." % self.missings)
strings.append("\nID3 Versions:")
items = list(self.versions.items())
items.sort()
for v, i in items:
strings.append(" %s\t%d" % (".".join(map(str, v)), i))
if self.exceptions:
strings.append("\nExceptions:")
items = list(self.exceptions.items())
items.sort()
for Ex, i in items:
strings.append(" %-20s\t%d" % (Ex.__name__, i))
if self.errors:
strings.append("\nERRORS:\n")
for filename, Ex, value, trace in self.errors:
strings.append("\nReading %s:" % filename)
strings.append(
"".join(traceback.format_exception(Ex, value, trace)[1:]))
else:
strings.append("\nNo errors!")
return("\n".join(strings))
def check_dir(path):
from mutagen.mp3 import MP3
rep = Report(path)
print_(u"Scanning", path)
for path, dirs, files in os.walk(path):
files.sort()
for fn in files:
if not fn.lower().endswith('.mp3'):
continue
ffn = os.path.join(path, fn)
try:
mp3 = MP3(ffn)
except Exception:
rep.error(ffn)
else:
if mp3.tags is None:
rep.missing(ffn)
else:
rep.success(mp3.tags)
print_(str(rep))
def main(argv):
if len(argv) == 1:
print_(u"Usage:", argv[0], u"directory ...")
else:
for path in argv[1:]:
check_dir(path)
def entry_point():
SignalHandler().init()
return main(argv)
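# Usage sketch (editor's note; assumes the console script is installed as
# "mutagen-pony"):
#   mutagen-pony ~/Music
# recursively loads every .mp3 under each given directory and prints a
# report of load successes, ID3 versions seen, and any tracebacks.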
# ==== end of mutagen_pony.py (Python, 94 lines, 3,332 bytes) ====

# ==== file 8,539: rembo10_headphones/lib/mutagen/_tools/mid3v2.py ====
# -*- coding: utf-8 -*-
# Copyright 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Pretend to be /usr/bin/id3v2 from id3lib, sort of."""
import sys
import codecs
import mimetypes
import warnings
from optparse import SUPPRESS_HELP
import mutagen
import mutagen.id3
from mutagen.id3 import Encoding, PictureType
from mutagen._senf import fsnative, print_, argv, fsn2text, fsn2bytes, \
bytes2fsn
from ._util import split_escape, SignalHandler, OptionParser
VERSION = (1, 3)
_sig = SignalHandler()
verbose = True
class ID3OptionParser(OptionParser):
def __init__(self):
mutagen_version = ".".join(map(str, mutagen.version))
my_version = ".".join(map(str, VERSION))
version = "mid3v2 %s\nUses Mutagen %s" % (my_version, mutagen_version)
self.edits = []
OptionParser.__init__(
self, version=version,
usage="%prog [OPTION] [FILE]...",
description="Mutagen-based replacement for id3lib's id3v2.")
def format_help(self, *args, **kwargs):
text = OptionParser.format_help(self, *args, **kwargs)
return text + """\
You can set the value for any ID3v2 frame by using '--' and then a frame ID.
For example:
mid3v2 --TIT3 "Monkey!" file.mp3
would set the "Subtitle/Description" frame to "Monkey!".
Any editing operation will cause the ID3 tag to be upgraded to ID3v2.4.
"""
def list_frames(option, opt, value, parser):
items = mutagen.id3.Frames.items()
for name, frame in sorted(items):
print_(u" --%s %s" % (name, frame.__doc__.split("\n")[0]))
raise SystemExit
def list_frames_2_2(option, opt, value, parser):
items = mutagen.id3.Frames_2_2.items()
for name, frame in sorted(items):
print_(u" --%s %s" % (name, frame.__doc__.split("\n")[0]))
raise SystemExit
def list_genres(option, opt, value, parser):
for i, genre in enumerate(mutagen.id3.TCON.GENRES):
print_(u"%3d: %s" % (i, genre))
raise SystemExit
def delete_tags(filenames, v1, v2):
for filename in filenames:
with _sig.block():
if verbose:
print_(u"deleting ID3 tag info in", filename, file=sys.stderr)
mutagen.id3.delete(filename, v1, v2)
def delete_frames(deletes, filenames):
try:
deletes = frame_from_fsnative(deletes)
except ValueError as err:
print_(str(err), file=sys.stderr)
frames = deletes.split(",")
for filename in filenames:
with _sig.block():
if verbose:
print_(u"deleting %s from" % deletes, filename,
file=sys.stderr)
try:
id3 = mutagen.id3.ID3(filename)
except mutagen.id3.ID3NoHeaderError:
if verbose:
print_(u"No ID3 header found; skipping.", file=sys.stderr)
except Exception as err:
print_(str(err), file=sys.stderr)
raise SystemExit(1)
else:
for frame in frames:
id3.delall(frame)
id3.save()
def frame_from_fsnative(arg):
"""Takes item from argv and returns ascii native str
or raises ValueError.
"""
assert isinstance(arg, fsnative)
text = fsn2text(arg, strict=True)
return text.encode("ascii").decode("ascii")
def value_from_fsnative(arg, escape):
"""Takes an item from argv and returns a str value without
surrogate escapes or raises ValueError.
"""
assert isinstance(arg, fsnative)
if escape:
bytes_ = fsn2bytes(arg)
# With py3.7 this has started to warn for invalid escapes, but we
# don't control the input so ignore it.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bytes_ = codecs.escape_decode(bytes_)[0]
arg = bytes2fsn(bytes_)
text = fsn2text(arg, strict=True)
return text
def error(*args):
print_(*args, file=sys.stderr)
raise SystemExit(1)
def get_frame_encoding(frame_id, value):
if frame_id == "APIC":
# See https://github.com/beetbox/beets/issues/899#issuecomment-62437773
return Encoding.UTF16 if value else Encoding.LATIN1
else:
return Encoding.UTF8
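# Illustrative checks (editor's sketch) for the special-casing above:
# get_frame_encoding("APIC", u"") == Encoding.LATIN1      # empty description
# get_frame_encoding("APIC", u"cover") == Encoding.UTF16
# get_frame_encoding("TIT2", u"anything") == Encoding.UTF8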
def write_files(edits, filenames, escape):
# unescape escape sequences and decode values
encoded_edits = []
for frame, value in edits:
if not value:
continue
try:
frame = frame_from_fsnative(frame)
except ValueError as err:
print_(str(err), file=sys.stderr)
assert isinstance(frame, str)
# strip "--"
frame = frame[2:]
try:
value = value_from_fsnative(value, escape)
except ValueError as err:
error(u"%s: %s" % (frame, str(err)))
assert isinstance(value, str)
encoded_edits.append((frame, value))
edits = encoded_edits
# preprocess:
# for all [frame,value] pairs in the edits list
# gather values for identical frames into a list
tmp = {}
for frame, value in edits:
if frame in tmp:
tmp[frame].append(value)
else:
tmp[frame] = [value]
# edits is now a dictionary of frame -> [list of values]
edits = tmp
# escape also enables escaping of the split separator
if escape:
string_split = split_escape
else:
string_split = lambda s, *args, **kwargs: s.split(*args, **kwargs)
for filename in filenames:
with _sig.block():
if verbose:
print_(u"Writing", filename, file=sys.stderr)
try:
id3 = mutagen.id3.ID3(filename)
except mutagen.id3.ID3NoHeaderError:
if verbose:
print_(u"No ID3 header found; creating a new tag",
file=sys.stderr)
id3 = mutagen.id3.ID3()
except Exception as err:
print_(str(err), file=sys.stderr)
continue
for (frame, vlist) in edits.items():
if frame == "POPM":
for value in vlist:
values = string_split(value, ":")
if len(values) == 1:
email, rating, count = values[0], 0, 0
elif len(values) == 2:
email, rating, count = values[0], values[1], 0
else:
email, rating, count = values
frame = mutagen.id3.POPM(
email=email, rating=int(rating), count=int(count))
id3.add(frame)
elif frame == "APIC":
for value in vlist:
values = string_split(value, ":")
# FIXME: doesn't support filenames with an invalid
# encoding since we have already decoded at that point
fn = values[0]
if len(values) >= 2:
desc = values[1]
else:
desc = u"cover"
if len(values) >= 3:
try:
picture_type = int(values[2])
except ValueError:
error(u"Invalid picture type: %r" % values[1])
else:
picture_type = PictureType.COVER_FRONT
if len(values) >= 4:
mime = values[3]
else:
mime = mimetypes.guess_type(fn)[0] or "image/jpeg"
if len(values) >= 5:
error("APIC: Invalid format")
encoding = get_frame_encoding(frame, desc)
try:
with open(fn, "rb") as h:
data = h.read()
except IOError as e:
error(str(e))
frame = mutagen.id3.APIC(encoding=encoding, mime=mime,
desc=desc, type=picture_type, data=data)
id3.add(frame)
elif frame == "COMM":
for value in vlist:
values = string_split(value, ":")
if len(values) == 1:
value, desc, lang = values[0], "", "eng"
elif len(values) == 2:
desc, value, lang = values[0], values[1], "eng"
else:
value = ":".join(values[1:-1])
desc, lang = values[0], values[-1]
frame = mutagen.id3.COMM(
encoding=3, text=value, lang=lang, desc=desc)
id3.add(frame)
elif frame == "USLT":
for value in vlist:
values = string_split(value, ":")
if len(values) == 1:
value, desc, lang = values[0], "", "eng"
elif len(values) == 2:
desc, value, lang = values[0], values[1], "eng"
else:
value = ":".join(values[1:-1])
desc, lang = values[0], values[-1]
frame = mutagen.id3.USLT(
encoding=3, text=value, lang=lang, desc=desc)
id3.add(frame)
elif frame == "UFID":
for value in vlist:
values = string_split(value, ":")
if len(values) != 2:
error(u"Invalid value: %r" % values)
owner = values[0]
data = values[1].encode("utf-8")
frame = mutagen.id3.UFID(owner=owner, data=data)
id3.add(frame)
elif frame == "TXXX":
for value in vlist:
values = string_split(value, ":", 1)
if len(values) == 1:
desc, value = "", values[0]
else:
desc, value = values[0], values[1]
frame = mutagen.id3.TXXX(
encoding=3, text=value, desc=desc)
id3.add(frame)
elif frame == "WXXX":
for value in vlist:
values = string_split(value, ":", 1)
if len(values) == 1:
desc, value = "", values[0]
else:
desc, value = values[0], values[1]
frame = mutagen.id3.WXXX(
encoding=3, url=value, desc=desc)
id3.add(frame)
elif issubclass(mutagen.id3.Frames[frame],
mutagen.id3.UrlFrame):
frame = mutagen.id3.Frames[frame](
encoding=3, url=vlist[-1])
id3.add(frame)
else:
frame = mutagen.id3.Frames[frame](encoding=3, text=vlist)
id3.add(frame)
id3.save(filename)
def list_tags(filenames):
for filename in filenames:
print_("IDv2 tag info for", filename)
try:
id3 = mutagen.id3.ID3(filename, translate=False)
except mutagen.id3.ID3NoHeaderError:
print_(u"No ID3 header found; skipping.")
except Exception as err:
print_(str(err), file=sys.stderr)
raise SystemExit(1)
else:
print_(id3.pprint())
def list_tags_raw(filenames):
for filename in filenames:
print_("Raw IDv2 tag info for", filename)
try:
id3 = mutagen.id3.ID3(filename, translate=False)
except mutagen.id3.ID3NoHeaderError:
print_(u"No ID3 header found; skipping.")
except Exception as err:
print_(str(err), file=sys.stderr)
raise SystemExit(1)
else:
for frame in id3.values():
print_(str(repr(frame)))
def main(argv):
parser = ID3OptionParser()
parser.add_option(
"-v", "--verbose", action="store_true", dest="verbose", default=False,
help="be verbose")
parser.add_option(
"-q", "--quiet", action="store_false", dest="verbose",
help="be quiet (the default)")
parser.add_option(
"-e", "--escape", action="store_true", default=False,
help="enable interpretation of backslash escapes")
parser.add_option(
"-f", "--list-frames", action="callback", callback=list_frames,
help="Display all possible frames for ID3v2.3 / ID3v2.4")
parser.add_option(
"--list-frames-v2.2", action="callback", callback=list_frames_2_2,
help="Display all possible frames for ID3v2.2")
parser.add_option(
"-L", "--list-genres", action="callback", callback=list_genres,
help="Lists all ID3v1 genres")
parser.add_option(
"-l", "--list", action="store_const", dest="action", const="list",
help="Lists the tag(s) on the open(s)")
parser.add_option(
"--list-raw", action="store_const", dest="action", const="list-raw",
help="Lists the tag(s) on the open(s) in Python format")
parser.add_option(
"-d", "--delete-v2", action="store_const", dest="action",
const="delete-v2", help="Deletes ID3v2 tags")
parser.add_option(
"-s", "--delete-v1", action="store_const", dest="action",
const="delete-v1", help="Deletes ID3v1 tags")
parser.add_option(
"-D", "--delete-all", action="store_const", dest="action",
const="delete-v1-v2", help="Deletes ID3v1 and ID3v2 tags")
parser.add_option(
'--delete-frames', metavar='FID1,FID2,...', action='store',
dest='deletes', default='', help="Delete the given frames")
parser.add_option(
"-C", "--convert", action="store_const", dest="action",
const="convert",
help="Convert tags to ID3v2.4 (any editing will do this)")
parser.add_option(
"-a", "--artist", metavar='"ARTIST"', action="callback",
help="Set the artist information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TPE1"),
args[2])))
parser.add_option(
"-A", "--album", metavar='"ALBUM"', action="callback",
help="Set the album title information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TALB"),
args[2])))
parser.add_option(
"-t", "--song", metavar='"SONG"', action="callback",
help="Set the song title information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TIT2"),
args[2])))
parser.add_option(
"-c", "--comment", metavar='"DESCRIPTION":"COMMENT":"LANGUAGE"',
action="callback", help="Set the comment information", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--COMM"),
args[2])))
parser.add_option(
"-p", "--picture",
metavar='"FILENAME":"DESCRIPTION":"IMAGE-TYPE":"MIME-TYPE"',
action="callback", help="Set the picture", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--APIC"),
args[2])))
parser.add_option(
"-g", "--genre", metavar='"GENRE"', action="callback",
help="Set the genre or genre number", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TCON"),
args[2])))
parser.add_option(
"-y", "--year", "--date", metavar='YYYY[-MM-DD]', action="callback",
help="Set the year/date", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TDRC"),
args[2])))
parser.add_option(
"-T", "--track", metavar='"num/num"', action="callback",
help="Set the track number/(optional) total tracks", type="string",
callback=lambda *args: args[3].edits.append((fsnative(u"--TRCK"),
args[2])))
for key, frame in mutagen.id3.Frames.items():
if (issubclass(frame, mutagen.id3.TextFrame)
or issubclass(frame, mutagen.id3.UrlFrame)
or issubclass(frame, mutagen.id3.POPM)
or frame in (mutagen.id3.APIC, mutagen.id3.UFID,
mutagen.id3.USLT)):
parser.add_option(
"--" + key, action="callback", help=SUPPRESS_HELP,
type='string', metavar="value", # optparse blows up with this
callback=lambda *args: args[3].edits.append(args[1:3]))
(options, args) = parser.parse_args(argv[1:])
global verbose
verbose = options.verbose
if args:
if parser.edits or options.deletes:
if options.deletes:
delete_frames(options.deletes, args)
if parser.edits:
write_files(parser.edits, args, options.escape)
elif options.action in [None, 'list']:
list_tags(args)
elif options.action == "list-raw":
list_tags_raw(args)
elif options.action == "convert":
write_files([], args, options.escape)
elif options.action.startswith("delete"):
delete_tags(args, "v1" in options.action, "v2" in options.action)
else:
parser.print_help()
else:
parser.print_help()
def entry_point():
_sig.init()
return main(argv)
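# Usage sketch (editor's note; assumes the console script is installed as
# "mid3v2"):
#   mid3v2 -a "Artist" -t "Title" file.mp3
#   mid3v2 --TIT3 "Monkey!" file.mp3
# the second form sets any supported frame by ID, as the help text above
# describes; either edit upgrades the tag to ID3v2.4.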
# ==== end of mid3v2.py (Python, 418 lines, 18,542 bytes) ====

# ==== file 8,540: rembo10_headphones/lib/mutagen/_senf/_environ.py ====
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import ctypes
try:
from collections import abc
except ImportError:
import collections as abc # type: ignore
from ._compat import text_type, PY2
from ._fsnative import path2fsn, is_win, _fsn2legacy, fsnative
from . import _winapi as winapi
def get_windows_env_var(key):
"""Get an env var.
Raises:
WindowsError
"""
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
buf = ctypes.create_unicode_buffer(32767)
stored = winapi.GetEnvironmentVariableW(key, buf, 32767)
if stored == 0:
raise ctypes.WinError()
return buf[:stored]
def set_windows_env_var(key, value):
"""Set an env var.
Raises:
WindowsError
"""
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
if not isinstance(value, text_type):
raise TypeError("%r not of type %r" % (value, text_type))
status = winapi.SetEnvironmentVariableW(key, value)
if status == 0:
raise ctypes.WinError()
def del_windows_env_var(key):
"""Delete an env var.
Raises:
WindowsError
"""
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
status = winapi.SetEnvironmentVariableW(key, None)
if status == 0:
raise ctypes.WinError()
def read_windows_environ():
"""Returns a unicode dict of the Windows environment.
Raises:
WindowsError
"""
res = winapi.GetEnvironmentStringsW()
if not res:
raise ctypes.WinError()
res = ctypes.cast(res, ctypes.POINTER(ctypes.c_wchar))
done = []
current = u""
i = 0
while 1:
c = res[i]
i += 1
if c == u"\x00":
if not current:
break
done.append(current)
current = u""
continue
current += c
dict_ = {}
for entry in done:
try:
key, value = entry.split(u"=", 1)
except ValueError:
continue
key = _norm_key(key)
dict_[key] = value
status = winapi.FreeEnvironmentStringsW(res)
if status == 0:
raise ctypes.WinError()
return dict_
def _norm_key(key):
assert isinstance(key, fsnative)
if is_win:
key = key.upper()
return key
class Environ(abc.MutableMapping):
"""Dict[`fsnative`, `fsnative`]: Like `os.environ` but contains unicode
keys and values under Windows + Python 2.
Any changes made will be forwarded to `os.environ`.
"""
def __init__(self):
if is_win and PY2:
try:
env = read_windows_environ()
except WindowsError:
env = {}
else:
env = os.environ
self._env = env
def __getitem__(self, key):
key = _norm_key(path2fsn(key))
return self._env[key]
def __setitem__(self, key, value):
key = _norm_key(path2fsn(key))
value = path2fsn(value)
if is_win and PY2:
# this calls putenv, so do it first and replace later
try:
os.environ[_fsn2legacy(key)] = _fsn2legacy(value)
except OSError:
raise ValueError
try:
set_windows_env_var(key, value)
except WindowsError:
# py3+win fails for invalid keys. try to do the same
raise ValueError
try:
self._env[key] = value
except OSError:
raise ValueError
def __delitem__(self, key):
key = _norm_key(path2fsn(key))
if is_win and PY2:
try:
del_windows_env_var(key)
except WindowsError:
pass
try:
del os.environ[_fsn2legacy(key)]
except KeyError:
pass
del self._env[key]
def __iter__(self):
return iter(self._env)
def __len__(self):
return len(self._env)
def __repr__(self):
return repr(self._env)
def copy(self):
return self._env.copy()
environ = Environ()
def getenv(key, value=None):
"""Like `os.getenv` but returns unicode under Windows + Python 2
Args:
key (pathlike): The env var to get
value (object): The value to return if the env var does not exist
Returns:
`fsnative` or `object`:
The env var or the passed value if it doesn't exist
"""
key = path2fsn(key)
if is_win and PY2:
return environ.get(key, value)
return os.getenv(key, value)
def unsetenv(key):
"""Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset
"""
key = path2fsn(key)
if is_win:
# python 3 has no unsetenv under Windows -> use our ctypes one as well
try:
del_windows_env_var(key)
except WindowsError:
pass
else:
os.unsetenv(key)
def putenv(key, value):
"""Like `os.putenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to get
value (pathlike): The value to set
Raises:
ValueError
"""
key = path2fsn(key)
value = path2fsn(value)
if is_win and PY2:
try:
set_windows_env_var(key, value)
except WindowsError:
# py3 + win fails here
raise ValueError
else:
try:
os.putenv(key, value)
except OSError:
# win + py3 raise here for invalid keys which is probably a bug.
# ValueError seems better
raise ValueError
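# Illustrative sketch (editor's example; "MY_TEST_VAR" is a hypothetical
# variable name): environ mirrors os.environ with fsnative keys/values, and
# changes flow through to the getenv/putenv helpers above.
# environ[u"MY_TEST_VAR"] = u"value"
# assert getenv(u"MY_TEST_VAR") == u"value"
# del environ[u"MY_TEST_VAR"]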
# ==== end of _environ.py (Python, 209 lines, 6,855 bytes) ====

# ==== file 8,541: rembo10_headphones/lib/mutagen/_senf/_compat.py ====
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
PY2 = sys.version_info[0] == 2
PY3 = not PY2
if PY2:
from urlparse import urlparse, urlunparse
urlparse, urlunparse
from urllib import quote, unquote
quote, unquote
from StringIO import StringIO
BytesIO = StringIO
from io import StringIO as TextIO
TextIO
string_types = (str, unicode)
text_type = unicode
iteritems = lambda d: d.iteritems()
elif PY3:
from urllib.parse import urlparse, quote, unquote, urlunparse
urlparse, quote, unquote, urlunparse
from io import StringIO
StringIO = StringIO
TextIO = StringIO
from io import BytesIO
BytesIO = BytesIO
string_types = (str,)
text_type = str
iteritems = lambda d: iter(d.items())
# ==== end of _compat.py (Python, 47 lines, 1,852 bytes) ====

# ==== file 8,542: rembo10_headphones/lib/mutagen/_senf/_argv.py ====
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import ctypes
try:
from collections import abc
except ImportError:
import collections as abc # type: ignore
from functools import total_ordering
from ._compat import PY2, string_types
from ._fsnative import is_win, _fsn2legacy, path2fsn
from . import _winapi as winapi
def _get_win_argv():
"""Returns a unicode argv under Windows and standard sys.argv otherwise
Returns:
List[`fsnative`]
"""
assert is_win
argc = ctypes.c_int()
try:
argv = winapi.CommandLineToArgvW(
winapi.GetCommandLineW(), ctypes.byref(argc))
except WindowsError:
return []
if not argv:
return []
res = argv[max(0, argc.value - len(sys.argv)):argc.value]
winapi.LocalFree(argv)
return res
@total_ordering
class Argv(abc.MutableSequence):
"""List[`fsnative`]: Like `sys.argv` but contains unicode
keys and values under Windows + Python 2.
Any changes made will be forwarded to `sys.argv`.
"""
def __init__(self):
if PY2 and is_win:
self._argv = _get_win_argv()
else:
self._argv = sys.argv
def __getitem__(self, index):
return self._argv[index]
def __setitem__(self, index, value):
if isinstance(value, string_types):
value = path2fsn(value)
self._argv[index] = value
if sys.argv is not self._argv:
try:
if isinstance(value, string_types):
sys.argv[index] = _fsn2legacy(value)
else:
sys.argv[index] = [_fsn2legacy(path2fsn(v)) for v in value]
except IndexError:
pass
def __delitem__(self, index):
del self._argv[index]
try:
del sys.argv[index]
except IndexError:
pass
def __eq__(self, other):
return self._argv == other
def __lt__(self, other):
return self._argv < other
def __len__(self):
return len(self._argv)
def __repr__(self):
return repr(self._argv)
def insert(self, index, value):
value = path2fsn(value)
self._argv.insert(index, value)
if sys.argv is not self._argv:
sys.argv.insert(index, _fsn2legacy(value))
argv = Argv()
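# Illustrative sketch (editor's example; "--dummy-flag" is hypothetical):
# argv mirrors sys.argv and forwards mutations, so legacy code reading
# sys.argv stays consistent.
# argv.append(u"--dummy-flag")
# assert sys.argv[-1] == "--dummy-flag"
# del argv[-1]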
# ==== end of _argv.py (Python, 93 lines, 3,417 bytes) ====

# ==== file 8,543: rembo10_headphones/lib/mutagen/_senf/_winansi.py ====
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import ctypes
import re
import atexit
from . import _winapi as winapi
def ansi_parse(code):
"""Returns command, (args)"""
return code[-1:], tuple([int(v or "0") for v in code[2:-1].split(";")])
def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")):
"""Yields (is_ansi, text)"""
for part in _re.split(text):
if part:
yield (bool(_re.match(part)), part)
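# Illustrative examples (assumption: added for this write-up, not upstream):
#     ansi_parse(u"\x1b[1;31m") -> ("m", (1, 31))
#     list(ansi_split(u"red\x1b[31mtext"))
#         -> [(False, u"red"), (True, u"\x1b[31m"), (False, u"text")]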
class AnsiCommand(object):
TEXT = "m"
MOVE_UP = "A"
MOVE_DOWN = "B"
MOVE_FORWARD = "C"
MOVE_BACKWARD = "D"
SET_POS = "H"
SET_POS_ALT = "f"
SAVE_POS = "s"
RESTORE_POS = "u"
class TextAction(object):
RESET_ALL = 0
SET_BOLD = 1
SET_DIM = 2
SET_ITALIC = 3
SET_UNDERLINE = 4
SET_BLINK = 5
SET_BLINK_FAST = 6
SET_REVERSE = 7
SET_HIDDEN = 8
RESET_BOLD = 21
RESET_DIM = 22
RESET_ITALIC = 23
RESET_UNDERLINE = 24
RESET_BLINK = 25
RESET_BLINK_FAST = 26
RESET_REVERSE = 27
RESET_HIDDEN = 28
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
FG_DEFAULT = 39
FG_LIGHT_BLACK = 90
FG_LIGHT_RED = 91
FG_LIGHT_GREEN = 92
FG_LIGHT_YELLOW = 93
FG_LIGHT_BLUE = 94
FG_LIGHT_MAGENTA = 95
FG_LIGHT_CYAN = 96
FG_LIGHT_WHITE = 97
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
BG_DEFAULT = 49
BG_LIGHT_BLACK = 100
BG_LIGHT_RED = 101
BG_LIGHT_GREEN = 102
BG_LIGHT_YELLOW = 103
BG_LIGHT_BLUE = 104
BG_LIGHT_MAGENTA = 105
BG_LIGHT_CYAN = 106
BG_LIGHT_WHITE = 107
class AnsiState(object):
def __init__(self):
self.default_attrs = None
self.bold = False
self.bg_light = False
self.fg_light = False
self.saved_pos = (0, 0)
def do_text_action(self, attrs, action):
        # In case the external state has changed, apply it to ours.
# Mostly the first time this is called.
if attrs & winapi.FOREGROUND_INTENSITY and not self.fg_light \
and not self.bold:
self.fg_light = True
if attrs & winapi.BACKGROUND_INTENSITY and not self.bg_light:
self.bg_light = True
dark_fg = {
TextAction.FG_BLACK: 0,
TextAction.FG_RED: winapi.FOREGROUND_RED,
TextAction.FG_GREEN: winapi.FOREGROUND_GREEN,
TextAction.FG_YELLOW:
winapi.FOREGROUND_GREEN | winapi.FOREGROUND_RED,
TextAction.FG_BLUE: winapi.FOREGROUND_BLUE,
TextAction.FG_MAGENTA: winapi.FOREGROUND_BLUE |
winapi.FOREGROUND_RED,
TextAction.FG_CYAN:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN,
TextAction.FG_WHITE:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN |
winapi.FOREGROUND_RED,
}
dark_bg = {
TextAction.BG_BLACK: 0,
TextAction.BG_RED: winapi.BACKGROUND_RED,
TextAction.BG_GREEN: winapi.BACKGROUND_GREEN,
TextAction.BG_YELLOW:
winapi.BACKGROUND_GREEN | winapi.BACKGROUND_RED,
TextAction.BG_BLUE: winapi.BACKGROUND_BLUE,
TextAction.BG_MAGENTA:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_RED,
TextAction.BG_CYAN:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN,
TextAction.BG_WHITE:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN |
winapi.BACKGROUND_RED,
}
light_fg = {
TextAction.FG_LIGHT_BLACK: 0,
TextAction.FG_LIGHT_RED: winapi.FOREGROUND_RED,
TextAction.FG_LIGHT_GREEN: winapi.FOREGROUND_GREEN,
TextAction.FG_LIGHT_YELLOW:
winapi.FOREGROUND_GREEN | winapi.FOREGROUND_RED,
TextAction.FG_LIGHT_BLUE: winapi.FOREGROUND_BLUE,
TextAction.FG_LIGHT_MAGENTA:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_RED,
TextAction.FG_LIGHT_CYAN:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN,
TextAction.FG_LIGHT_WHITE:
winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN |
winapi.FOREGROUND_RED,
}
light_bg = {
TextAction.BG_LIGHT_BLACK: 0,
TextAction.BG_LIGHT_RED: winapi.BACKGROUND_RED,
TextAction.BG_LIGHT_GREEN: winapi.BACKGROUND_GREEN,
TextAction.BG_LIGHT_YELLOW:
winapi.BACKGROUND_GREEN | winapi.BACKGROUND_RED,
TextAction.BG_LIGHT_BLUE: winapi.BACKGROUND_BLUE,
TextAction.BG_LIGHT_MAGENTA:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_RED,
TextAction.BG_LIGHT_CYAN:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN,
TextAction.BG_LIGHT_WHITE:
winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN |
winapi.BACKGROUND_RED,
}
if action == TextAction.RESET_ALL:
attrs = self.default_attrs
self.bold = self.fg_light = self.bg_light = False
elif action == TextAction.SET_BOLD:
self.bold = True
elif action == TextAction.RESET_BOLD:
self.bold = False
elif action == TextAction.SET_DIM:
self.bold = False
elif action == TextAction.SET_REVERSE:
attrs |= winapi.COMMON_LVB_REVERSE_VIDEO
elif action == TextAction.RESET_REVERSE:
attrs &= ~winapi.COMMON_LVB_REVERSE_VIDEO
elif action == TextAction.SET_UNDERLINE:
attrs |= winapi.COMMON_LVB_UNDERSCORE
elif action == TextAction.RESET_UNDERLINE:
attrs &= ~winapi.COMMON_LVB_UNDERSCORE
elif action == TextAction.FG_DEFAULT:
attrs = (attrs & ~0xF) | (self.default_attrs & 0xF)
self.fg_light = False
elif action == TextAction.BG_DEFAULT:
attrs = (attrs & ~0xF0) | (self.default_attrs & 0xF0)
self.bg_light = False
elif action in dark_fg:
attrs = (attrs & ~0xF) | dark_fg[action]
self.fg_light = False
elif action in dark_bg:
attrs = (attrs & ~0xF0) | dark_bg[action]
self.bg_light = False
elif action in light_fg:
attrs = (attrs & ~0xF) | light_fg[action]
self.fg_light = True
elif action in light_bg:
attrs = (attrs & ~0xF0) | light_bg[action]
self.bg_light = True
if self.fg_light or self.bold:
attrs |= winapi.FOREGROUND_INTENSITY
else:
attrs &= ~winapi.FOREGROUND_INTENSITY
if self.bg_light:
attrs |= winapi.BACKGROUND_INTENSITY
else:
attrs &= ~winapi.BACKGROUND_INTENSITY
return attrs
def apply(self, handle, code):
buffer_info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
if not winapi.GetConsoleScreenBufferInfo(handle,
ctypes.byref(buffer_info)):
return
attrs = buffer_info.wAttributes
# We take the first attrs we see as default
if self.default_attrs is None:
self.default_attrs = attrs
            # Make sure that, as with Linux terminals, the program doesn't
            # affect the prompt after it exits
atexit.register(
winapi.SetConsoleTextAttribute, handle, self.default_attrs)
cmd, args = ansi_parse(code)
if cmd == AnsiCommand.TEXT:
for action in args:
attrs = self.do_text_action(attrs, action)
winapi.SetConsoleTextAttribute(handle, attrs)
elif cmd in (AnsiCommand.MOVE_UP, AnsiCommand.MOVE_DOWN,
AnsiCommand.MOVE_FORWARD, AnsiCommand.MOVE_BACKWARD):
coord = buffer_info.dwCursorPosition
x, y = coord.X, coord.Y
amount = max(args[0], 1)
if cmd == AnsiCommand.MOVE_UP:
y -= amount
elif cmd == AnsiCommand.MOVE_DOWN:
y += amount
elif cmd == AnsiCommand.MOVE_FORWARD:
x += amount
elif cmd == AnsiCommand.MOVE_BACKWARD:
x -= amount
x = max(x, 0)
y = max(y, 0)
winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y))
elif cmd in (AnsiCommand.SET_POS, AnsiCommand.SET_POS_ALT):
args = list(args)
while len(args) < 2:
args.append(0)
x, y = args[:2]
win_rect = buffer_info.srWindow
x += win_rect.Left - 1
y += win_rect.Top - 1
x = max(x, 0)
y = max(y, 0)
winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y))
elif cmd == AnsiCommand.SAVE_POS:
win_rect = buffer_info.srWindow
coord = buffer_info.dwCursorPosition
x, y = coord.X, coord.Y
x -= win_rect.Left
y -= win_rect.Top
self.saved_pos = (x, y)
elif cmd == AnsiCommand.RESTORE_POS:
win_rect = buffer_info.srWindow
x, y = self.saved_pos
x += win_rect.Left
y += win_rect.Top
winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y))
| 10,599
|
Python
|
.py
| 272
| 29.213235
| 76
| 0.595525
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,544
|
__init__.pyi
|
rembo10_headphones/lib/mutagen/_senf/__init__.pyi
|
import sys
import os
from typing import Text, Union, Any, Optional, Tuple, List, Dict
if sys.version_info[0] == 2:
_pathlike = Union[Text, bytes]
else:
_pathlike = Union[Text, bytes, 'os.PathLike[Any]']
_uri = Union[Text, str]
if sys.version_info[0] == 2:
if sys.platform == "win32":
_base = Text
else:
_base = bytes
else:
_base = Text
class fsnative(_base):
def __init__(self, object: Text=u"") -> None:
...
_fsnative = Union[fsnative, _base]
if sys.platform == "win32":
_bytes_default_encoding = str
else:
_bytes_default_encoding = Optional[str]
def path2fsn(path: _pathlike) -> _fsnative:
...
def fsn2text(path: _fsnative, strict: bool=False) -> Text:
...
def text2fsn(text: Text) -> _fsnative:
...
def fsn2bytes(path: _fsnative, encoding: _bytes_default_encoding="utf-8") -> bytes:
...
def bytes2fsn(data: bytes, encoding: _bytes_default_encoding="utf-8") -> _fsnative:
...
def uri2fsn(uri: _uri) -> _fsnative:
...
def fsn2uri(path: _fsnative) -> Text:
...
def fsn2norm(path: _fsnative) -> _fsnative:
...
sep: _fsnative
pathsep: _fsnative
curdir: _fsnative
pardir: _fsnative
altsep: _fsnative
extsep: _fsnative
devnull: _fsnative
defpath: _fsnative
def getcwd() -> _fsnative:
...
def getenv(key: _pathlike, value: Optional[_fsnative]=None) -> Optional[_fsnative]:
...
def putenv(key: _pathlike, value: _pathlike):
...
def unsetenv(key: _pathlike) -> None:
...
def supports_ansi_escape_codes(fd: int) -> bool:
...
def expandvars(path: _pathlike) -> _fsnative:
...
def expanduser(path: _pathlike) -> _fsnative:
...
environ: Dict[_fsnative,_fsnative]
argv: List[_fsnative]
def gettempdir() -> _fsnative:
    ...
def mkstemp(suffix: Optional[_pathlike]=None, prefix: Optional[_pathlike]=None, dir: Optional[_pathlike]=None, text: bool=False) -> Tuple[int, _fsnative]:
...
def mkdtemp(suffix: Optional[_pathlike]=None, prefix: Optional[_pathlike]=None, dir: Optional[_pathlike]=None) -> _fsnative:
...
version_string: str
version: Tuple[int, int, int]
print_ = print
def input_(prompt: Any=None) -> _fsnative:
...
| 2,172
|
Python
|
.py
| 74
| 26.162162
| 154
| 0.665861
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,545
|
_print.py
|
rembo10_headphones/lib/mutagen/_senf/_print.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
import ctypes
import re
from ._fsnative import _encoding, is_win, is_unix, _surrogatepass, bytes2fsn
from ._compat import text_type, PY2, PY3
from ._winansi import AnsiState, ansi_split
from . import _winapi as winapi
def print_(*objects, **kwargs):
"""print_(*objects, sep=None, end=None, file=None, flush=False)
Args:
objects (object): zero or more objects to print
sep (str): Object separator to use, defaults to ``" "``
end (str): Trailing string to use, defaults to ``"\\n"``.
If end is ``"\\n"`` then `os.linesep` is used.
file (object): A file-like object, defaults to `sys.stdout`
flush (bool): If the file stream should be flushed
Raises:
EnvironmentError
Like print(), but:
* Supports printing filenames under Unix + Python 3 and Windows + Python 2
* Emulates ANSI escape sequence support under Windows
* Never fails due to encoding/decoding errors. Tries hard to get everything
on screen as is, but will fall back to "?" if all fails.
This does not conflict with ``colorama``, but will not use it on Windows.
"""
sep = kwargs.get("sep")
sep = sep if sep is not None else " "
end = kwargs.get("end")
end = end if end is not None else "\n"
file = kwargs.get("file")
file = file if file is not None else sys.stdout
flush = bool(kwargs.get("flush", False))
if is_win:
_print_windows(objects, sep, end, file, flush)
else:
_print_unix(objects, sep, end, file, flush)
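# Illustrative usage (assumption: example added for this write-up; fsnative
# would come from the package's public API):
#     print_(u"saving", fsnative(u"na\u00efve.txt"), sep=": ")
# This prints a filename on any platform without raising UnicodeEncodeError.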
def _print_unix(objects, sep, end, file, flush):
"""A print_() implementation which writes bytes"""
encoding = _encoding
if isinstance(sep, text_type):
sep = sep.encode(encoding, "replace")
if not isinstance(sep, bytes):
raise TypeError
if isinstance(end, text_type):
end = end.encode(encoding, "replace")
if not isinstance(end, bytes):
raise TypeError
if end == b"\n":
end = os.linesep
if PY3:
end = end.encode("ascii")
parts = []
for obj in objects:
if not isinstance(obj, text_type) and not isinstance(obj, bytes):
obj = text_type(obj)
if isinstance(obj, text_type):
if PY2:
obj = obj.encode(encoding, "replace")
else:
try:
obj = obj.encode(encoding, "surrogateescape")
except UnicodeEncodeError:
obj = obj.encode(encoding, "replace")
assert isinstance(obj, bytes)
parts.append(obj)
data = sep.join(parts) + end
assert isinstance(data, bytes)
file = getattr(file, "buffer", file)
try:
file.write(data)
except TypeError:
if PY3:
# For StringIO, first try with surrogates
surr_data = data.decode(encoding, "surrogateescape")
try:
file.write(surr_data)
except (TypeError, ValueError):
file.write(data.decode(encoding, "replace"))
else:
            # for file-like objects which don't support bytes
file.write(data.decode(encoding, "replace"))
if flush:
file.flush()
ansi_state = AnsiState()
def _print_windows(objects, sep, end, file, flush):
"""The windows implementation of print_()"""
h = winapi.INVALID_HANDLE_VALUE
try:
fileno = file.fileno()
except (EnvironmentError, AttributeError):
pass
else:
if fileno == 1:
h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE)
elif fileno == 2:
h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE)
encoding = _encoding
parts = []
for obj in objects:
if isinstance(obj, bytes):
obj = obj.decode(encoding, "replace")
if not isinstance(obj, text_type):
obj = text_type(obj)
parts.append(obj)
if isinstance(sep, bytes):
sep = sep.decode(encoding, "replace")
if not isinstance(sep, text_type):
raise TypeError
if isinstance(end, bytes):
end = end.decode(encoding, "replace")
if not isinstance(end, text_type):
raise TypeError
if end == u"\n":
end = os.linesep
text = sep.join(parts) + end
assert isinstance(text, text_type)
is_console = True
if h == winapi.INVALID_HANDLE_VALUE:
is_console = False
else:
# get the default value
info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)):
is_console = False
if is_console:
# make sure we flush before we apply any console attributes
file.flush()
# try to force a utf-8 code page, use the output CP if that fails
cp = winapi.GetConsoleOutputCP()
try:
encoding = "utf-8"
if winapi.SetConsoleOutputCP(65001) == 0:
encoding = None
for is_ansi, part in ansi_split(text):
if is_ansi:
ansi_state.apply(h, part)
else:
if encoding is not None:
data = part.encode(encoding, _surrogatepass)
else:
data = _encode_codepage(cp, part)
os.write(fileno, data)
finally:
# reset the code page to what we had before
winapi.SetConsoleOutputCP(cp)
else:
# try writing bytes first, so in case of Python 2 StringIO we get
# the same type on all platforms
try:
file.write(text.encode("utf-8", _surrogatepass))
except (TypeError, ValueError):
file.write(text)
if flush:
file.flush()
def _readline_windows():
"""Raises OSError"""
try:
fileno = sys.stdin.fileno()
except (EnvironmentError, AttributeError):
fileno = -1
# In case stdin is replaced, read from that
if fileno != 0:
return _readline_windows_fallback()
h = winapi.GetStdHandle(winapi.STD_INPUT_HANDLE)
if h == winapi.INVALID_HANDLE_VALUE:
return _readline_windows_fallback()
buf_size = 1024
buf = ctypes.create_string_buffer(buf_size * ctypes.sizeof(winapi.WCHAR))
read = winapi.DWORD()
text = u""
while True:
if winapi.ReadConsoleW(
h, buf, buf_size, ctypes.byref(read), None) == 0:
if not text:
return _readline_windows_fallback()
raise ctypes.WinError()
data = buf[:read.value * ctypes.sizeof(winapi.WCHAR)]
text += data.decode("utf-16-le", _surrogatepass)
if text.endswith(u"\r\n"):
return text[:-2]
def _decode_codepage(codepage, data):
"""
Args:
codepage (int)
data (bytes)
Returns:
`text`
    Decodes data using the given codepage. Does not fail even if some of
    the data can't be decoded with that codepage.
"""
assert isinstance(data, bytes)
if not data:
return u""
# get the required buffer length first
length = winapi.MultiByteToWideChar(codepage, 0, data, len(data), None, 0)
if length == 0:
raise ctypes.WinError()
# now decode
buf = ctypes.create_unicode_buffer(length)
length = winapi.MultiByteToWideChar(
codepage, 0, data, len(data), buf, length)
if length == 0:
raise ctypes.WinError()
return buf[:]
def _encode_codepage(codepage, text):
"""
Args:
codepage (int)
text (text)
Returns:
`bytes`
Encode text using the given code page. Will not fail if a char
can't be encoded using that codepage.
"""
assert isinstance(text, text_type)
if not text:
return b""
size = (len(text.encode("utf-16-le", _surrogatepass)) //
ctypes.sizeof(winapi.WCHAR))
# get the required buffer size
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, None, 0, None, None)
if length == 0:
raise ctypes.WinError()
# decode to the buffer
buf = ctypes.create_string_buffer(length)
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, buf, length, None, None)
if length == 0:
raise ctypes.WinError()
return buf[:length]
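# Illustrative round-trip (assumptions: Windows-only helpers; codepage 65001
# is UTF-8; this example is not part of the original module):
#     _decode_codepage(65001, _encode_codepage(65001, u"caf\u00e9")) == u"caf\u00e9"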
def _readline_windows_fallback():
# In case reading from the console failed (maybe we get piped data)
# we assume the input was generated according to the output encoding.
# Got any better ideas?
assert is_win
cp = winapi.GetConsoleOutputCP()
data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n")
return _decode_codepage(cp, data)
def _readline_default():
assert is_unix
data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n")
if PY3:
return data.decode(_encoding, "surrogateescape")
else:
return data
def _readline():
if is_win:
return _readline_windows()
else:
return _readline_default()
def input_(prompt=None):
"""
Args:
prompt (object): Prints the passed object to stdout without
adding a trailing newline
Returns:
`fsnative`
Raises:
EnvironmentError
Like :func:`python3:input` but returns a `fsnative` and allows printing
filenames as prompt to stdout.
Use :func:`fsn2text` on the result if you just want to deal with text.
"""
if prompt is not None:
print_(prompt, end="")
return _readline()
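# Illustrative usage (assumption: example added for this write-up):
#     name = input_(u"File name: ")  # returns a fsnative usable with open()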
def _get_file_name_for_handle(handle):
"""(Windows only) Returns a file name for a file handle.
Args:
handle (winapi.HANDLE)
Returns:
`text` or `None` if no file name could be retrieved.
"""
assert is_win
assert handle != winapi.INVALID_HANDLE_VALUE
size = winapi.FILE_NAME_INFO.FileName.offset + \
winapi.MAX_PATH * ctypes.sizeof(winapi.WCHAR)
buf = ctypes.create_string_buffer(size)
if winapi.GetFileInformationByHandleEx is None:
# Windows XP
return None
status = winapi.GetFileInformationByHandleEx(
handle, winapi.FileNameInfo, buf, size)
if status == 0:
return None
name_info = ctypes.cast(
buf, ctypes.POINTER(winapi.FILE_NAME_INFO)).contents
offset = winapi.FILE_NAME_INFO.FileName.offset
data = buf[offset:offset + name_info.FileNameLength]
return bytes2fsn(data, "utf-16-le")
def supports_ansi_escape_codes(fd):
"""Returns whether the output device is capable of interpreting ANSI escape
codes when :func:`print_` is used.
Args:
fd (int): file descriptor (e.g. ``sys.stdout.fileno()``)
Returns:
`bool`
"""
if os.isatty(fd):
return True
if not is_win:
return False
# Check for cygwin/msys terminal
handle = winapi._get_osfhandle(fd)
if handle == winapi.INVALID_HANDLE_VALUE:
return False
if winapi.GetFileType(handle) != winapi.FILE_TYPE_PIPE:
return False
file_name = _get_file_name_for_handle(handle)
match = re.match(
"^\\\\(cygwin|msys)-[a-z0-9]+-pty[0-9]+-(from|to)-master$", file_name)
return match is not None
| 12,366
|
Python
|
.py
| 335
| 29.555224
| 79
| 0.634651
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,546
|
_stdlib.py
|
rembo10_headphones/lib/mutagen/_senf/_stdlib.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
import os
from ._fsnative import path2fsn, fsnative, is_win
from ._compat import PY2
from ._environ import environ
sep = path2fsn(os.sep)
pathsep = path2fsn(os.pathsep)
curdir = path2fsn(os.curdir)
pardir = path2fsn(os.pardir)
altsep = path2fsn(os.altsep) if os.altsep is not None else None
extsep = path2fsn(os.extsep)
devnull = path2fsn(os.devnull)
defpath = path2fsn(os.defpath)
def getcwd():
"""Like `os.getcwd` but returns a `fsnative` path
Returns:
`fsnative`
"""
if is_win and PY2:
return os.getcwdu()
return os.getcwd()
def _get_userdir(user=None):
"""Returns the user dir or None"""
if user is not None and not isinstance(user, fsnative):
raise TypeError
if is_win:
if "HOME" in environ:
path = environ["HOME"]
elif "USERPROFILE" in environ:
path = environ["USERPROFILE"]
elif "HOMEPATH" in environ and "HOMEDRIVE" in environ:
path = os.path.join(environ["HOMEDRIVE"], environ["HOMEPATH"])
else:
return
if user is None:
return path
else:
return os.path.join(os.path.dirname(path), user)
else:
import pwd
if user is None:
if "HOME" in environ:
return environ["HOME"]
else:
try:
return path2fsn(pwd.getpwuid(os.getuid()).pw_dir)
except KeyError:
return
else:
try:
return path2fsn(pwd.getpwnam(user).pw_dir)
except KeyError:
return
def expanduser(path):
"""
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expanduser` but supports unicode home
directories under Windows + Python 2 and always returns a `fsnative`.
"""
path = path2fsn(path)
if path == "~":
return _get_userdir()
elif path.startswith("~" + sep) or (
altsep is not None and path.startswith("~" + altsep)):
userdir = _get_userdir()
if userdir is None:
return path
return userdir + path[1:]
elif path.startswith("~"):
sep_index = path.find(sep)
if altsep is not None:
alt_index = path.find(altsep)
            if alt_index != -1 and (sep_index == -1 or alt_index < sep_index):
sep_index = alt_index
if sep_index == -1:
user = path[1:]
rest = ""
else:
user = path[1:sep_index]
rest = path[sep_index:]
userdir = _get_userdir(user)
if userdir is not None:
return userdir + rest
else:
return path
else:
return path
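# Illustrative example (assumptions: a Unix system with HOME=/home/alice;
# not part of the original module):
#     expanduser("~/music") -> fsnative(u"/home/alice/music")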
def expandvars(path):
"""
Args:
path (pathlike): A path to expand
Returns:
`fsnative`
Like :func:`python:os.path.expandvars` but supports unicode under Windows
+ Python 2 and always returns a `fsnative`.
"""
path = path2fsn(path)
def repl_func(match):
return environ.get(match.group(1), match.group(0))
path = re.compile(r"\$(\w+)", flags=re.UNICODE).sub(repl_func, path)
if os.name == "nt":
path = re.sub(r"%([^%]+)%", repl_func, path)
return re.sub(r"\$\{([^\}]+)\}", repl_func, path)
| 4,466
|
Python
|
.py
| 127
| 28.125984
| 77
| 0.628479
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,547
|
_winapi.py
|
rembo10_headphones/lib/mutagen/_senf/_winapi.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import ctypes
if sys.platform == 'win32':
from ctypes import WinDLL, CDLL, wintypes
shell32 = WinDLL("shell32")
kernel32 = WinDLL("kernel32")
shlwapi = WinDLL("shlwapi")
msvcrt = CDLL("msvcrt")
GetCommandLineW = kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = wintypes.LPCWSTR
CommandLineToArgvW = shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [
wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)]
CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR)
LocalFree = kernel32.LocalFree
LocalFree.argtypes = [wintypes.HLOCAL]
LocalFree.restype = wintypes.HLOCAL
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751.aspx
LPCTSTR = ctypes.c_wchar_p
LPWSTR = wintypes.LPWSTR
LPCWSTR = ctypes.c_wchar_p
LPTSTR = LPWSTR
PCWSTR = ctypes.c_wchar_p
PCTSTR = PCWSTR
PWSTR = ctypes.c_wchar_p
PTSTR = PWSTR
LPVOID = wintypes.LPVOID
WCHAR = wintypes.WCHAR
LPSTR = ctypes.c_char_p
BOOL = wintypes.BOOL
LPBOOL = ctypes.POINTER(BOOL)
UINT = wintypes.UINT
WORD = wintypes.WORD
DWORD = wintypes.DWORD
SHORT = wintypes.SHORT
HANDLE = wintypes.HANDLE
ULONG = wintypes.ULONG
LPCSTR = wintypes.LPCSTR
STD_INPUT_HANDLE = DWORD(-10)
STD_OUTPUT_HANDLE = DWORD(-11)
STD_ERROR_HANDLE = DWORD(-12)
INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value
INTERNET_MAX_SCHEME_LENGTH = 32
INTERNET_MAX_PATH_LENGTH = 2048
INTERNET_MAX_URL_LENGTH = (
INTERNET_MAX_SCHEME_LENGTH + len("://") + INTERNET_MAX_PATH_LENGTH)
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_RED = 0x0004
FOREGROUND_INTENSITY = 0x0008
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_RED = 0x0040
BACKGROUND_INTENSITY = 0x0080
COMMON_LVB_REVERSE_VIDEO = 0x4000
COMMON_LVB_UNDERSCORE = 0x8000
UrlCreateFromPathW = shlwapi.UrlCreateFromPathW
UrlCreateFromPathW.argtypes = [
PCTSTR, PTSTR, ctypes.POINTER(DWORD), DWORD]
UrlCreateFromPathW.restype = ctypes.HRESULT
SetEnvironmentVariableW = kernel32.SetEnvironmentVariableW
SetEnvironmentVariableW.argtypes = [LPCTSTR, LPCTSTR]
SetEnvironmentVariableW.restype = wintypes.BOOL
GetEnvironmentVariableW = kernel32.GetEnvironmentVariableW
GetEnvironmentVariableW.argtypes = [LPCTSTR, LPTSTR, DWORD]
GetEnvironmentVariableW.restype = DWORD
GetEnvironmentStringsW = kernel32.GetEnvironmentStringsW
GetEnvironmentStringsW.argtypes = []
GetEnvironmentStringsW.restype = ctypes.c_void_p
FreeEnvironmentStringsW = kernel32.FreeEnvironmentStringsW
FreeEnvironmentStringsW.argtypes = [ctypes.c_void_p]
FreeEnvironmentStringsW.restype = ctypes.c_bool
GetStdHandle = kernel32.GetStdHandle
GetStdHandle.argtypes = [DWORD]
GetStdHandle.restype = HANDLE
class COORD(ctypes.Structure):
_fields_ = [
("X", SHORT),
("Y", SHORT),
]
class SMALL_RECT(ctypes.Structure):
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
GetConsoleScreenBufferInfo = kernel32.GetConsoleScreenBufferInfo
GetConsoleScreenBufferInfo.argtypes = [
HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
GetConsoleScreenBufferInfo.restype = BOOL
GetConsoleOutputCP = kernel32.GetConsoleOutputCP
GetConsoleOutputCP.argtypes = []
GetConsoleOutputCP.restype = UINT
SetConsoleOutputCP = kernel32.SetConsoleOutputCP
SetConsoleOutputCP.argtypes = [UINT]
SetConsoleOutputCP.restype = BOOL
GetConsoleCP = kernel32.GetConsoleCP
GetConsoleCP.argtypes = []
GetConsoleCP.restype = UINT
SetConsoleCP = kernel32.SetConsoleCP
SetConsoleCP.argtypes = [UINT]
SetConsoleCP.restype = BOOL
SetConsoleTextAttribute = kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [HANDLE, WORD]
SetConsoleTextAttribute.restype = BOOL
SetConsoleCursorPosition = kernel32.SetConsoleCursorPosition
SetConsoleCursorPosition.argtypes = [HANDLE, COORD]
SetConsoleCursorPosition.restype = BOOL
ReadConsoleW = kernel32.ReadConsoleW
ReadConsoleW.argtypes = [
HANDLE, LPVOID, DWORD, ctypes.POINTER(DWORD), LPVOID]
ReadConsoleW.restype = BOOL
MultiByteToWideChar = kernel32.MultiByteToWideChar
MultiByteToWideChar.argtypes = [
UINT, DWORD, LPCSTR, ctypes.c_int, LPWSTR, ctypes.c_int]
MultiByteToWideChar.restype = ctypes.c_int
WideCharToMultiByte = kernel32.WideCharToMultiByte
WideCharToMultiByte.argtypes = [
UINT, DWORD, LPCWSTR, ctypes.c_int, LPSTR, ctypes.c_int,
LPCSTR, LPBOOL]
WideCharToMultiByte.restype = ctypes.c_int
MoveFileW = kernel32.MoveFileW
MoveFileW.argtypes = [LPCTSTR, LPCTSTR]
MoveFileW.restype = BOOL
GetFileInformationByHandleEx = None
if hasattr(kernel32, "GetFileInformationByHandleEx"):
GetFileInformationByHandleEx = kernel32.GetFileInformationByHandleEx
GetFileInformationByHandleEx.argtypes = [
HANDLE, ctypes.c_int, ctypes.c_void_p, DWORD]
GetFileInformationByHandleEx.restype = BOOL
else:
# Windows XP
pass
MAX_PATH = 260
FileNameInfo = 2
class FILE_NAME_INFO(ctypes.Structure):
_fields_ = [
("FileNameLength", DWORD),
("FileName", WCHAR),
]
_get_osfhandle = msvcrt._get_osfhandle
_get_osfhandle.argtypes = [ctypes.c_int]
_get_osfhandle.restype = HANDLE
GetFileType = kernel32.GetFileType
GetFileType.argtypes = [HANDLE]
GetFileType.restype = DWORD
FILE_TYPE_PIPE = 0x0003
| 7,208
|
Python
|
.py
| 178
| 34.691011
| 76
| 0.724488
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,548
|
_fsnative.py
|
rembo10_headphones/lib/mutagen/_senf/_fsnative.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import ctypes
import codecs
from . import _winapi as winapi
from ._compat import text_type, PY3, PY2, urlparse, quote, unquote, urlunparse
is_win = os.name == "nt"
is_unix = not is_win
is_darwin = sys.platform == "darwin"
_surrogatepass = "strict" if PY2 else "surrogatepass"
def _normalize_codec(codec, _cache={}):
"""Raises LookupError"""
try:
return _cache[codec]
except KeyError:
_cache[codec] = codecs.lookup(codec).name
return _cache[codec]
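# Illustrative example (assumption: added for this write-up):
#     _normalize_codec("UTF8") -> "utf-8"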
def _swap_bytes(data):
"""swaps bytes for 16 bit, leaves remaining trailing bytes alone"""
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
return bytes(data)
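# Illustrative examples (assumption: added for this write-up):
#     _swap_bytes(b"abcd") -> b"badc"
#     _swap_bytes(b"abc") -> b"bac"   # trailing odd byte stays in place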
def _decode_surrogatepass(data, codec):
"""Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work
on Python 2.
https://bugs.python.org/issue27971
Raises UnicodeDecodeError, LookupError
"""
try:
return data.decode(codec, _surrogatepass)
except UnicodeDecodeError:
if PY2:
if _normalize_codec(codec) == "utf-16-be":
data = _swap_bytes(data)
codec = "utf-16-le"
if _normalize_codec(codec) == "utf-16-le":
buffer_ = ctypes.create_string_buffer(data + b"\x00\x00")
value = ctypes.wstring_at(buffer_, len(data) // 2)
if value.encode("utf-16-le", _surrogatepass) != data:
raise
return value
else:
raise
else:
raise
def _merge_surrogates(text):
"""Returns a copy of the text with all surrogate pairs merged"""
return _decode_surrogatepass(
text.encode("utf-16-le", _surrogatepass),
"utf-16-le")
def fsn2norm(path):
"""
Args:
path (fsnative): The path to normalize
Returns:
`fsnative`
Normalizes an fsnative path.
The same underlying path can have multiple representations as fsnative
(due to surrogate pairs and variable length encodings). When concatenating
fsnative the result might be different than concatenating the serialized
form and then deserializing it.
This returns the normalized form i.e. the form which os.listdir() would
return. This is useful when you alter fsnative but require that the same
underlying path always maps to the same fsnative value.
All functions like :func:`bytes2fsn`, :func:`fsnative`, :func:`text2fsn`
and :func:`path2fsn` always return a normalized path, independent of their
input.
"""
native = _fsn2native(path)
if is_win:
return _merge_surrogates(native)
elif PY3:
return bytes2fsn(native, None)
else:
return path
def _fsn2legacy(path):
"""Takes a fsnative path and returns a path that can be put into os.environ
or sys.argv. Might result in a mangled path on Python2 + Windows.
Can't fail.
Args:
path (fsnative)
Returns:
str
"""
if PY2 and is_win:
return path.encode(_encoding, "replace")
return path
def _fsnative(text):
if not isinstance(text, text_type):
raise TypeError("%r needs to be a text type (%r)" % (text, text_type))
if is_unix:
# First we go to bytes so we can be sure we have a valid source.
# Theoretically we should fail here in case we have a non-unicode
# encoding. But this would make everything complicated and there is
# no good way to handle a failure from the user side. Instead
        # fall back to utf-8 which is most likely the right choice in
        # a mis-configured environment
encoding = _encoding
try:
path = text.encode(encoding, _surrogatepass)
except UnicodeEncodeError:
path = text.encode("utf-8", _surrogatepass)
if b"\x00" in path:
path = path.replace(b"\x00", fsn2bytes(_fsnative(u"\uFFFD"), None))
if PY3:
return path.decode(_encoding, "surrogateescape")
return path
else:
if u"\x00" in text:
text = text.replace(u"\x00", u"\uFFFD")
text = fsn2norm(text)
return text
def _create_fsnative(type_):
# a bit of magic to make fsnative(u"foo") and isinstance(path, fsnative)
# work
class meta(type):
def __instancecheck__(self, instance):
return _typecheck_fsnative(instance)
def __subclasscheck__(self, subclass):
return issubclass(subclass, type_)
class impl(object):
"""fsnative(text=u"")
Args:
text (text): The text to convert to a path
Returns:
fsnative: The new path.
Raises:
            TypeError: In case something other than `text` has been passed
This type is a virtual base class for the real path type.
Instantiating it returns an instance of the real path type and it
overrides instance and subclass checks so that `isinstance` and
`issubclass` checks work:
::
isinstance(fsnative(u"foo"), fsnative) == True
issubclass(type(fsnative(u"foo")), fsnative) == True
The real returned type is:
- **Python 2 + Windows:** :obj:`python:unicode`, with ``surrogates``,
without ``null``
- **Python 2 + Unix:** :obj:`python:str`, without ``null``
- **Python 3 + Windows:** :obj:`python3:str`, with ``surrogates``,
without ``null``
- **Python 3 + Unix:** :obj:`python3:str`, with ``surrogates``, without
``null``, without code points not encodable with the locale encoding
Constructing a `fsnative` can't fail.
Passing a `fsnative` to :func:`open` will never lead to `ValueError`
or `TypeError`.
Any operation on `fsnative` can also use the `str` type, as long as
the `str` only contains ASCII and no NULL.
"""
def __new__(cls, text=u""):
return _fsnative(text)
new_type = meta("fsnative", (object,), dict(impl.__dict__))
new_type.__module__ = "senf"
return new_type
fsnative_type = text_type if is_win or PY3 else bytes
fsnative = _create_fsnative(fsnative_type)
def _typecheck_fsnative(path):
"""
Args:
path (object)
Returns:
bool: if path is a fsnative
"""
if not isinstance(path, fsnative_type):
return False
if PY3 or is_win:
if u"\x00" in path:
return False
if is_unix:
try:
path.encode(_encoding, "surrogateescape")
except UnicodeEncodeError:
return False
elif b"\x00" in path:
return False
return True
def _fsn2native(path):
"""
Args:
path (fsnative)
Returns:
`text` on Windows, `bytes` on Unix
Raises:
        TypeError: in case the type is wrong or the `str` on Py3 + Unix
            can't be converted to `bytes`
    This helper allows validating the type and content of a path.
To reduce overhead the encoded value for Py3 + Unix is returned so
it can be reused.
"""
if not isinstance(path, fsnative_type):
raise TypeError("path needs to be %s, not %s" % (
fsnative_type.__name__, type(path).__name__))
if is_unix:
if PY3:
try:
path = path.encode(_encoding, "surrogateescape")
except UnicodeEncodeError:
                # This looks more like ValueError, but raising only one error
# makes things simpler... also one could say str + surrogates
# is its own type
raise TypeError(
"path contained Unicode code points not valid in"
"the current path encoding. To create a valid "
"path from Unicode use text2fsn()")
if b"\x00" in path:
raise TypeError("fsnative can't contain nulls")
else:
if u"\x00" in path:
raise TypeError("fsnative can't contain nulls")
return path
def _get_encoding():
"""The encoding used for paths, argv, environ, stdout and stdin"""
encoding = sys.getfilesystemencoding()
if encoding is None:
if is_darwin:
encoding = "utf-8"
elif is_win:
encoding = "mbcs"
else:
encoding = "ascii"
encoding = _normalize_codec(encoding)
return encoding
_encoding = _get_encoding()
def path2fsn(path):
"""
Args:
path (pathlike): The path to convert
Returns:
`fsnative`
Raises:
TypeError: In case the type can't be converted to a `fsnative`
ValueError: In case conversion fails
Returns a `fsnative` path for a `pathlike`.
"""
# allow mbcs str on py2+win and bytes on py3
if PY2:
if is_win:
if isinstance(path, bytes):
path = path.decode(_encoding)
else:
if isinstance(path, text_type):
path = path.encode(_encoding)
if "\x00" in path:
raise ValueError("embedded null")
else:
path = getattr(os, "fspath", lambda x: x)(path)
if isinstance(path, bytes):
if b"\x00" in path:
raise ValueError("embedded null")
path = path.decode(_encoding, "surrogateescape")
elif is_unix and isinstance(path, str):
# make sure we can encode it and this is not just some random
# unicode string
data = path.encode(_encoding, "surrogateescape")
if b"\x00" in data:
raise ValueError("embedded null")
path = fsn2norm(path)
else:
if u"\x00" in path:
raise ValueError("embedded null")
path = fsn2norm(path)
if not isinstance(path, fsnative_type):
raise TypeError("path needs to be %s", fsnative_type.__name__)
return path
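# Illustrative examples (assumption: added for this write-up):
#     path2fsn(b"foo") == path2fsn(u"foo") == fsnative(u"foo")
# bytes input is decoded with the locale/filesystem encoding.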
def fsn2text(path, strict=False):
"""
Args:
path (fsnative): The path to convert
strict (bool): Fail in case the conversion is not reversible
Returns:
`text`
Raises:
TypeError: In case no `fsnative` has been passed
ValueError: In case ``strict`` was True and the conversion failed
Converts a `fsnative` path to `text`.
Can be used to pass a path to some unicode API, like for example a GUI
toolkit.
If ``strict`` is True the conversion will fail in case it is not
reversible. This can be useful for converting program arguments that are
supposed to be text and erroring out in case they are not.
    Encoding the result with a Unicode encoding will always succeed.
"""
path = _fsn2native(path)
errors = "strict" if strict else "replace"
if is_win:
return path.encode("utf-16-le", _surrogatepass).decode("utf-16-le",
errors)
else:
return path.decode(_encoding, errors)
def text2fsn(text):
"""
Args:
text (text): The text to convert
Returns:
`fsnative`
Raises:
TypeError: In case no `text` has been passed
Takes `text` and converts it to a `fsnative`.
This operation is not reversible and can't fail.
"""
return fsnative(text)
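# Illustrative round-trip (assumptions: a UTF-8 locale; example added for
# this write-up):
#     fsn2text(text2fsn(u"caf\u00e9")) == u"caf\u00e9"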
def fsn2bytes(path, encoding="utf-8"):
"""
Args:
path (fsnative): The path to convert
encoding (`str`): encoding used for Windows
Returns:
`bytes`
Raises:
TypeError: If no `fsnative` path is passed
ValueError: If encoding fails or the encoding is invalid
Converts a `fsnative` path to `bytes`.
The passed *encoding* is only used on platforms where paths are not
associated with an encoding (Windows for example).
For Windows paths, lone surrogates will be encoded like normal code points
and surrogate pairs will be merged before encoding. In case of ``utf-8``
or ``utf-16-le`` this is equal to the `WTF-8 and WTF-16 encoding
<https://simonsapin.github.io/wtf-8/>`__.
"""
path = _fsn2native(path)
if is_win:
if encoding is None:
raise ValueError("invalid encoding %r" % encoding)
if PY2:
try:
return path.encode(encoding)
except LookupError:
raise ValueError("invalid encoding %r" % encoding)
else:
try:
return path.encode(encoding)
except LookupError:
raise ValueError("invalid encoding %r" % encoding)
except UnicodeEncodeError:
# Fallback implementation for text including surrogates
# merge surrogate codepoints
if _normalize_codec(encoding).startswith("utf-16"):
# fast path, utf-16 merges anyway
return path.encode(encoding, _surrogatepass)
return _merge_surrogates(path).encode(encoding, _surrogatepass)
else:
return path
def bytes2fsn(data, encoding="utf-8"):
"""
Args:
data (bytes): The data to convert
encoding (`str`): encoding used for Windows
Returns:
`fsnative`
Raises:
TypeError: If no `bytes` path is passed
ValueError: If decoding fails or the encoding is invalid
Turns `bytes` to a `fsnative` path.
The passed *encoding* is only used on platforms where paths are not
associated with an encoding (Windows for example).
For Windows paths ``WTF-8`` is accepted if ``utf-8`` is used and
``WTF-16`` accepted if ``utf-16-le`` is used.
"""
if not isinstance(data, bytes):
raise TypeError("data needs to be bytes")
if is_win:
if encoding is None:
raise ValueError("invalid encoding %r" % encoding)
try:
path = _decode_surrogatepass(data, encoding)
except LookupError:
raise ValueError("invalid encoding %r" % encoding)
if u"\x00" in path:
raise ValueError("contains nulls")
return path
else:
if b"\x00" in data:
raise ValueError("contains nulls")
if PY2:
return data
else:
return data.decode(_encoding, "surrogateescape")
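# Illustrative round-trip (assumption: added for this write-up):
#     bytes2fsn(fsn2bytes(fsnative(u"foo"), "utf-8"), "utf-8") == fsnative(u"foo")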
def uri2fsn(uri):
"""
Args:
uri (`text` or :obj:`python:str`): A file URI
Returns:
`fsnative`
Raises:
TypeError: In case an invalid type is passed
ValueError: In case the URI isn't a valid file URI
Takes a file URI and returns a `fsnative` path
"""
if PY2:
if isinstance(uri, text_type):
uri = uri.encode("utf-8")
if not isinstance(uri, bytes):
raise TypeError("uri needs to be ascii str or unicode")
else:
if not isinstance(uri, str):
raise TypeError("uri needs to be str")
parsed = urlparse(uri)
scheme = parsed.scheme
netloc = parsed.netloc
path = parsed.path
if scheme != "file":
raise ValueError("Not a file URI: %r" % uri)
if not path:
raise ValueError("Invalid file URI: %r" % uri)
uri = urlunparse(parsed)[7:]
if is_win:
try:
drive, rest = uri.split(":", 1)
except ValueError:
path = ""
rest = uri.replace("/", "\\")
else:
path = drive[-1] + ":"
rest = rest.replace("/", "\\")
if PY2:
path += unquote(rest)
else:
path += unquote(rest, encoding="utf-8", errors="surrogatepass")
if netloc:
path = "\\\\" + path
if PY2:
path = path.decode("utf-8")
if u"\x00" in path:
raise ValueError("embedded null")
return path
else:
if PY2:
path = unquote(uri)
else:
path = unquote(uri, encoding=_encoding, errors="surrogateescape")
if "\x00" in path:
raise ValueError("embedded null")
return path
def fsn2uri(path):
"""
Args:
path (fsnative): The path to convert to an URI
Returns:
`text`: An ASCII only URI
Raises:
TypeError: If no `fsnative` was passed
ValueError: If the path can't be converted
Takes a `fsnative` path and returns a file URI.
On Windows non-ASCII characters will be encoded using utf-8 and then
percent encoded.
"""
path = _fsn2native(path)
def _quote_path(path):
# RFC 2396
path = quote(path, "/:@&=+$,")
if PY2:
path = path.decode("ascii")
return path
if is_win:
buf = ctypes.create_unicode_buffer(winapi.INTERNET_MAX_URL_LENGTH)
length = winapi.DWORD(winapi.INTERNET_MAX_URL_LENGTH)
flags = 0
try:
winapi.UrlCreateFromPathW(path, buf, ctypes.byref(length), flags)
except WindowsError as e:
raise ValueError(e)
uri = buf[:length.value]
# https://bitbucket.org/pypy/pypy/issues/3133
uri = _merge_surrogates(uri)
# For some reason UrlCreateFromPathW escapes some chars outside of
# ASCII and some not. Unquote and re-quote with utf-8.
if PY3:
# latin-1 maps code points directly to bytes, which is what we want
uri = unquote(uri, "latin-1")
else:
# Python 2 does what we want by default
uri = unquote(uri)
return _quote_path(uri.encode("utf-8", _surrogatepass))
else:
return u"file://" + _quote_path(path)
| 18,792
|
Python
|
.py
| 498
| 29.369478
| 79
| 0.614411
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,549
|
__init__.py
|
rembo10_headphones/lib/mutagen/_senf/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ._fsnative import fsnative, path2fsn, fsn2text, fsn2bytes, \
bytes2fsn, uri2fsn, fsn2uri, text2fsn, fsn2norm
from ._print import print_, input_, supports_ansi_escape_codes
from ._stdlib import sep, pathsep, curdir, pardir, altsep, extsep, devnull, \
defpath, getcwd, expanduser, expandvars
from ._argv import argv
from ._environ import environ, getenv, unsetenv, putenv
from ._temp import mkstemp, gettempdir, gettempprefix, mkdtemp
fsnative, print_, getcwd, getenv, unsetenv, putenv, environ, expandvars, \
path2fsn, fsn2text, fsn2bytes, bytes2fsn, uri2fsn, fsn2uri, mkstemp, \
gettempdir, gettempprefix, mkdtemp, input_, expanduser, text2fsn, \
supports_ansi_escape_codes, fsn2norm
version = (1, 4, 2)
"""Tuple[`int`, `int`, `int`]: The version tuple (major, minor, micro)"""
version_string = ".".join(map(str, version))
"""`str`: A version string"""
argv = argv
"""List[`fsnative`]: Like `sys.argv` but contains unicode under
Windows + Python 2
"""
sep = sep
"""`fsnative`: Like `os.sep` but a `fsnative`"""
pathsep = pathsep
"""`fsnative`: Like `os.pathsep` but a `fsnative`"""
curdir = curdir
"""`fsnative`: Like `os.curdir` but a `fsnative`"""
pardir = pardir
"""`fsnative`: Like `os.pardir` but a fsnative"""
altsep = altsep
"""`fsnative` or `None`: Like `os.altsep` but a `fsnative` or `None`"""
extsep = extsep
"""`fsnative`: Like `os.extsep` but a `fsnative`"""
devnull = devnull
"""`fsnative`: Like `os.devnull` but a `fsnative`"""
defpath = defpath
"""`fsnative`: Like `os.defpath` but a `fsnative`"""
__all__ = []
| 2,696
|
Python
|
.py
| 58
| 44.672414
| 77
| 0.741478
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,550
|
_temp.py
|
rembo10_headphones/lib/mutagen/_senf/_temp.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tempfile
from ._fsnative import path2fsn, fsnative
def gettempdir():
"""
Returns:
`fsnative`
Like :func:`python3:tempfile.gettempdir`, but always returns a `fsnative`
path
"""
# FIXME: I don't want to reimplement all that logic, reading env vars etc.
# At least for the default it works.
return path2fsn(tempfile.gettempdir())
def gettempprefix():
"""
Returns:
`fsnative`
Like :func:`python3:tempfile.gettempprefix`, but always returns a
`fsnative` path
"""
return path2fsn(tempfile.gettempprefix())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
text (bool): if the file should be opened in text mode
Returns:
Tuple[`int`, `fsnative`]:
A tuple containing the file descriptor and the file path
Raises:
EnvironmentError
Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative`
path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkstemp(suffix, prefix, dir, text)
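# Illustrative usage (assumptions: "import os" is available at the call
# site; this example is not part of the original module):
#     fd, path = mkstemp(suffix=".txt")   # path is a fsnative
#     os.close(fd)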
def mkdtemp(suffix=None, prefix=None, dir=None):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
Returns:
`fsnative`: A path to a directory
Raises:
EnvironmentError
    Like :func:`python3:tempfile.mkdtemp` but always returns a `fsnative` path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkdtemp(suffix, prefix, dir)
| 3,282
|
Python
|
.py
| 76
| 38.552632
| 79
| 0.712492
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,551
|
_util.py
|
rembo10_headphones/lib/mutagen/mp4/_util.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from mutagen._util import cdata
def parse_full_atom(data):
"""Some atoms are versioned. Split them up in (version, flags, payload).
Can raise ValueError.
"""
if len(data) < 4:
raise ValueError("not enough data")
version = ord(data[0:1])
flags = cdata.uint_be(b"\x00" + data[1:4])
return version, flags, data[4:]
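# Illustrative example (assumption: added for this write-up):
#     parse_full_atom(b"\x01\x00\x00\x07payload") -> (1, 7, b"payload")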
| 665
|
Python
|
.py
| 17
| 35.705882
| 76
| 0.7014
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,552
|
__init__.py
|
rembo10_headphones/lib/mutagen/mp4/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from io import BytesIO
from collections.abc import Sequence
from datetime import timedelta
from mutagen import FileType, Tags, StreamInfo, PaddingInfo
from mutagen._constants import GENRES
from mutagen._util import cdata, insert_bytes, DictProxy, MutagenError, \
hashable, enum, get_size, resize_bytes, loadfile, convert_error, bchr, \
reraise
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(MutagenError):
pass
class MP4MetadataError(error):
pass
class MP4StreamInfoError(error):
pass
class MP4NoTrackError(MP4StreamInfoError):
pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
pass
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType']
@enum
class AtomDataType(object):
"""Enum for ``dataformat`` attribute of MP4FreeForm.
.. versionadded:: 1.25
"""
IMPLICIT = 0
"""for use with tags for which no type needs to be indicated because
only one type is allowed"""
UTF8 = 1
"""without any count or null terminator"""
UTF16 = 2
"""also known as UTF-16BE"""
SJIS = 3
"""deprecated unless it is needed for special Japanese characters"""
HTML = 6
"""the HTML file header specifies which HTML version"""
XML = 7
"""the XML header must identify the DTD or schemas"""
UUID = 8
"""also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
ISRC = 9
"""stored as UTF-8 text (valid as an ID)"""
MI3P = 10
"""stored as UTF-8 text (valid as an ID)"""
GIF = 12
"""(deprecated) a GIF image"""
JPEG = 13
"""a JPEG image"""
PNG = 14
"""PNG image"""
URL = 15
"""absolute, in UTF-8 characters"""
DURATION = 16
"""in milliseconds, 32-bit integer"""
DATETIME = 17
"""in UTC, counting seconds since midnight, January 1, 1904;
32 or 64-bits"""
GENRES = 18
"""a list of enumerated values"""
INTEGER = 21
"""a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
RIAA_PA = 24
"""RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
8-bit ingteger"""
UPC = 25
"""Universal Product Code, in text UTF-8 format (valid as an ID)"""
BMP = 27
"""Windows bitmap image"""
@hashable
class MP4Cover(bytes):
"""A cover artwork.
Attributes:
imageformat (`AtomDataType`): format of the image
(either FORMAT_JPEG or FORMAT_PNG)
"""
FORMAT_JPEG = AtomDataType.JPEG
FORMAT_PNG = AtomDataType.PNG
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, imageformat=FORMAT_JPEG):
self.imageformat = imageformat
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4Cover):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.imageformat == other.imageformat)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.imageformat))
@hashable
class MP4FreeForm(bytes):
"""A freeform value.
Attributes:
dataformat (`AtomDataType`): format of the data (see AtomDataType)
"""
FORMAT_DATA = AtomDataType.IMPLICIT # deprecated
FORMAT_TEXT = AtomDataType.UTF8 # deprecated
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
self.dataformat = dataformat
self.version = version
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4FreeForm):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.dataformat == other.dataformat and
self.version == other.version)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.dataformat))
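# Construction sketch for the two wrappers above (png_bytes is a
# hypothetical variable holding PNG data; the payloads are placeholders):
#
#     cover = MP4Cover(png_bytes, imageformat=MP4Cover.FORMAT_PNG)
#     free = MP4FreeForm(b"some value", dataformat=AtomDataType.UTF8)
#
# Both subclass bytes, so they compare equal to plain bytes of the same
# content; the extra attributes only matter when rendering atoms back.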
def _name2key(name):
return name.decode("latin-1")
def _key2name(key):
return key.encode("latin-1")
def _find_padding(atom_path):
# Check for padding "free" atom
# XXX: we only use them if they are adjacent to ilst, and only one.
# and there also is a top level free atom which we could use maybe..?
meta, ilst = atom_path[-2:]
assert meta.name == b"meta" and ilst.name == b"ilst"
index = meta.children.index(ilst)
try:
prev = meta.children[index - 1]
if prev.name == b"free":
return prev
except IndexError:
pass
try:
next_ = meta.children[index + 1]
if next_.name == b"free":
return next_
except IndexError:
pass
def _item_sort_key(key, value):
# iTunes always writes the tags in order of "relevance", try
# to copy it as closely as possible.
order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr"]
order = dict(zip(order, range(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
    # values, so we at least have something deterministic.
return (order.get(key[:4], last), len(repr(value)), repr(value))
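# Sketch of the resulting order: with the table above, a title atom sorts
# first, cover art near the end, and unknown keys last:
#
#     _item_sort_key("\xa9nam", ["Title"])[0]   # -> 0
#     _item_sort_key("covr", [b"img"])[0]       # -> 15
#     _item_sort_key("zzzz", ["?"])[0]          # -> 17, i.e. `last`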
class MP4Tags(DictProxy, Tags):
r"""MP4Tags()
Dictionary containing Apple iTunes metadata list key/values.
Keys are four byte identifiers, except for freeform ('----')
keys. Values are usually unicode strings, but some atoms have a
special structure:
Text values (multiple values per key are supported):
* '\\xa9nam' -- track title
* '\\xa9alb' -- album
* '\\xa9ART' -- artist
* 'aART' -- album artist
* '\\xa9wrt' -- composer
* '\\xa9day' -- year
* '\\xa9cmt' -- comment
* 'desc' -- description (usually used in podcasts)
* 'purd' -- purchase date
* '\\xa9grp' -- grouping
* '\\xa9gen' -- genre
* '\\xa9lyr' -- lyrics
* 'purl' -- podcast URL
* 'egid' -- podcast episode GUID
* 'catg' -- podcast category
* 'keyw' -- podcast keywords
* '\\xa9too' -- encoded by
* 'cprt' -- copyright
* 'soal' -- album sort order
* 'soaa' -- album artist sort order
* 'soar' -- artist sort order
* 'sonm' -- title sort order
* 'soco' -- composer sort order
* 'sosn' -- show sort order
* 'tvsh' -- show name
* '\\xa9wrk' -- work
* '\\xa9mvn' -- movement
Boolean values:
* 'cpil' -- part of a compilation
* 'pgap' -- part of a gapless album
* 'pcst' -- podcast (iTunes reads this only on import)
Tuples of ints (multiple values per key are supported):
* 'trkn' -- track number, total tracks
* 'disk' -- disc number, total discs
Integer values:
* 'tmpo' -- tempo/BPM
* '\\xa9mvc' -- Movement Count
* '\\xa9mvi' -- Movement Index
* 'shwm' -- work/movement
* 'stik' -- Media Kind
* 'hdvd' -- HD Video
* 'rtng' -- Content Rating
* 'tves' -- TV Episode
* 'tvsn' -- TV Season
* 'plID', 'cnID', 'geID', 'atID', 'sfID', 'cmID', 'akID' -- Various iTunes
Internal IDs
Others:
* 'covr' -- cover artwork, list of MP4Cover objects (which are
tagged strs)
* 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.
The freeform '----' frames use a key in the format '----:mean:name'
where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
identifier for this frame. The value is a str, but is probably
text that can be decoded as UTF-8. Multiple values per key are
supported.
MP4 tag data cannot exist outside of the structure of an MP4 file,
so this class should not be manually instantiated.
Unknown non-text tags and tags that failed to parse will be written
back as is.
"""
def __init__(self, *args, **kwargs):
self._failed_atoms = {}
super(MP4Tags, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
def load(self, atoms, fileobj):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError as key:
raise MP4MetadataError(key)
free = _find_padding(path)
self._padding = free.datalength if free is not None else 0
ilst = path[-1]
for atom in ilst.children:
ok, data = atom.read(fileobj)
if not ok:
raise MP4MetadataError("Not enough data")
try:
if atom.name in self.__atoms:
info = self.__atoms[atom.name]
info[0](self, atom, data)
else:
# unknown atom, try as text
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key has to be str")
self._render(key, value)
super(MP4Tags, self).__setitem__(key, value)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.meta.ilst" in atoms
def _render(self, key, value):
atom_name = _key2name(key)[:4]
if atom_name in self.__atoms:
render_func = self.__atoms[atom_name][1]
render_args = self.__atoms[atom_name][2:]
else:
render_func = type(self).__render_text
render_args = []
return render_func(self, key, value, *render_args)
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, padding=None):
values = []
items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv))
for key, value in items:
try:
values.append(self._render(key, value))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in self._failed_atoms.items():
            # Don't write atoms back if we have added a new one with
            # the same name. This excludes freeform atoms, which can
            # legitimately have multiple atoms with the same key (most
            # parsers seem to be able to handle that).
if key in self:
assert _key2name(key) != b"----"
continue
for data in failed:
values.append(Atom.render(_key2name(key), data))
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
try:
atoms = Atoms(filething.fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.__save(filething.fileobj, atoms, data, padding)
def __save(self, fileobj, atoms, data, padding):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError:
self.__save_new(fileobj, atoms, data, padding)
else:
self.__save_existing(fileobj, atoms, path, data, padding)
def __save_new(self, fileobj, atoms, ilst_data, padding_func):
hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data
try:
path = atoms.path(b"moov", b"udta")
except KeyError:
path = atoms.path(b"moov")
offset = path[-1]._dataoffset
# ignoring some atom overhead... but we don't have padding left anyway
# and padding_size is guaranteed to be less than zero
content_size = get_size(fileobj) - offset
padding_size = -len(meta_data)
assert padding_size < 0
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
new_padding = min(0xFFFFFFFF, new_padding)
free = Atom.render(b"free", b"\x00" * new_padding)
meta = Atom.render(b"meta", meta_data + free)
if path[-1].name != b"udta":
# moov.udta not found -- create one
data = Atom.render(b"udta", meta)
else:
data = meta
insert_bytes(fileobj, len(data), offset)
fileobj.seek(offset)
fileobj.write(data)
self.__update_parents(fileobj, path, len(data))
self.__update_offsets(fileobj, atoms, len(data), offset)
def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func):
# Replace the old ilst atom.
ilst = path[-1]
offset = ilst.offset
length = ilst.length
# Use adjacent free atom if there is one
free = _find_padding(path)
if free is not None:
offset = min(offset, free.offset)
length += free.length
# Always add a padding atom to make things easier
padding_overhead = len(Atom.render(b"free", b""))
content_size = get_size(fileobj) - (offset + length)
padding_size = length - (len(ilst_data) + padding_overhead)
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
# Limit padding size so we can be sure the free atom overhead is as we
# calculated above (see Atom.render)
new_padding = min(0xFFFFFFFF, new_padding)
ilst_data += Atom.render(b"free", b"\x00" * new_padding)
resize_bytes(fileobj, length, len(ilst_data), offset)
delta = len(ilst_data) - length
fileobj.seek(offset)
fileobj.write(ilst_data)
self.__update_parents(fileobj, path[:-1], delta)
self.__update_offsets(fileobj, atoms, delta, offset)
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
if delta == 0:
return
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
fileobj.write(cdata.to_ulonglong_be(size + delta))
else: # 32bit
fileobj.seek(atom.offset)
fileobj.write(cdata.to_uint_be(size + delta))
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets))
def __update_tfhd(self, fileobj, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 9)
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be(b"\x00" + data[:3])
if flags & 1:
o = cdata.ulonglong_be(data[7:15])
if o > offset:
o += delta
fileobj.seek(atom.offset + 16)
fileobj.write(cdata.to_ulonglong_be(o))
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
def __parse_data(self, atom, data):
pos = 0
while pos < atom.length - 8:
head = data[pos:pos + 12]
if len(head) != 12:
raise MP4MetadataError("truncated atom % r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (name, atom.name))
chunk = data[pos + 16:pos + length]
if len(chunk) != length - 16:
raise MP4MetadataError("truncated atom % r" % atom.name)
yield version, flags, chunk
pos += length
def __add(self, key, value, single=False):
assert isinstance(key, str)
if single:
self[key] = value
else:
self.setdefault(key, []).extend(value)
def __render_data(self, key, version, flags, value):
return Atom.render(_key2name(key), b"".join([
Atom.render(
b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
for data in value]))
def __parse_freeform(self, atom, data):
length = cdata.uint_be(data[:4])
mean = data[12:length]
pos = length
length = cdata.uint_be(data[pos:pos + 4])
name = data[pos + 12:pos + length]
pos += length
value = []
while pos < atom.length - 8:
length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
dataformat=flags, version=version))
pos += length
key = _name2key(atom.name + b":" + mean + b":" + name)
self.__add(key, value)
def __render_freeform(self, key, value):
if isinstance(value, bytes):
value = [value]
dummy, mean, name = _key2name(key).split(b":", 2)
mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name
data = b""
for v in value:
flags = AtomDataType.UTF8
version = 0
if isinstance(v, MP4FreeForm):
flags = v.dataformat
version = v.version
data += struct.pack(
">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
data += v
return Atom.render(b"----", mean + name + data)
def __parse_pair(self, atom, data):
key = _name2key(atom.name)
values = [struct.unpack(">2H", d[2:6]) for
version, flags, d in self.__parse_data(atom, data)]
self.__add(key, values)
def __render_pair(self, key, value):
data = []
for v in value:
try:
track, total = v
except TypeError:
raise ValueError
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">4H", 0, track, total, 0))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __render_pair_no_trailing(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">3H", 0, track, total))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __parse_genre(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0
if len(data) != 2:
raise MP4MetadataValueError("invalid genre")
genre = cdata.short_be(data)
# Translate to a freeform genre.
try:
genre = GENRES[genre - 1]
except IndexError:
# this will make us write it back at least
raise MP4MetadataValueError("unknown genre")
values.append(genre)
key = _name2key(b"\xa9gen")
self.__add(key, values)
def __parse_integer(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
if version != 0:
raise MP4MetadataValueError("unsupported version")
if flags not in (AtomDataType.IMPLICIT, AtomDataType.INTEGER):
raise MP4MetadataValueError("unsupported type")
if len(data) == 1:
value = cdata.int8(data)
elif len(data) == 2:
value = cdata.int16_be(data)
elif len(data) == 3:
value = cdata.int32_be(data + b"\x00") >> 8
elif len(data) == 4:
value = cdata.int32_be(data)
elif len(data) == 8:
value = cdata.int64_be(data)
else:
raise MP4MetadataValueError(
"invalid value size %d" % len(data))
values.append(value)
key = _name2key(atom.name)
self.__add(key, values)
def __render_integer(self, key, value, min_bytes):
assert min_bytes in (1, 2, 4, 8)
data_list = []
try:
for v in value:
# We default to the int size of the usual values written
# by itunes for compatibility.
if cdata.int8_min <= v <= cdata.int8_max and min_bytes <= 1:
data = cdata.to_int8(v)
elif cdata.int16_min <= v <= cdata.int16_max and \
min_bytes <= 2:
data = cdata.to_int16_be(v)
elif cdata.int32_min <= v <= cdata.int32_max and \
min_bytes <= 4:
data = cdata.to_int32_be(v)
elif cdata.int64_min <= v <= cdata.int64_max and \
min_bytes <= 8:
data = cdata.to_int64_be(v)
else:
raise MP4MetadataValueError(
"value out of range: %r" % value)
data_list.append(data)
except (TypeError, ValueError, cdata.error) as e:
raise MP4MetadataValueError(e)
return self.__render_data(key, 0, AtomDataType.INTEGER, data_list)
def __parse_bool(self, atom, data):
for version, flags, data in self.__parse_data(atom, data):
if len(data) != 1:
raise MP4MetadataValueError("invalid bool")
value = bool(ord(data))
key = _name2key(atom.name)
self.__add(key, value, single=True)
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [bchr(bool(value))])
def __parse_cover(self, atom, data):
values = []
pos = 0
while pos < atom.length - 8:
length, name, imageformat = struct.unpack(">I4sI",
data[pos:pos + 12])
if name != b"data":
if name == b"name":
pos += length
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if length < 1:
raise MP4MetadataError(
"atom %r has a length of zero" % atom.name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
imageformat = MP4Cover.FORMAT_JPEG
cover = MP4Cover(data[pos + 16:pos + length], imageformat)
values.append(cover)
pos += length
key = _name2key(atom.name)
self.__add(key, values)
def __render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + cover))
return Atom.render(_key2name(key), b"".join(atom_data))
def __parse_text(self, atom, data, implicit=True):
        # With implicit=False (used for unknown atoms) only UTF-8 entries
        # are accepted; for known atoms the implicit entries can be
        # assumed to be UTF-8 as well.
values = []
for version, flags, atom_data in self.__parse_data(atom, data):
if implicit:
if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
raise MP4MetadataError(
"Unknown atom type %r for %r" % (flags, atom.name))
else:
if flags != AtomDataType.UTF8:
raise MP4MetadataError(
"%r is not text, ignore" % atom.name)
try:
text = atom_data.decode("utf-8")
except UnicodeDecodeError as e:
raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e))
values.append(text)
key = _name2key(atom.name)
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, str):
value = [value]
encoded = []
for v in value:
if not isinstance(v, str):
raise TypeError("%r not str" % v)
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
self.save(filename, padding=lambda x: 0)
__atoms = {
b"----": (__parse_freeform, __render_freeform),
b"trkn": (__parse_pair, __render_pair),
b"disk": (__parse_pair, __render_pair_no_trailing),
b"gnre": (__parse_genre, None),
b"plID": (__parse_integer, __render_integer, 8),
b"cnID": (__parse_integer, __render_integer, 4),
b"geID": (__parse_integer, __render_integer, 4),
b"atID": (__parse_integer, __render_integer, 4),
b"sfID": (__parse_integer, __render_integer, 4),
b"cmID": (__parse_integer, __render_integer, 4),
b"akID": (__parse_integer, __render_integer, 1),
b"tvsn": (__parse_integer, __render_integer, 4),
b"tves": (__parse_integer, __render_integer, 4),
b"tmpo": (__parse_integer, __render_integer, 2),
b"\xa9mvi": (__parse_integer, __render_integer, 2),
b"\xa9mvc": (__parse_integer, __render_integer, 2),
b"cpil": (__parse_bool, __render_bool),
b"pgap": (__parse_bool, __render_bool),
b"pcst": (__parse_bool, __render_bool),
b"shwm": (__parse_integer, __render_integer, 1),
b"stik": (__parse_integer, __render_integer, 1),
b"hdvd": (__parse_integer, __render_integer, 1),
b"rtng": (__parse_integer, __render_integer, 1),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
b"egid": (__parse_text, __render_text),
}
# these allow implicit flags and parse as text
for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
b"sosn", b"tvsh"]:
__atoms[name] = (__parse_text, __render_text)
def pprint(self):
def to_line(key, value):
assert isinstance(key, str)
if isinstance(value, str):
return u"%s=%s" % (key, value)
return u"%s=%r" % (key, value)
values = []
for key, value in sorted(self.items()):
if not isinstance(key, str):
key = key.decode("latin-1")
if key == "covr":
values.append(u"%s=%s" % (key, u", ".join(
[u"[%d bytes of data]" % len(data) for data in value])))
elif isinstance(value, list):
for v in value:
values.append(to_line(key, v))
else:
values.append(to_line(key, value))
return u"\n".join(values)
class Chapter(object):
"""Chapter()
Chapter information container
"""
def __init__(self, start, title):
self.start = start
self.title = title
class MP4Chapters(Sequence):
"""MP4Chapters()
MPEG-4 Chapter information.
Supports the 'moov.udta.chpl' box.
A sequence of Chapter objects with the following members:
start (`float`): position from the start of the file in seconds
title (`str`): title of the chapter
"""
def __init__(self, *args, **kwargs):
self._timescale = None
self._duration = None
self._chapters = []
super(MP4Chapters, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
def __len__(self):
return self._chapters.__len__()
def __getitem__(self, key):
return self._chapters.__getitem__(key)
def load(self, atoms, fileobj):
try:
mvhd = atoms.path(b"moov", b"mvhd")[-1]
except KeyError as key:
            raise MP4MetadataError(key)
self._parse_mvhd(mvhd, fileobj)
if not self._timescale:
raise MP4MetadataError("Unable to get timescale")
try:
chpl = atoms.path(b"moov", b"udta", b"chpl")[-1]
except KeyError as key:
            raise MP4MetadataError(key)
self._parse_chpl(chpl, fileobj)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.chpl" in atoms and b"moov.mvhd" in atoms
def _parse_mvhd(self, atom, fileobj):
assert atom.name == b"mvhd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid mvhd")
version = data[0]
pos = 4
if version == 0:
pos += 8 # created, modified
self._timescale = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
self._duration = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
elif version == 1:
pos += 16 # created, modified
self._timescale = struct.unpack(">l", data[pos:pos + 4])[0]
pos += 4
self._duration = struct.unpack(">q", data[pos:pos + 8])[0]
pos += 8
def _parse_chpl(self, atom, fileobj):
assert atom.name == b"chpl"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid atom")
chapters = data[8]
pos = 9
for i in range(chapters):
start = struct.unpack(">Q", data[pos:pos + 8])[0] / 10000
pos += 8
title_len = data[pos]
pos += 1
try:
title = data[pos:pos + title_len].decode()
except UnicodeDecodeError as e:
raise MP4MetadataError("chapter %d title: %s" % (i, e))
pos += title_len
self._chapters.append(Chapter(start / self._timescale, title))
def pprint(self):
chapters = ["%s %s" % (timedelta(seconds=chapter.start), chapter.title)
for chapter in self._chapters]
return "chapters=%s" % '\n '.join(chapters)
class MP4Info(StreamInfo):
"""MP4Info()
MPEG-4 stream information.
Attributes:
bitrate (`int`): bitrate in bits per second, as an int
length (`float`): file length in seconds, as a float
channels (`int`): number of audio channels
sample_rate (`int`): audio sampling rate in Hz
bits_per_sample (`int`): bits per sample
codec (`mutagen.text`):
* if starting with ``"mp4a"`` uses an mp4a audio codec
(see the codec parameter in rfc6381 for details e.g.
``"mp4a.40.2"``)
* for everything else see a list of possible values at
http://www.mp4ra.org/codecs.html
e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
codec_description (`mutagen.text`):
Name of the codec used (ALAC, AAC LC, AC-3...). Values might
change in the future, use for display purposes only.
"""
bitrate = 0
length = 0.0
channels = 0
sample_rate = 0
bits_per_sample = 0
codec = u""
codec_description = u""
def __init__(self, *args, **kwargs):
if args or kwargs:
self.load(*args, **kwargs)
@convert_error(IOError, MP4StreamInfoError)
def load(self, atoms, fileobj):
try:
moov = atoms[b"moov"]
except KeyError:
raise MP4StreamInfoError("not a MP4 file")
for trak in moov.findall(b"trak"):
hdlr = trak[b"mdia", b"hdlr"]
ok, data = hdlr.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
if data[8:12] == b"soun":
break
else:
            raise MP4NoTrackError("no audio track found")
mdhd = trak[b"mdia", b"mdhd"]
ok, data = mdhd.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version == 0:
offset = 8
fmt = ">2I"
elif version == 1:
offset = 16
fmt = ">IQ"
else:
raise MP4StreamInfoError("Unknown mdhd version %d" % version)
end = offset + struct.calcsize(fmt)
unit, length = struct.unpack(fmt, data[offset:end])
try:
self.length = float(length) / unit
except ZeroDivisionError:
self.length = 0
try:
atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
except KeyError:
pass
else:
self._parse_stsd(atom, fileobj)
def _parse_stsd(self, atom, fileobj):
"""Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError.
"""
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
# look at the first entry if there is one
entry_fileobj = BytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description
def pprint(self):
return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
self.codec_description, self.length, self.bitrate)
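# Reading the stream information populated above (sketch; the zero/empty
# class defaults remain when the relevant boxes are missing):
#
#     audio = MP4("song.m4a")
#     audio.info.length    # duration in seconds, float
#     audio.info.bitrate   # bits per second
#     audio.info.codec     # e.g. "mp4a.40.2" or "alac"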
class MP4(FileType):
"""MP4(filething)
An MPEG-4 audio file, probably containing AAC.
If more than one track is present in the file, the first is used.
Only audio ('soun') tracks will be read.
Arguments:
filething (filething)
Attributes:
info (`MP4Info`)
tags (`MP4Tags`)
"""
MP4Tags = MP4Tags
MP4Chapters = MP4Chapters
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
@loadfile()
def load(self, filething):
fileobj = filething.fileobj
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.info = MP4Info()
try:
self.info.load(atoms, fileobj)
except MP4NoTrackError:
pass
except error:
raise
except Exception as err:
reraise(MP4StreamInfoError, err, sys.exc_info()[2])
if not MP4Tags._can_load(atoms):
self.tags = None
else:
try:
self.tags = self.MP4Tags(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
if not MP4Chapters._can_load(atoms):
self.chapters = None
else:
try:
self.chapters = self.MP4Chapters(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
@property
def _padding(self):
if self.tags is None:
return 0
else:
return self.tags._padding
def save(self, *args, **kwargs):
"""save(filething=None, padding=None)"""
super(MP4, self).save(*args, **kwargs)
def pprint(self):
"""
Returns:
text: stream information, comment key=value pairs and chapters.
"""
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
pass
else:
stream += ((tags and "\n" + tags) or "")
try:
chapters = self.chapters.pprint()
except AttributeError:
pass
else:
stream += "\n" + chapters
return stream
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
else:
raise error("an MP4 tag already exists")
@staticmethod
def score(filename, fileobj, header_data):
return (b"ftyp" in header_data) + (b"mp4" in header_data)
Open = MP4
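# save() accepts a padding callback which receives a PaddingInfo and
# returns the number of padding bytes to leave; delete() below uses it to
# drop all padding. A sketch that keeps the existing padding, clamped to
# a non-negative amount:
#
#     audio.save(padding=lambda info: max(0, info.padding))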
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = MP4(filething)
filething.fileobj.seek(0)
t.delete(filething)
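# Module-level usage sketch ("song.m4a" is a hypothetical path):
#
#     from mutagen.mp4 import delete
#     delete("song.m4a")   # strips the ilst tags and leaves no padding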
| 41,210 | Python | .py | 1,018 | 30.527505 | 79 | 0.563652 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,553 | _atom.py | rembo10_headphones/lib/mutagen/mp4/_atom.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
from mutagen._util import convert_error
# This is not an exhaustive list of container atoms, but just the
# ones this module needs to peek inside.
_CONTAINERS = [b"moov", b"udta", b"trak", b"mdia", b"meta", b"ilst",
b"stbl", b"minf", b"moof", b"traf"]
_SKIP_SIZE = {b"meta": 4}
class AtomError(Exception):
pass
class Atom(object):
"""An individual atom.
Attributes:
children -- list child atoms (or None for non-container atoms)
length -- length of this atom, including length and name
    datalength -- length of this atom without length and name
name -- four byte name of the atom, as a str
offset -- location in the constructor-given fileobj of this atom
This structure should only be used internally by Mutagen.
"""
children = None
@convert_error(IOError, AtomError)
def __init__(self, fileobj, level=0):
"""May raise AtomError"""
self.offset = fileobj.tell()
try:
self.length, self.name = struct.unpack(">I4s", fileobj.read(8))
except struct.error:
raise AtomError("truncated data")
self._dataoffset = self.offset + 8
if self.length == 1:
try:
self.length, = struct.unpack(">Q", fileobj.read(8))
except struct.error:
raise AtomError("truncated data")
self._dataoffset += 8
if self.length < 16:
raise AtomError(
"64 bit atom length can only be 16 and higher")
elif self.length == 0:
if level != 0:
raise AtomError(
"only a top-level atom can have zero length")
# Only the last atom is supposed to have a zero-length, meaning it
# extends to the end of file.
fileobj.seek(0, 2)
self.length = fileobj.tell() - self.offset
fileobj.seek(self.offset + 8, 0)
elif self.length < 8:
raise AtomError(
"atom length can only be 0, 1 or 8 and higher")
if self.name in _CONTAINERS:
self.children = []
fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1)
while fileobj.tell() < self.offset + self.length:
self.children.append(Atom(fileobj, level + 1))
else:
fileobj.seek(self.offset + self.length, 0)
@property
def datalength(self):
return self.length - (self._dataoffset - self.offset)
def read(self, fileobj):
"""Return if all data could be read and the atom payload"""
fileobj.seek(self._dataoffset, 0)
data = fileobj.read(self.datalength)
return len(data) == self.datalength, data
@staticmethod
def render(name, data):
"""Render raw atom data."""
# this raises OverflowError if Py_ssize_t can't handle the atom data
size = len(data) + 8
if size <= 0xFFFFFFFF:
return struct.pack(">I4s", size, name) + data
else:
return struct.pack(">I4sQ", 1, name, size + 8) + data
def findall(self, name, recursive=False):
"""Recursively find all child atoms by specified name."""
if self.children is not None:
for child in self.children:
if child.name == name:
yield child
if recursive:
for atom in child.findall(name, True):
yield atom
def __getitem__(self, remaining):
"""Look up a child atom, potentially recursively.
e.g. atom['udta', 'meta'] => <Atom name='meta' ...>
"""
if not remaining:
return self
elif self.children is None:
raise KeyError("%r is not a container" % self.name)
for child in self.children:
if child.name == remaining[0]:
return child[remaining[1:]]
else:
raise KeyError("%r not found" % remaining[0])
def __repr__(self):
cls = self.__class__.__name__
if self.children is None:
return "<%s name=%r length=%r offset=%r>" % (
cls, self.name, self.length, self.offset)
else:
children = "\n".join([" " + line for child in self.children
for line in repr(child).splitlines()])
return "<%s name=%r length=%r offset=%r\n%s>" % (
cls, self.name, self.length, self.offset, children)
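# Worked example of render()'s size-header selection (a sketch): small
# payloads use the 32 bit form, so a "free" atom with 8 payload bytes is
# exactly 16 bytes long:
#
#     blob = Atom.render(b"free", b"\x00" * 8)
#     assert blob == struct.pack(">I4s", 16, b"free") + b"\x00" * 8
#
# Totals over 0xFFFFFFFF switch to the 64 bit form: a size field of 1
# followed by an 8 byte length that also counts those extra 8 bytes.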
class Atoms(object):
"""Root atoms in a given file.
Attributes:
atoms -- a list of top-level atoms as Atom objects
This structure should only be used internally by Mutagen.
"""
@convert_error(IOError, AtomError)
def __init__(self, fileobj):
self.atoms = []
fileobj.seek(0, 2)
end = fileobj.tell()
fileobj.seek(0)
while fileobj.tell() + 8 <= end:
self.atoms.append(Atom(fileobj))
def path(self, *names):
"""Look up and return the complete path of an atom.
For example, atoms.path('moov', 'udta', 'meta') will return a
list of three atoms, corresponding to the moov, udta, and meta
atoms.
"""
path = [self]
for name in names:
path.append(path[-1][name, ])
return path[1:]
def __contains__(self, names):
try:
self[names]
except KeyError:
return False
return True
def __getitem__(self, names):
"""Look up a child atom.
'names' may be a list of atoms (['moov', 'udta']) or a string
specifying the complete path ('moov.udta').
"""
if isinstance(names, bytes):
names = names.split(b".")
for child in self.atoms:
if child.name == names[0]:
return child[names[1:]]
else:
raise KeyError("%r not found" % names[0])
def __repr__(self):
return "\n".join([repr(child) for child in self.atoms])
| 6,333 | Python | .py | 157 | 30.649682 | 78 | 0.575965 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,554 | _as_entry.py | rembo10_headphones/lib/mutagen/mp4/_as_entry.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from io import BytesIO
from mutagen.aac import ProgramConfigElement
from mutagen._util import BitReader, BitReaderError, cdata
from ._util import parse_full_atom
from ._atom import Atom, AtomError
class ASEntryError(Exception):
pass
class AudioSampleEntry(object):
"""Parses an AudioSampleEntry atom.
Private API.
Attrs:
channels (int): number of channels
sample_size (int): sample size in bits
sample_rate (int): sample rate in Hz
bitrate (int): bits per second (0 means unknown)
codec (string):
audio codec, either 'mp4a[.*][.*]' (rfc6381) or 'alac'
codec_description (string): descriptive codec name e.g. "AAC LC+SBR"
Can raise ASEntryError.
"""
channels = 0
sample_size = 0
sample_rate = 0
bitrate = 0
codec = None
codec_description = None
def __init__(self, atom, fileobj):
ok, data = atom.read(fileobj)
if not ok:
raise ASEntryError("too short %r atom" % atom.name)
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
# SampleEntry
r.skip(6 * 8) # reserved
r.skip(2 * 8) # data_ref_index
# AudioSampleEntry
r.skip(8 * 8) # reserved
self.channels = r.bits(16)
self.sample_size = r.bits(16)
r.skip(2 * 8) # pre_defined
r.skip(2 * 8) # reserved
self.sample_rate = r.bits(32) >> 16
except BitReaderError as e:
raise ASEntryError(e)
assert r.is_aligned()
try:
extra = Atom(fileobj)
except AtomError as e:
raise ASEntryError(e)
self.codec = atom.name.decode("latin-1")
self.codec_description = None
if atom.name == b"mp4a" and extra.name == b"esds":
self._parse_esds(extra, fileobj)
elif atom.name == b"alac" and extra.name == b"alac":
self._parse_alac(extra, fileobj)
elif atom.name == b"ac-3" and extra.name == b"dac3":
self._parse_dac3(extra, fileobj)
if self.codec_description is None:
self.codec_description = self.codec.upper()
def _parse_dac3(self, atom, fileobj):
# ETSI TS 102 366
assert atom.name == b"dac3"
ok, data = atom.read(fileobj)
if not ok:
raise ASEntryError("truncated %s atom" % atom.name)
fileobj = BytesIO(data)
r = BitReader(fileobj)
# sample_rate in AudioSampleEntry covers values in
# fscod2 and not just fscod, so ignore fscod here.
try:
r.skip(2 + 5 + 3) # fscod, bsid, bsmod
acmod = r.bits(3)
lfeon = r.bits(1)
bit_rate_code = r.bits(5)
r.skip(5) # reserved
except BitReaderError as e:
raise ASEntryError(e)
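        # acmod 0..7 selects the AC-3 channel layouts 1+1, 1/0, 2/0, 3/0,
        # 2/1, 3/1, 2/2 and 3/2 (hence the table below); lfeon adds the
        # LFE channel when set.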
self.channels = [2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon
try:
self.bitrate = [
32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
224, 256, 320, 384, 448, 512, 576, 640][bit_rate_code] * 1000
except IndexError:
pass
def _parse_alac(self, atom, fileobj):
# https://alac.macosforge.org/trac/browser/trunk/
# ALACMagicCookieDescription.txt
assert atom.name == b"alac"
ok, data = atom.read(fileobj)
if not ok:
raise ASEntryError("truncated %s atom" % atom.name)
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise ASEntryError(e)
if version != 0:
raise ASEntryError("Unsupported version %d" % version)
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
# for some files the AudioSampleEntry values default to 44100/2chan
# and the real info is in the alac cookie, so prefer it
r.skip(32) # frameLength
compatibleVersion = r.bits(8)
if compatibleVersion != 0:
return
self.sample_size = r.bits(8)
r.skip(8 + 8 + 8)
self.channels = r.bits(8)
r.skip(16 + 32)
self.bitrate = r.bits(32)
self.sample_rate = r.bits(32)
except BitReaderError as e:
raise ASEntryError(e)
def _parse_esds(self, esds, fileobj):
assert esds.name == b"esds"
ok, data = esds.read(fileobj)
if not ok:
raise ASEntryError("truncated %s atom" % esds.name)
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise ASEntryError(e)
if version != 0:
raise ASEntryError("Unsupported version %d" % version)
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
tag = r.bits(8)
if tag != ES_Descriptor.TAG:
raise ASEntryError("unexpected descriptor: %d" % tag)
assert r.is_aligned()
except BitReaderError as e:
raise ASEntryError(e)
try:
            es_desc = ES_Descriptor.parse(fileobj)
        except DescriptorError as e:
            raise ASEntryError(e)
        dec_conf_desc = es_desc.decConfigDescr
self.bitrate = dec_conf_desc.avgBitrate
self.codec += dec_conf_desc.codec_param
self.codec_description = dec_conf_desc.codec_desc
decSpecificInfo = dec_conf_desc.decSpecificInfo
if decSpecificInfo is not None:
if decSpecificInfo.channels != 0:
self.channels = decSpecificInfo.channels
if decSpecificInfo.sample_rate != 0:
self.sample_rate = decSpecificInfo.sample_rate
class DescriptorError(Exception):
pass
class BaseDescriptor(object):
TAG = None
@classmethod
def _parse_desc_length_file(cls, fileobj):
"""May raise ValueError"""
value = 0
for i in range(4):
try:
b = cdata.uint8(fileobj.read(1))
except cdata.error as e:
raise ValueError(e)
value = (value << 7) | (b & 0x7f)
if not b >> 7:
break
else:
raise ValueError("invalid descriptor length")
return value
@classmethod
def parse(cls, fileobj):
"""Returns a parsed instance of the called type.
The file position is right after the descriptor after this returns.
Raises DescriptorError
"""
try:
length = cls._parse_desc_length_file(fileobj)
except ValueError as e:
raise DescriptorError(e)
pos = fileobj.tell()
instance = cls(fileobj, length)
left = length - (fileobj.tell() - pos)
if left > 0:
fileobj.seek(left, 1)
else:
            # XXX: In case the instance length is shorter than the content
# assume the size is wrong and just continue parsing
# https://github.com/quodlibet/mutagen/issues/444
pass
return instance
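# Worked example of the "expandable" length decoded by
# _parse_desc_length_file above: each byte contributes 7 payload bits,
# with the top bit as a continuation flag, so the bytes 0x81 0x23 decode
# to (1 << 7) | 0x23 == 163.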
class ES_Descriptor(BaseDescriptor):
TAG = 0x3
def __init__(self, fileobj, length):
"""Raises DescriptorError"""
r = BitReader(fileobj)
try:
self.ES_ID = r.bits(16)
self.streamDependenceFlag = r.bits(1)
self.URL_Flag = r.bits(1)
self.OCRstreamFlag = r.bits(1)
self.streamPriority = r.bits(5)
if self.streamDependenceFlag:
self.dependsOn_ES_ID = r.bits(16)
if self.URL_Flag:
URLlength = r.bits(8)
self.URLstring = r.bytes(URLlength)
if self.OCRstreamFlag:
self.OCR_ES_Id = r.bits(16)
tag = r.bits(8)
except BitReaderError as e:
raise DescriptorError(e)
if tag != DecoderConfigDescriptor.TAG:
raise DescriptorError("unexpected DecoderConfigDescrTag %d" % tag)
assert r.is_aligned()
self.decConfigDescr = DecoderConfigDescriptor.parse(fileobj)
class DecoderConfigDescriptor(BaseDescriptor):
TAG = 0x4
decSpecificInfo = None
"""A DecoderSpecificInfo, optional"""
def __init__(self, fileobj, length):
"""Raises DescriptorError"""
r = BitReader(fileobj)
try:
self.objectTypeIndication = r.bits(8)
self.streamType = r.bits(6)
self.upStream = r.bits(1)
self.reserved = r.bits(1)
self.bufferSizeDB = r.bits(24)
self.maxBitrate = r.bits(32)
self.avgBitrate = r.bits(32)
if (self.objectTypeIndication, self.streamType) != (0x40, 0x5):
return
# all from here is optional
if length * 8 == r.get_position():
return
tag = r.bits(8)
except BitReaderError as e:
raise DescriptorError(e)
if tag == DecoderSpecificInfo.TAG:
assert r.is_aligned()
self.decSpecificInfo = DecoderSpecificInfo.parse(fileobj)
@property
def codec_param(self):
"""string"""
param = u".%X" % self.objectTypeIndication
info = self.decSpecificInfo
if info is not None:
param += u".%d" % info.audioObjectType
return param
@property
def codec_desc(self):
"""string or None"""
info = self.decSpecificInfo
desc = None
if info is not None:
desc = info.description
return desc
class DecoderSpecificInfo(BaseDescriptor):
TAG = 0x5
_TYPE_NAMES = [
None, "AAC MAIN", "AAC LC", "AAC SSR", "AAC LTP", "SBR",
"AAC scalable", "TwinVQ", "CELP", "HVXC", None, None, "TTSI",
"Main synthetic", "Wavetable synthesis", "General MIDI",
"Algorithmic Synthesis and Audio FX", "ER AAC LC", None, "ER AAC LTP",
"ER AAC scalable", "ER Twin VQ", "ER BSAC", "ER AAC LD", "ER CELP",
"ER HVXC", "ER HILN", "ER Parametric", "SSC", "PS", "MPEG Surround",
None, "Layer-1", "Layer-2", "Layer-3", "DST", "ALS", "SLS",
"SLS non-core", "ER AAC ELD", "SMR Simple", "SMR Main", "USAC",
"SAOC", "LD MPEG Surround", "USAC"
]
_FREQS = [
96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000,
12000, 11025, 8000, 7350,
]
@property
def description(self):
"""string or None if unknown"""
name = None
try:
name = self._TYPE_NAMES[self.audioObjectType]
except IndexError:
pass
if name is None:
return
if self.sbrPresentFlag == 1:
name += "+SBR"
if self.psPresentFlag == 1:
name += "+PS"
return str(name)
@property
def sample_rate(self):
"""0 means unknown"""
if self.sbrPresentFlag == 1:
return self.extensionSamplingFrequency
elif self.sbrPresentFlag == 0:
return self.samplingFrequency
else:
# these are all types that support SBR
aot_can_sbr = (1, 2, 3, 4, 6, 17, 19, 20, 22)
if self.audioObjectType not in aot_can_sbr:
return self.samplingFrequency
# there shouldn't be SBR for > 48KHz
if self.samplingFrequency > 24000:
return self.samplingFrequency
# either samplingFrequency or samplingFrequency * 2
return 0
@property
def channels(self):
"""channel count or 0 for unknown"""
# from ProgramConfigElement()
if hasattr(self, "pce_channels"):
return self.pce_channels
conf = getattr(
self, "extensionChannelConfiguration", self.channelConfiguration)
if conf == 1:
if self.psPresentFlag == -1:
return 0
elif self.psPresentFlag == 1:
return 2
else:
return 1
elif conf == 7:
return 8
elif conf > 7:
return 0
else:
return conf
def _get_audio_object_type(self, r):
"""Raises BitReaderError"""
audioObjectType = r.bits(5)
if audioObjectType == 31:
audioObjectTypeExt = r.bits(6)
audioObjectType = 32 + audioObjectTypeExt
return audioObjectType
def _get_sampling_freq(self, r):
"""Raises BitReaderError"""
samplingFrequencyIndex = r.bits(4)
if samplingFrequencyIndex == 0xf:
samplingFrequency = r.bits(24)
else:
try:
samplingFrequency = self._FREQS[samplingFrequencyIndex]
except IndexError:
samplingFrequency = 0
return samplingFrequency
def __init__(self, fileobj, length):
"""Raises DescriptorError"""
r = BitReader(fileobj)
try:
self._parse(r, length)
except BitReaderError as e:
raise DescriptorError(e)
def _parse(self, r, length):
"""Raises BitReaderError"""
def bits_left():
return length * 8 - r.get_position()
self.audioObjectType = self._get_audio_object_type(r)
self.samplingFrequency = self._get_sampling_freq(r)
self.channelConfiguration = r.bits(4)
self.sbrPresentFlag = -1
self.psPresentFlag = -1
if self.audioObjectType in (5, 29):
self.extensionAudioObjectType = 5
self.sbrPresentFlag = 1
if self.audioObjectType == 29:
self.psPresentFlag = 1
self.extensionSamplingFrequency = self._get_sampling_freq(r)
self.audioObjectType = self._get_audio_object_type(r)
if self.audioObjectType == 22:
self.extensionChannelConfiguration = r.bits(4)
else:
self.extensionAudioObjectType = 0
if self.audioObjectType in (1, 2, 3, 4, 6, 7, 17, 19, 20, 21, 22, 23):
try:
GASpecificConfig(r, self)
except NotImplementedError:
# unsupported, (warn?)
return
else:
# unsupported
return
if self.audioObjectType in (
17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39):
epConfig = r.bits(2)
if epConfig in (2, 3):
# unsupported
return
if self.extensionAudioObjectType != 5 and bits_left() >= 16:
syncExtensionType = r.bits(11)
if syncExtensionType == 0x2b7:
self.extensionAudioObjectType = self._get_audio_object_type(r)
if self.extensionAudioObjectType == 5:
self.sbrPresentFlag = r.bits(1)
if self.sbrPresentFlag == 1:
self.extensionSamplingFrequency = \
self._get_sampling_freq(r)
if bits_left() >= 12:
syncExtensionType = r.bits(11)
if syncExtensionType == 0x548:
self.psPresentFlag = r.bits(1)
if self.extensionAudioObjectType == 22:
self.sbrPresentFlag = r.bits(1)
if self.sbrPresentFlag == 1:
self.extensionSamplingFrequency = \
self._get_sampling_freq(r)
self.extensionChannelConfiguration = r.bits(4)
def GASpecificConfig(r, info):
"""Reads GASpecificConfig which is needed to get the data after that
(there is no length defined to skip it) and to read program_config_element
which can contain channel counts.
May raise BitReaderError on error or
NotImplementedError if some reserved data was set.
"""
assert isinstance(info, DecoderSpecificInfo)
r.skip(1) # frameLengthFlag
dependsOnCoreCoder = r.bits(1)
if dependsOnCoreCoder:
r.skip(14)
extensionFlag = r.bits(1)
if not info.channelConfiguration:
pce = ProgramConfigElement(r)
info.pce_channels = pce.channels
if info.audioObjectType == 6 or info.audioObjectType == 20:
r.skip(3)
if extensionFlag:
if info.audioObjectType == 22:
r.skip(5 + 11)
if info.audioObjectType in (17, 19, 20, 23):
r.skip(1 + 1 + 1)
extensionFlag3 = r.bits(1)
if extensionFlag3 != 0:
raise NotImplementedError("extensionFlag3 set")
| 16,932 | Python | .py | 438 | 27.938356 | 79 | 0.571681 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,555 | _objects.py | rembo10_headphones/lib/mutagen/asf/_objects.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
from mutagen._util import cdata, get_size
from mutagen._tags import PaddingInfo
from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError
from ._attrs import ASFBaseAttribute, ASFUnicodeAttribute
class BaseObject(object):
"""Base ASF object."""
GUID = None
_TYPES = {}
def __init__(self):
self.objects = []
self.data = b""
def parse(self, asf, data):
self.data = data
def render(self, asf):
data = self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data
return data
def get_child(self, guid):
for obj in self.objects:
if obj.GUID == guid:
return obj
return None
@classmethod
def _register(cls, other):
cls._TYPES[other.GUID] = other
return other
@classmethod
def _get_object(cls, guid):
if guid in cls._TYPES:
return cls._TYPES[guid]()
else:
return UnknownObject(guid)
def __repr__(self):
return "<%s GUID=%s objects=%r>" % (
type(self).__name__, bytes2guid(self.GUID), self.objects)
def pprint(self):
        lines = []
        lines.append("%s(%s)" % (type(self).__name__, bytes2guid(self.GUID)))
        for o in self.objects:
            for e in o.pprint().splitlines():
                lines.append("  " + e)
        return "\n".join(lines)
class UnknownObject(BaseObject):
"""Unknown ASF object."""
def __init__(self, guid):
super(UnknownObject, self).__init__()
assert isinstance(guid, bytes)
self.GUID = guid
@BaseObject._register
class HeaderObject(BaseObject):
"""ASF header."""
GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C")
@classmethod
def parse_full(cls, asf, fileobj):
"""Raises ASFHeaderError"""
header = cls()
remaining_header, num_objects = cls.parse_size(fileobj)
remaining_header -= 30
for i in range(num_objects):
obj_header_size = 24
if remaining_header < obj_header_size:
raise ASFHeaderError("invalid header size")
data = fileobj.read(obj_header_size)
if len(data) != obj_header_size:
raise ASFHeaderError("truncated")
remaining_header -= obj_header_size
guid, size = struct.unpack("<16sQ", data)
obj = BaseObject._get_object(guid)
payload_size = size - obj_header_size
if remaining_header < payload_size:
raise ASFHeaderError("invalid object size")
remaining_header -= payload_size
try:
data = fileobj.read(payload_size)
except (OverflowError, MemoryError):
# read doesn't take 64bit values
raise ASFHeaderError("invalid header size")
if len(data) != payload_size:
raise ASFHeaderError("truncated")
try:
obj.parse(asf, data)
except struct.error:
raise ASFHeaderError("truncated")
header.objects.append(obj)
return header
@classmethod
def parse_size(cls, fileobj):
"""Returns (size, num_objects)
Raises ASFHeaderError
"""
header = fileobj.read(30)
if len(header) != 30 or header[:16] != HeaderObject.GUID:
raise ASFHeaderError("Not an ASF file.")
return struct.unpack("<QL", header[16:28])
def render_full(self, asf, fileobj, available, padding_func):
# Render everything except padding
num_objects = 0
data = bytearray()
for obj in self.objects:
if obj.GUID == PaddingObject.GUID:
continue
data += obj.render(asf)
num_objects += 1
# calculate how much space we need at least
padding_obj = PaddingObject()
header_size = len(HeaderObject.GUID) + 14
padding_overhead = len(padding_obj.render(asf))
needed_size = len(data) + header_size + padding_overhead
# ask the user for padding adjustments
file_size = get_size(fileobj)
content_size = file_size - available
if content_size < 0:
raise ASFHeaderError("truncated content")
info = PaddingInfo(available - needed_size, content_size)
# add padding
padding = info._get_padding(padding_func)
padding_obj.parse(asf, b"\x00" * padding)
data += padding_obj.render(asf)
num_objects += 1
data = (HeaderObject.GUID +
struct.pack("<QL", len(data) + 30, num_objects) +
b"\x01\x02" + data)
return data
def parse(self, asf, data):
raise NotImplementedError
def render(self, asf):
raise NotImplementedError
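# Layout note for the 30 byte top-level header handled above: a 16 byte
# GUID, an 8 byte size, a 4 byte object count and the two reserved bytes
# \x01\x02 -- which is why parse_size() reads 30 bytes and unpacks "<QL"
# from header[16:28].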
@BaseObject._register
class ContentDescriptionObject(BaseObject):
"""Content description."""
GUID = guid2bytes("75B22633-668E-11CF-A6D9-00AA0062CE6C")
NAMES = [
u"Title",
u"Author",
u"Copyright",
u"Description",
u"Rating",
]
def parse(self, asf, data):
super(ContentDescriptionObject, self).parse(asf, data)
lengths = struct.unpack("<HHHHH", data[:10])
texts = []
pos = 10
for length in lengths:
end = pos + length
if length > 0:
texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00"))
else:
texts.append(None)
pos = end
for key, value in zip(self.NAMES, texts):
if value is not None:
value = ASFUnicodeAttribute(value=value)
asf._tags.setdefault(self.GUID, []).append((key, value))
def render(self, asf):
def render_text(name):
value = asf.to_content_description.get(name)
if value is not None:
return str(value).encode("utf-16-le") + b"\x00\x00"
else:
return b""
texts = [render_text(x) for x in self.NAMES]
data = struct.pack("<HHHHH", *map(len, texts)) + b"".join(texts)
return self.GUID + struct.pack("<Q", 24 + len(data)) + data
@BaseObject._register
class ExtendedContentDescriptionObject(BaseObject):
"""Extended content description."""
GUID = guid2bytes("D2D0A440-E307-11D2-97F0-00A0C95EA850")
def parse(self, asf, data):
super(ExtendedContentDescriptionObject, self).parse(asf, data)
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in range(num_attributes):
name_length, = struct.unpack("<H", data[pos:pos + 2])
pos += 2
name = data[pos:pos + name_length]
name = name.decode("utf-16-le").strip("\x00")
pos += name_length
value_type, value_length = struct.unpack("<HH", data[pos:pos + 4])
pos += 4
value = data[pos:pos + value_length]
pos += value_length
attr = ASFBaseAttribute._get_type(value_type)(data=value)
asf._tags.setdefault(self.GUID, []).append((name, attr))
def render(self, asf):
attrs = asf.to_extended_content_description.items()
data = b"".join(attr.render(name) for (name, attr) in attrs)
data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
return self.GUID + data
@BaseObject._register
class FilePropertiesObject(BaseObject):
"""File properties."""
GUID = guid2bytes("8CABDCA1-A947-11CF-8EE4-00C00C205365")
def parse(self, asf, data):
super(FilePropertiesObject, self).parse(asf, data)
if len(data) < 64:
raise ASFError("invalid field property entry")
length, _, preroll = struct.unpack("<QQQ", data[40:64])
# there are files where preroll is larger than length, limit to >= 0
asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0)
@BaseObject._register
class StreamPropertiesObject(BaseObject):
"""Stream properties."""
GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365")
def parse(self, asf, data):
super(StreamPropertiesObject, self).parse(asf, data)
channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66])
asf.info.channels = channels
asf.info.sample_rate = sample_rate
asf.info.bitrate = bitrate * 8
@BaseObject._register
class CodecListObject(BaseObject):
"""Codec List"""
GUID = guid2bytes("86D15240-311D-11D0-A3A4-00A0C90348F6")
def _parse_entry(self, data, offset):
"""can raise cdata.error"""
type_, offset = cdata.uint16_le_from(data, offset)
units, offset = cdata.uint16_le_from(data, offset)
        # utf-16 code units, not characters.
next_offset = offset + units * 2
try:
name = data[offset:next_offset].decode("utf-16-le").strip("\x00")
except UnicodeDecodeError:
name = u""
offset = next_offset
units, offset = cdata.uint16_le_from(data, offset)
next_offset = offset + units * 2
try:
desc = data[offset:next_offset].decode("utf-16-le").strip("\x00")
except UnicodeDecodeError:
desc = u""
offset = next_offset
bytes_, offset = cdata.uint16_le_from(data, offset)
next_offset = offset + bytes_
codec = u""
if bytes_ == 2:
codec_id = cdata.uint16_le_from(data, offset)[0]
if codec_id in CODECS:
codec = CODECS[codec_id]
offset = next_offset
return offset, type_, name, desc, codec
def parse(self, asf, data):
super(CodecListObject, self).parse(asf, data)
offset = 16
count, offset = cdata.uint32_le_from(data, offset)
for i in range(count):
try:
offset, type_, name, desc, codec = \
self._parse_entry(data, offset)
except cdata.error:
raise ASFError("invalid codec entry")
# go with the first audio entry
if type_ == 2:
name = name.strip()
desc = desc.strip()
asf.info.codec_type = codec
asf.info.codec_name = name
asf.info.codec_description = desc
return
@BaseObject._register
class PaddingObject(BaseObject):
"""Padding object"""
GUID = guid2bytes("1806D474-CADF-4509-A4BA-9AABCB96AAE8")
@BaseObject._register
class StreamBitratePropertiesObject(BaseObject):
"""Stream bitrate properties"""
GUID = guid2bytes("7BF875CE-468D-11D1-8D82-006097C9A2B2")
@BaseObject._register
class ContentEncryptionObject(BaseObject):
"""Content encryption"""
GUID = guid2bytes("2211B3FB-BD23-11D2-B4B7-00A0C955FC6E")
@BaseObject._register
class ExtendedContentEncryptionObject(BaseObject):
"""Extended content encryption"""
GUID = guid2bytes("298AE614-2622-4C17-B935-DAE07EE9289C")
@BaseObject._register
class HeaderExtensionObject(BaseObject):
"""Header extension."""
GUID = guid2bytes("5FBF03B5-A92E-11CF-8EE3-00C00C205365")
def parse(self, asf, data):
super(HeaderExtensionObject, self).parse(asf, data)
datasize, = struct.unpack("<I", data[18:22])
datapos = 0
while datapos < datasize:
guid, size = struct.unpack(
"<16sQ", data[22 + datapos:22 + datapos + 24])
if size < 1:
raise ASFHeaderError("invalid size in header extension")
obj = BaseObject._get_object(guid)
obj.parse(asf, data[22 + datapos + 24:22 + datapos + size])
self.objects.append(obj)
datapos += size
def render(self, asf):
data = bytearray()
for obj in self.objects:
# some files have the padding in the extension header, but we
# want to add it at the end of the top level header. Just
# skip padding at this level.
if obj.GUID == PaddingObject.GUID:
continue
data += obj.render(asf)
return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
b"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
b"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
b"\x06\x00" + struct.pack("<I", len(data)) + data)
@BaseObject._register
class MetadataObject(BaseObject):
"""Metadata description."""
GUID = guid2bytes("C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA")
def parse(self, asf, data):
super(MetadataObject, self).parse(asf, data)
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in range(num_attributes):
(reserved, stream, name_length, value_type,
value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
pos += 12
name = data[pos:pos + name_length]
name = name.decode("utf-16-le").strip("\x00")
pos += name_length
value = data[pos:pos + value_length]
pos += value_length
args = {'data': value, 'stream': stream}
if value_type == 2:
args['dword'] = False
attr = ASFBaseAttribute._get_type(value_type)(**args)
asf._tags.setdefault(self.GUID, []).append((name, attr))
def render(self, asf):
attrs = asf.to_metadata.items()
data = b"".join([attr.render_m(name) for (name, attr) in attrs])
return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
data)
@BaseObject._register
class MetadataLibraryObject(BaseObject):
"""Metadata library description."""
GUID = guid2bytes("44231C94-9498-49D1-A141-1D134E457054")
def parse(self, asf, data):
super(MetadataLibraryObject, self).parse(asf, data)
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in range(num_attributes):
(language, stream, name_length, value_type,
value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
pos += 12
name = data[pos:pos + name_length]
name = name.decode("utf-16-le").strip("\x00")
pos += name_length
value = data[pos:pos + value_length]
pos += value_length
args = {'data': value, 'language': language, 'stream': stream}
if value_type == 2:
args['dword'] = False
attr = ASFBaseAttribute._get_type(value_type)(**args)
asf._tags.setdefault(self.GUID, []).append((name, attr))
def render(self, asf):
attrs = asf.to_metadata_library
data = b"".join([attr.render_ml(name) for (name, attr) in attrs])
return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
data)
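# Illustrative sketch (editor's addition, not part of mutagen): each object
# class above registers itself by GUID via BaseObject._register, so header
# parsing can dispatch a raw 16-byte GUID back to the matching class (see
# HeaderExtensionObject.parse above). A minimal demo, assuming the BaseObject
# registry defined earlier in this module:
if __name__ == "__main__":
    obj = BaseObject._get_object(PaddingObject.GUID)
    print(type(obj).__name__)  # PaddingObject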
| 15,241 | Python | .py | 367 | 31.964578 | 78 | 0.593583 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,556 | _attrs.py | rembo10_headphones/lib/mutagen/asf/_attrs.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
import struct
from mutagen._util import total_ordering, reraise
from ._util import ASFError
class ASFBaseAttribute(object):
"""Generic attribute."""
TYPE = None
_TYPES = {}
value = None
"""The Python value of this attribute (type depends on the class)"""
language = None
"""Language"""
stream = None
"""Stream"""
def __init__(self, value=None, data=None, language=None,
stream=None, **kwargs):
self.language = language
self.stream = stream
if data is not None:
self.value = self.parse(data, **kwargs)
else:
if value is None:
# we used to support not passing any args and instead assign
# them later, keep that working..
self.value = None
else:
self.value = self._validate(value)
@classmethod
def _register(cls, other):
cls._TYPES[other.TYPE] = other
return other
@classmethod
def _get_type(cls, type_):
"""Raises KeyError"""
return cls._TYPES[type_]
def _validate(self, value):
"""Raises TypeError or ValueError in case the user supplied value
isn't valid.
"""
return value
def data_size(self):
raise NotImplementedError
def __repr__(self):
name = "%s(%r" % (type(self).__name__, self.value)
if self.language:
name += ", language=%d" % self.language
if self.stream:
name += ", stream=%d" % self.stream
name += ")"
return name
def render(self, name):
name = name.encode("utf-16-le") + b"\x00\x00"
data = self._render()
return (struct.pack("<H", len(name)) + name +
struct.pack("<HH", self.TYPE, len(data)) + data)
def render_m(self, name):
name = name.encode("utf-16-le") + b"\x00\x00"
if self.TYPE == 2:
data = self._render(dword=False)
else:
data = self._render()
return (struct.pack("<HHHHI", 0, self.stream or 0, len(name),
self.TYPE, len(data)) + name + data)
def render_ml(self, name):
name = name.encode("utf-16-le") + b"\x00\x00"
if self.TYPE == 2:
data = self._render(dword=False)
else:
data = self._render()
return (struct.pack("<HHHHI", self.language or 0, self.stream or 0,
len(name), self.TYPE, len(data)) + name + data)
@ASFBaseAttribute._register
@total_ordering
class ASFUnicodeAttribute(ASFBaseAttribute):
"""Unicode string attribute.
::
ASFUnicodeAttribute(u'some text')
"""
TYPE = 0x0000
def parse(self, data):
try:
return data.decode("utf-16-le").strip("\x00")
except UnicodeDecodeError as e:
reraise(ASFError, e, sys.exc_info()[2])
def _validate(self, value):
if not isinstance(value, str):
raise TypeError("%r not str" % value)
return value
def _render(self):
return self.value.encode("utf-16-le") + b"\x00\x00"
def data_size(self):
return len(self._render())
def __bytes__(self):
return self.value.encode("utf-16-le")
def __str__(self):
return self.value
def __eq__(self, other):
return str(self) == other
def __lt__(self, other):
return str(self) < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@total_ordering
class ASFByteArrayAttribute(ASFBaseAttribute):
"""Byte array attribute.
::
ASFByteArrayAttribute(b'1234')
"""
TYPE = 0x0001
def parse(self, data):
assert isinstance(data, bytes)
return data
def _render(self):
assert isinstance(self.value, bytes)
return self.value
def _validate(self, value):
if not isinstance(value, bytes):
raise TypeError("must be bytes/str: %r" % value)
return value
def data_size(self):
return len(self.value)
def __bytes__(self):
return self.value
def __str__(self):
return "[binary data (%d bytes)]" % len(self.value)
def __eq__(self, other):
return self.value == other
def __lt__(self, other):
return self.value < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@total_ordering
class ASFBoolAttribute(ASFBaseAttribute):
"""Bool attribute.
::
ASFBoolAttribute(True)
"""
TYPE = 0x0002
def parse(self, data, dword=True):
if dword:
return struct.unpack("<I", data)[0] == 1
else:
return struct.unpack("<H", data)[0] == 1
def _render(self, dword=True):
if dword:
return struct.pack("<I", bool(self.value))
else:
return struct.pack("<H", bool(self.value))
def _validate(self, value):
return bool(value)
def data_size(self):
return 4
def __bool__(self):
return bool(self.value)
def __bytes__(self):
return str(self.value).encode('utf-8')
def __str__(self):
return str(self.value)
def __eq__(self, other):
return bool(self.value) == other
def __lt__(self, other):
return bool(self.value) < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@total_ordering
class ASFDWordAttribute(ASFBaseAttribute):
"""DWORD attribute.
::
ASFDWordAttribute(42)
"""
TYPE = 0x0003
def parse(self, data):
return struct.unpack("<L", data)[0]
def _render(self):
return struct.pack("<L", self.value)
def _validate(self, value):
value = int(value)
if not 0 <= value <= 2 ** 32 - 1:
raise ValueError("Out of range")
return value
def data_size(self):
return 4
def __int__(self):
return self.value
def __bytes__(self):
return str(self.value).encode('utf-8')
def __str__(self):
return str(self.value)
def __eq__(self, other):
return int(self.value) == other
def __lt__(self, other):
return int(self.value) < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@total_ordering
class ASFQWordAttribute(ASFBaseAttribute):
"""QWORD attribute.
::
ASFQWordAttribute(42)
"""
TYPE = 0x0004
def parse(self, data):
return struct.unpack("<Q", data)[0]
def _render(self):
return struct.pack("<Q", self.value)
def _validate(self, value):
value = int(value)
if not 0 <= value <= 2 ** 64 - 1:
raise ValueError("Out of range")
return value
def data_size(self):
return 8
def __int__(self):
return self.value
def __bytes__(self):
return str(self.value).encode('utf-8')
def __str__(self):
return str(self.value)
def __eq__(self, other):
return int(self.value) == other
def __lt__(self, other):
return int(self.value) < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@total_ordering
class ASFWordAttribute(ASFBaseAttribute):
"""WORD attribute.
::
ASFWordAttribute(42)
"""
TYPE = 0x0005
def parse(self, data):
return struct.unpack("<H", data)[0]
def _render(self):
return struct.pack("<H", self.value)
def _validate(self, value):
value = int(value)
if not 0 <= value <= 2 ** 16 - 1:
raise ValueError("Out of range")
return value
def data_size(self):
return 2
def __int__(self):
return self.value
def __bytes__(self):
return str(self.value).encode('utf-8')
def __str__(self):
return str(self.value)
def __eq__(self, other):
return int(self.value) == other
def __lt__(self, other):
return int(self.value) < other
__hash__ = ASFBaseAttribute.__hash__
@ASFBaseAttribute._register
@total_ordering
class ASFGUIDAttribute(ASFBaseAttribute):
"""GUID attribute."""
TYPE = 0x0006
def parse(self, data):
assert isinstance(data, bytes)
return data
def _render(self):
assert isinstance(self.value, bytes)
return self.value
def _validate(self, value):
if not isinstance(value, bytes):
raise TypeError("must be bytes/str: %r" % value)
return value
def data_size(self):
return len(self.value)
def __bytes__(self):
return self.value
def __str__(self):
return repr(self.value)
def __eq__(self, other):
return self.value == other
def __lt__(self, other):
return self.value < other
__hash__ = ASFBaseAttribute.__hash__
def ASFValue(value, kind, **kwargs):
"""Create a tag value of a specific kind.
::
ASFValue(u"My Value", UNICODE)
:rtype: ASFBaseAttribute
:raises TypeError: in case a wrong type was passed
    :raises ValueError: in case the value can't be represented as ASFValue.
"""
try:
attr_type = ASFBaseAttribute._get_type(kind)
except KeyError:
raise ValueError("Unknown value type %r" % kind)
else:
return attr_type(value=value, **kwargs)
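# Illustrative usage sketch (editor's addition, not part of mutagen):
# ASFValue dispatches on the numeric kind to one of the attribute classes
# registered above; every name used here is defined in this module.
if __name__ == "__main__":
    attr = ASFValue(u"My Value", ASFUnicodeAttribute.TYPE)
    print(repr(attr))        # ASFUnicodeAttribute('My Value')
    print(attr.data_size())  # 18: UTF-16-LE bytes plus the NUL terminator
    print(ASFValue(42, ASFDWordAttribute.TYPE) == 42)  # True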
| 9,749 | Python | .py | 301 | 24.943522 | 78 | 0.591889 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,557 | _util.py | rembo10_headphones/lib/mutagen/asf/_util.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
from mutagen._util import MutagenError
class error(MutagenError):
"""Error raised by :mod:`mutagen.asf`"""
class ASFError(error):
pass
class ASFHeaderError(error):
pass
def guid2bytes(s):
"""Converts a GUID to the serialized bytes representation"""
assert isinstance(s, str)
assert len(s) == 36
p = struct.pack
return b"".join([
p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
p(">H", int(s[19:23], 16)),
p(">Q", int(s[24:], 16))[2:],
])
def bytes2guid(s):
"""Converts a serialized GUID to a text GUID"""
assert isinstance(s, bytes)
u = struct.unpack
v = []
v.extend(u("<IHH", s[:8]))
v.extend(u(">HQ", s[8:10] + b"\x00\x00" + s[10:]))
return "%08X-%04X-%04X-%04X-%012X" % tuple(v)
# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4}
CODECS = {
0x0000: u"Unknown Wave Format",
0x0001: u"Microsoft PCM Format",
0x0002: u"Microsoft ADPCM Format",
0x0003: u"IEEE Float",
0x0004: u"Compaq Computer VSELP",
0x0005: u"IBM CVSD",
0x0006: u"Microsoft CCITT A-Law",
0x0007: u"Microsoft CCITT u-Law",
0x0008: u"Microsoft DTS",
0x0009: u"Microsoft DRM",
0x000A: u"Windows Media Audio 9 Voice",
0x000B: u"Windows Media Audio 10 Voice",
0x000C: u"OGG Vorbis",
0x000D: u"FLAC",
0x000E: u"MOT AMR",
0x000F: u"Nice Systems IMBE",
0x0010: u"OKI ADPCM",
0x0011: u"Intel IMA ADPCM",
0x0012: u"Videologic MediaSpace ADPCM",
0x0013: u"Sierra Semiconductor ADPCM",
0x0014: u"Antex Electronics G.723 ADPCM",
0x0015: u"DSP Solutions DIGISTD",
0x0016: u"DSP Solutions DIGIFIX",
0x0017: u"Dialogic OKI ADPCM",
0x0018: u"MediaVision ADPCM",
0x0019: u"Hewlett-Packard CU codec",
0x001A: u"Hewlett-Packard Dynamic Voice",
0x0020: u"Yamaha ADPCM",
0x0021: u"Speech Compression SONARC",
0x0022: u"DSP Group True Speech",
0x0023: u"Echo Speech EchoSC1",
0x0024: u"Ahead Inc. Audiofile AF36",
0x0025: u"Audio Processing Technology APTX",
0x0026: u"Ahead Inc. AudioFile AF10",
0x0027: u"Aculab Prosody 1612",
0x0028: u"Merging Technologies S.A. LRC",
0x0030: u"Dolby Labs AC2",
0x0031: u"Microsoft GSM 6.10",
0x0032: u"Microsoft MSNAudio",
0x0033: u"Antex Electronics ADPCME",
0x0034: u"Control Resources VQLPC",
0x0035: u"DSP Solutions Digireal",
0x0036: u"DSP Solutions DigiADPCM",
0x0037: u"Control Resources CR10",
0x0038: u"Natural MicroSystems VBXADPCM",
0x0039: u"Crystal Semiconductor IMA ADPCM",
0x003A: u"Echo Speech EchoSC3",
0x003B: u"Rockwell ADPCM",
0x003C: u"Rockwell DigiTalk",
0x003D: u"Xebec Multimedia Solutions",
0x0040: u"Antex Electronics G.721 ADPCM",
0x0041: u"Antex Electronics G.728 CELP",
0x0042: u"Intel G.723",
0x0043: u"Intel G.723.1",
0x0044: u"Intel G.729 Audio",
0x0045: u"Sharp G.726 Audio",
0x0050: u"Microsoft MPEG-1",
0x0052: u"InSoft RT24",
0x0053: u"InSoft PAC",
0x0055: u"MP3 - MPEG Layer III",
0x0059: u"Lucent G.723",
0x0060: u"Cirrus Logic",
0x0061: u"ESS Technology ESPCM",
0x0062: u"Voxware File-Mode",
0x0063: u"Canopus Atrac",
0x0064: u"APICOM G.726 ADPCM",
0x0065: u"APICOM G.722 ADPCM",
0x0066: u"Microsoft DSAT",
0x0067: u"Microsoft DSAT Display",
0x0069: u"Voxware Byte Aligned",
0x0070: u"Voxware AC8",
0x0071: u"Voxware AC10",
0x0072: u"Voxware AC16",
0x0073: u"Voxware AC20",
0x0074: u"Voxware RT24 MetaVoice",
0x0075: u"Voxware RT29 MetaSound",
0x0076: u"Voxware RT29HW",
0x0077: u"Voxware VR12",
0x0078: u"Voxware VR18",
0x0079: u"Voxware TQ40",
0x007A: u"Voxware SC3",
0x007B: u"Voxware SC3",
0x0080: u"Softsound",
0x0081: u"Voxware TQ60",
0x0082: u"Microsoft MSRT24",
0x0083: u"AT&T Labs G.729A",
0x0084: u"Motion Pixels MVI MV12",
0x0085: u"DataFusion Systems G.726",
0x0086: u"DataFusion Systems GSM610",
0x0088: u"Iterated Systems ISIAudio",
0x0089: u"Onlive",
0x008A: u"Multitude FT SX20",
0x008B: u"Infocom ITS ACM G.721",
0x008C: u"Convedia G.729",
0x008D: u"Congruency Audio",
0x0091: u"Siemens Business Communications SBC24",
0x0092: u"Sonic Foundry Dolby AC3 SPDIF",
0x0093: u"MediaSonic G.723",
0x0094: u"Aculab Prosody 8KBPS",
0x0097: u"ZyXEL ADPCM",
0x0098: u"Philips LPCBB",
0x0099: u"Studer Professional Audio AG Packed",
0x00A0: u"Malden Electronics PHONYTALK",
0x00A1: u"Racal Recorder GSM",
0x00A2: u"Racal Recorder G720.a",
0x00A3: u"Racal Recorder G723.1",
0x00A4: u"Racal Recorder Tetra ACELP",
0x00B0: u"NEC AAC",
0x00FF: u"CoreAAC Audio",
0x0100: u"Rhetorex ADPCM",
0x0101: u"BeCubed Software IRAT",
0x0111: u"Vivo G.723",
0x0112: u"Vivo Siren",
0x0120: u"Philips CELP",
0x0121: u"Philips Grundig",
0x0123: u"Digital G.723",
0x0125: u"Sanyo ADPCM",
0x0130: u"Sipro Lab Telecom ACELP.net",
0x0131: u"Sipro Lab Telecom ACELP.4800",
0x0132: u"Sipro Lab Telecom ACELP.8V3",
0x0133: u"Sipro Lab Telecom ACELP.G.729",
0x0134: u"Sipro Lab Telecom ACELP.G.729A",
0x0135: u"Sipro Lab Telecom ACELP.KELVIN",
0x0136: u"VoiceAge AMR",
0x0140: u"Dictaphone G.726 ADPCM",
0x0141: u"Dictaphone CELP68",
0x0142: u"Dictaphone CELP54",
0x0150: u"Qualcomm PUREVOICE",
0x0151: u"Qualcomm HALFRATE",
0x0155: u"Ring Zero Systems TUBGSM",
0x0160: u"Windows Media Audio Standard",
0x0161: u"Windows Media Audio 9 Standard",
0x0162: u"Windows Media Audio 9 Professional",
0x0163: u"Windows Media Audio 9 Lossless",
0x0164: u"Windows Media Audio Pro over SPDIF",
0x0170: u"Unisys NAP ADPCM",
0x0171: u"Unisys NAP ULAW",
0x0172: u"Unisys NAP ALAW",
0x0173: u"Unisys NAP 16K",
0x0174: u"Sycom ACM SYC008",
0x0175: u"Sycom ACM SYC701 G725",
0x0176: u"Sycom ACM SYC701 CELP54",
0x0177: u"Sycom ACM SYC701 CELP68",
0x0178: u"Knowledge Adventure ADPCM",
0x0180: u"Fraunhofer IIS MPEG-2 AAC",
0x0190: u"Digital Theater Systems DTS",
0x0200: u"Creative Labs ADPCM",
0x0202: u"Creative Labs FastSpeech8",
0x0203: u"Creative Labs FastSpeech10",
0x0210: u"UHER informatic GmbH ADPCM",
0x0215: u"Ulead DV Audio",
0x0216: u"Ulead DV Audio",
0x0220: u"Quarterdeck",
0x0230: u"I-link Worldwide ILINK VC",
0x0240: u"Aureal Semiconductor RAW SPORT",
0x0249: u"Generic Passthru",
0x0250: u"Interactive Products HSX",
0x0251: u"Interactive Products RPELP",
0x0260: u"Consistent Software CS2",
0x0270: u"Sony SCX",
0x0271: u"Sony SCY",
0x0272: u"Sony ATRAC3",
0x0273: u"Sony SPC",
0x0280: u"Telum Audio",
0x0281: u"Telum IA Audio",
0x0285: u"Norcom Voice Systems ADPCM",
0x0300: u"Fujitsu TOWNS SND",
0x0350: u"Micronas SC4 Speech",
0x0351: u"Micronas CELP833",
0x0400: u"Brooktree BTV Digital",
0x0401: u"Intel Music Coder",
0x0402: u"Intel Audio",
0x0450: u"QDesign Music",
0x0500: u"On2 AVC0 Audio",
0x0501: u"On2 AVC1 Audio",
0x0680: u"AT&T Labs VME VMPCM",
0x0681: u"AT&T Labs TPC",
0x08AE: u"ClearJump Lightwave Lossless",
0x1000: u"Olivetti GSM",
0x1001: u"Olivetti ADPCM",
0x1002: u"Olivetti CELP",
0x1003: u"Olivetti SBC",
0x1004: u"Olivetti OPR",
0x1100: u"Lernout & Hauspie",
0x1101: u"Lernout & Hauspie CELP",
0x1102: u"Lernout & Hauspie SBC8",
0x1103: u"Lernout & Hauspie SBC12",
0x1104: u"Lernout & Hauspie SBC16",
0x1400: u"Norris Communication",
0x1401: u"ISIAudio",
0x1500: u"AT&T Labs Soundspace Music Compression",
0x1600: u"Microsoft MPEG ADTS AAC",
0x1601: u"Microsoft MPEG RAW AAC",
0x1608: u"Nokia MPEG ADTS AAC",
0x1609: u"Nokia MPEG RAW AAC",
0x181C: u"VoxWare MetaVoice RT24",
0x1971: u"Sonic Foundry Lossless",
0x1979: u"Innings Telecom ADPCM",
0x1FC4: u"NTCSoft ALF2CD ACM",
0x2000: u"Dolby AC3",
0x2001: u"DTS",
0x4143: u"Divio AAC",
0x4201: u"Nokia Adaptive Multi-Rate",
0x4243: u"Divio G.726",
0x4261: u"ITU-T H.261",
0x4263: u"ITU-T H.263",
0x4264: u"ITU-T H.264",
0x674F: u"Ogg Vorbis Mode 1",
0x6750: u"Ogg Vorbis Mode 2",
0x6751: u"Ogg Vorbis Mode 3",
0x676F: u"Ogg Vorbis Mode 1+",
0x6770: u"Ogg Vorbis Mode 2+",
0x6771: u"Ogg Vorbis Mode 3+",
0x7000: u"3COM NBX Audio",
0x706D: u"FAAD AAC Audio",
0x77A1: u"True Audio Lossless Audio",
0x7A21: u"GSM-AMR CBR 3GPP Audio",
0x7A22: u"GSM-AMR VBR 3GPP Audio",
0xA100: u"Comverse Infosys G723.1",
0xA101: u"Comverse Infosys AVQSBC",
0xA102: u"Comverse Infosys SBC",
0xA103: u"Symbol Technologies G729a",
0xA104: u"VoiceAge AMR WB",
0xA105: u"Ingenient Technologies G.726",
0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)",
0xA107: u"Encore Software Ltd's G.726",
0xA108: u"ZOLL Medical Corporation ASAO",
0xA109: u"Speex Voice",
0xA10A: u"Vianix MASC Speech Compression",
0xA10B: u"Windows Media 9 Spectrum Analyzer Output",
0xA10C: u"Media Foundation Spectrum Analyzer Output",
0xA10D: u"GSM 6.10 (Full-Rate) Speech",
0xA10E: u"GSM 6.20 (Half-Rate) Speech",
    0xA10F: u"GSM 6.60 (Enhanced Full-Rate) Speech",
0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech",
0xA111: u"GSM Adaptive Multi-Rate WideBand Speech",
0xA112: u"Polycom G.722",
0xA113: u"Polycom G.728",
0xA114: u"Polycom G.729a",
0xA115: u"Polycom Siren",
0xA116: u"Global IP Sound ILBC",
0xA117: u"Radio Time Time Shifted Radio",
0xA118: u"Nice Systems ACA",
0xA119: u"Nice Systems ADPCM",
0xA11A: u"Vocord Group ITU-T G.721",
0xA11B: u"Vocord Group ITU-T G.726",
0xA11C: u"Vocord Group ITU-T G.722.1",
0xA11D: u"Vocord Group ITU-T G.728",
0xA11E: u"Vocord Group ITU-T G.729",
0xA11F: u"Vocord Group ITU-T G.729a",
0xA120: u"Vocord Group ITU-T G.723.1",
0xA121: u"Vocord Group LBC",
0xA122: u"Nice G.728",
0xA123: u"France Telecom G.729 ACM Audio",
0xA124: u"CODIAN Audio",
0xCC12: u"Intel YUV12 Codec",
0xCFCC: u"Digital Processing Systems Perception Motion JPEG",
0xD261: u"DEC H.261",
0xD263: u"DEC H.263",
0xFFFE: u"Extensible Wave Format",
0xFFFF: u"Unregistered",
}
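# Illustrative lookup (editor's addition, not part of mutagen):
# CodecListObject resolves the 16-bit wave-format tag of the first audio
# stream through this table to fill in ASFInfo.codec_type.
if __name__ == "__main__":
    print(CODECS[0x0161])  # Windows Media Audio 9 Standard
    print(CODECS.get(0xBEEF, u"Unknown"))  # Unknown: tag not in the table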
| 10,780 | Python | .py | 298 | 31.315436 | 79 | 0.673643 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,558 | __init__.py | rembo10_headphones/lib/mutagen/asf/__init__.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write ASF (Windows Media Audio) files."""
__all__ = ["ASF", "Open"]
from mutagen import FileType, Tags, StreamInfo
from mutagen._util import resize_bytes, DictMixin, loadfile, convert_error
from ._util import error, ASFError, ASFHeaderError
from ._objects import HeaderObject, MetadataLibraryObject, MetadataObject, \
ExtendedContentDescriptionObject, HeaderExtensionObject, \
ContentDescriptionObject
from ._attrs import ASFGUIDAttribute, ASFWordAttribute, ASFQWordAttribute, \
ASFDWordAttribute, ASFBoolAttribute, ASFByteArrayAttribute, \
ASFUnicodeAttribute, ASFBaseAttribute, ASFValue
# flake8
error, ASFError, ASFHeaderError, ASFValue
class ASFInfo(StreamInfo):
"""ASFInfo()
ASF stream information.
Attributes:
        length (`float`): Length in seconds
sample_rate (`int`): Sample rate in Hz
bitrate (`int`): Bitrate in bps
channels (`int`): Number of channels
codec_type (`mutagen.text`): Name of the codec type of the first
audio stream or an empty string if unknown. Example:
``Windows Media Audio 9 Standard``
codec_name (`mutagen.text`): Name and maybe version of the codec used.
Example: ``Windows Media Audio 9.1``
codec_description (`mutagen.text`): Further information on the codec
used. Example: ``64 kbps, 48 kHz, stereo 2-pass CBR``
"""
length = 0.0
sample_rate = 0
bitrate = 0
channels = 0
codec_type = u""
codec_name = u""
codec_description = u""
def __init__(self):
self.length = 0.0
self.sample_rate = 0
self.bitrate = 0
self.channels = 0
self.codec_type = u""
self.codec_name = u""
self.codec_description = u""
def pprint(self):
"""Returns:
text: a stream information text summary
"""
s = u"ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds" % (
self.codec_type or self.codec_name or u"???", self.bitrate,
self.sample_rate, self.channels, self.length)
return s
class ASFTags(list, DictMixin, Tags):
"""ASFTags()
Dictionary containing ASF attributes.
"""
def __getitem__(self, key):
"""A list of values for the key.
This is a copy, so comment['title'].append('a title') will not
work.
"""
if isinstance(key, slice):
return list.__getitem__(self, key)
values = [value for (k, value) in self if k == key]
if not values:
raise KeyError(key)
else:
return values
def __delitem__(self, key):
"""Delete all values associated with the key."""
if isinstance(key, slice):
return list.__delitem__(self, key)
to_delete = [x for x in self if x[0] == key]
if not to_delete:
raise KeyError(key)
else:
for k in to_delete:
self.remove(k)
def __contains__(self, key):
"""Return true if the key has any values."""
for k, value in self:
if k == key:
return True
else:
return False
def __setitem__(self, key, values):
"""Set a key's value or values.
Setting a value overwrites all old ones. The value may be a
list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
string.
"""
if isinstance(key, slice):
return list.__setitem__(self, key, values)
if not isinstance(values, list):
values = [values]
to_append = []
for value in values:
if not isinstance(value, ASFBaseAttribute):
if isinstance(value, str):
value = ASFUnicodeAttribute(value)
elif isinstance(value, bytes):
value = ASFByteArrayAttribute(value)
elif isinstance(value, bool):
value = ASFBoolAttribute(value)
elif isinstance(value, int):
value = ASFDWordAttribute(value)
else:
raise TypeError("Invalid type %r" % type(value))
to_append.append((key, value))
try:
            del self[key]
except KeyError:
pass
self.extend(to_append)
def keys(self):
"""Return a sequence of all keys in the comment."""
return self and set(next(zip(*self)))
def as_dict(self):
"""Return a copy of the comment data in a real dict."""
d = {}
for key, value in self:
d.setdefault(key, []).append(value)
return d
def pprint(self):
"""Returns a string containing all key, value pairs.
:rtype: text
"""
return "\n".join("%s=%s" % (k, v) for k, v in self)
UNICODE = ASFUnicodeAttribute.TYPE
"""Unicode string type"""
BYTEARRAY = ASFByteArrayAttribute.TYPE
"""Byte array type"""
BOOL = ASFBoolAttribute.TYPE
"""Bool type"""
DWORD = ASFDWordAttribute.TYPE
"""DWord type (uint32)"""
QWORD = ASFQWordAttribute.TYPE
"""QWord type (uint64)"""
WORD = ASFWordAttribute.TYPE
"""Word type (uint16)"""
GUID = ASFGUIDAttribute.TYPE
"""GUID type"""
class ASF(FileType):
"""ASF(filething)
An ASF file, probably containing WMA or WMV.
Arguments:
filething (filething)
Attributes:
info (`ASFInfo`)
tags (`ASFTags`)
"""
_mimes = ["audio/x-ms-wma", "audio/x-ms-wmv", "video/x-ms-asf",
"audio/x-wma", "video/x-wmv"]
info = None
tags = None
@convert_error(IOError, error)
@loadfile()
def load(self, filething):
"""load(filething)
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
fileobj = filething.fileobj
self.info = ASFInfo()
self.tags = ASFTags()
self._tags = {}
self._header = HeaderObject.parse_full(self, fileobj)
for guid in [ContentDescriptionObject.GUID,
ExtendedContentDescriptionObject.GUID,
MetadataObject.GUID,
MetadataLibraryObject.GUID]:
self.tags.extend(self._tags.pop(guid, []))
assert not self._tags
@convert_error(IOError, error)
@loadfile(writable=True)
def save(self, filething=None, padding=None):
"""save(filething=None, padding=None)
Save tag changes back to the loaded file.
Args:
filething (filething)
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
"""
# Move attributes to the right objects
self.to_content_description = {}
self.to_extended_content_description = {}
self.to_metadata = {}
self.to_metadata_library = []
for name, value in self.tags:
library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
can_cont_desc = value.TYPE == UNICODE
if library_only or value.language is not None:
self.to_metadata_library.append((name, value))
elif value.stream is not None:
if name not in self.to_metadata:
self.to_metadata[name] = value
else:
self.to_metadata_library.append((name, value))
elif name in ContentDescriptionObject.NAMES:
if name not in self.to_content_description and can_cont_desc:
self.to_content_description[name] = value
else:
self.to_metadata_library.append((name, value))
else:
if name not in self.to_extended_content_description:
self.to_extended_content_description[name] = value
else:
self.to_metadata_library.append((name, value))
# Add missing objects
header = self._header
if header.get_child(ContentDescriptionObject.GUID) is None:
header.objects.append(ContentDescriptionObject())
if header.get_child(ExtendedContentDescriptionObject.GUID) is None:
header.objects.append(ExtendedContentDescriptionObject())
header_ext = header.get_child(HeaderExtensionObject.GUID)
if header_ext is None:
header_ext = HeaderExtensionObject()
header.objects.append(header_ext)
if header_ext.get_child(MetadataObject.GUID) is None:
header_ext.objects.append(MetadataObject())
if header_ext.get_child(MetadataLibraryObject.GUID) is None:
header_ext.objects.append(MetadataLibraryObject())
fileobj = filething.fileobj
# Render to file
old_size = header.parse_size(fileobj)[0]
data = header.render_full(self, fileobj, old_size, padding)
size = len(data)
resize_bytes(fileobj, old_size, size, 0)
fileobj.seek(0)
fileobj.write(data)
def add_tags(self):
raise ASFError
@loadfile(writable=True)
def delete(self, filething=None):
"""delete(filething=None)
Args:
filething (filething)
Raises:
mutagen.MutagenError
"""
self.tags.clear()
self.save(filething, padding=lambda x: 0)
@staticmethod
def score(filename, fileobj, header):
return header.startswith(HeaderObject.GUID) * 2
Open = ASF
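# Illustrative usage sketch (editor's addition, not part of mutagen):
# reading stream info and editing attributes through the API defined above.
# "example.wma" is a placeholder path, not a file shipped with the library.
if __name__ == "__main__":
    f = ASF("example.wma")
    print(f.info.pprint())          # e.g. "ASF (...) 128000 bps, 44100 Hz, ..."
    f.tags["Title"] = u"New Title"  # str is wrapped in an ASFUnicodeAttribute
    f.save()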
| 9,856 | Python | .py | 257 | 29.085603 | 78 | 0.603528 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,559 | _util.py | rembo10_headphones/lib/mutagen/mp3/_util.py |
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
http://wiki.hydrogenaud.io/index.php?title=MP3
"""
from __future__ import division
from functools import partial
from io import BytesIO
from mutagen._util import cdata, BitReader, iterbytes
class LAMEError(Exception):
pass
class LAMEHeader(object):
"""http://gabriel.mp3-tech.org/mp3infotag.html"""
vbr_method = 0
"""0: unknown, 1: CBR, 2: ABR, 3/4/5: VBR, others: see the docs"""
lowpass_filter = 0
"""lowpass filter value in Hz. 0 means unknown"""
quality = -1
"""Encoding quality: 0..9"""
vbr_quality = -1
"""VBR quality: 0..9"""
track_peak = None
"""Peak signal amplitude as float. 1.0 is maximal signal amplitude
in decoded format. None if unknown.
"""
track_gain_origin = 0
"""see the docs"""
track_gain_adjustment = None
"""Track gain adjustment as float (for 89db replay gain) or None"""
album_gain_origin = 0
"""see the docs"""
album_gain_adjustment = None
"""Album gain adjustment as float (for 89db replay gain) or None"""
encoding_flags = 0
"""see docs"""
ath_type = -1
"""see docs"""
bitrate = -1
"""Bitrate in kbps. For VBR the minimum bitrate, for anything else
(CBR, ABR, ..) the target bitrate.
"""
encoder_delay_start = 0
"""Encoder delay in samples"""
encoder_padding_end = 0
"""Padding in samples added at the end"""
source_sample_frequency_enum = -1
"""see docs"""
unwise_setting_used = False
"""see docs"""
stereo_mode = 0
"""see docs"""
noise_shaping = 0
"""see docs"""
mp3_gain = 0
"""Applied MP3 gain -127..127. Factor is 2 ** (mp3_gain / 4)"""
surround_info = 0
"""see docs"""
preset_used = 0
"""lame preset"""
music_length = 0
"""Length in bytes excluding any ID3 tags"""
music_crc = -1
"""CRC16 of the data specified by music_length"""
header_crc = -1
"""CRC16 of this header and everything before (not checked)"""
def __init__(self, xing, fileobj):
"""Raises LAMEError if parsing fails"""
payload = fileobj.read(27)
if len(payload) != 27:
raise LAMEError("Not enough data")
# extended lame header
r = BitReader(BytesIO(payload))
revision = r.bits(4)
if revision != 0:
raise LAMEError("unsupported header revision %d" % revision)
self.vbr_method = r.bits(4)
self.lowpass_filter = r.bits(8) * 100
# these have a different meaning for lame; expose them again here
self.quality = (100 - xing.vbr_scale) % 10
self.vbr_quality = (100 - xing.vbr_scale) // 10
track_peak_data = r.bytes(4)
if track_peak_data == b"\x00\x00\x00\x00":
self.track_peak = None
else:
# see PutLameVBR() in LAME's VbrTag.c
self.track_peak = cdata.uint32_be(track_peak_data) / 2 ** 23
track_gain_type = r.bits(3)
self.track_gain_origin = r.bits(3)
sign = r.bits(1)
gain_adj = r.bits(9) / 10.0
if sign:
gain_adj *= -1
if track_gain_type == 1:
self.track_gain_adjustment = gain_adj
else:
self.track_gain_adjustment = None
assert r.is_aligned()
album_gain_type = r.bits(3)
self.album_gain_origin = r.bits(3)
sign = r.bits(1)
album_gain_adj = r.bits(9) / 10.0
if album_gain_type == 2:
self.album_gain_adjustment = album_gain_adj
else:
self.album_gain_adjustment = None
self.encoding_flags = r.bits(4)
self.ath_type = r.bits(4)
self.bitrate = r.bits(8)
self.encoder_delay_start = r.bits(12)
self.encoder_padding_end = r.bits(12)
self.source_sample_frequency_enum = r.bits(2)
self.unwise_setting_used = r.bits(1)
self.stereo_mode = r.bits(3)
self.noise_shaping = r.bits(2)
sign = r.bits(1)
mp3_gain = r.bits(7)
if sign:
mp3_gain *= -1
self.mp3_gain = mp3_gain
r.skip(2)
self.surround_info = r.bits(3)
self.preset_used = r.bits(11)
self.music_length = r.bits(32)
self.music_crc = r.bits(16)
self.header_crc = r.bits(16)
assert r.is_aligned()
def guess_settings(self, major, minor):
"""Gives a guess about the encoder settings used. Returns an empty
string if unknown.
The guess is mostly correct in case the file was encoded with
the default options (-V --preset --alt-preset --abr -b etc) and no
other fancy options.
Args:
major (int)
minor (int)
Returns:
text
"""
version = major, minor
if self.vbr_method == 2:
if version in ((3, 90), (3, 91), (3, 92)) and self.encoding_flags:
if self.bitrate < 255:
return u"--alt-preset %d" % self.bitrate
else:
return u"--alt-preset %d+" % self.bitrate
if self.preset_used != 0:
return u"--preset %d" % self.preset_used
elif self.bitrate < 255:
return u"--abr %d" % self.bitrate
else:
return u"--abr %d+" % self.bitrate
elif self.vbr_method == 1:
if self.preset_used == 0:
if self.bitrate < 255:
return u"-b %d" % self.bitrate
else:
return u"-b 255+"
elif self.preset_used == 1003:
return u"--preset insane"
return u"-b %d" % self.preset_used
elif version in ((3, 90), (3, 91), (3, 92)):
preset_key = (self.vbr_quality, self.quality, self.vbr_method,
self.lowpass_filter, self.ath_type)
if preset_key == (1, 2, 4, 19500, 3):
return u"--preset r3mix"
if preset_key == (2, 2, 3, 19000, 4):
return u"--alt-preset standard"
if preset_key == (2, 2, 3, 19500, 2):
return u"--alt-preset extreme"
if self.vbr_method == 3:
return u"-V %s" % self.vbr_quality
elif self.vbr_method in (4, 5):
return u"-V %s --vbr-new" % self.vbr_quality
elif version in ((3, 93), (3, 94), (3, 95), (3, 96), (3, 97)):
if self.preset_used == 1001:
return u"--preset standard"
elif self.preset_used == 1002:
return u"--preset extreme"
elif self.preset_used == 1004:
return u"--preset fast standard"
elif self.preset_used == 1005:
return u"--preset fast extreme"
elif self.preset_used == 1006:
return u"--preset medium"
elif self.preset_used == 1007:
return u"--preset fast medium"
if self.vbr_method == 3:
return u"-V %s" % self.vbr_quality
elif self.vbr_method in (4, 5):
return u"-V %s --vbr-new" % self.vbr_quality
elif version == (3, 98):
if self.vbr_method == 3:
return u"-V %s --vbr-old" % self.vbr_quality
elif self.vbr_method in (4, 5):
return u"-V %s" % self.vbr_quality
elif version >= (3, 99):
if self.vbr_method == 3:
return u"-V %s --vbr-old" % self.vbr_quality
elif self.vbr_method in (4, 5):
p = self.vbr_quality
adjust_key = (p, self.bitrate, self.lowpass_filter)
# https://sourceforge.net/p/lame/bugs/455/
p = {
(5, 32, 0): 7,
(5, 8, 0): 8,
(6, 8, 0): 9,
}.get(adjust_key, p)
return u"-V %s" % p
return u""
@classmethod
    def parse_version(cls, fileobj):
        """Returns the version as a (major, minor) tuple, a version
        string, and True if a LAMEHeader follows.
The passed file object will be positioned right before the
lame header if True.
Raises LAMEError if there is no lame version info.
"""
# http://wiki.hydrogenaud.io/index.php?title=LAME_version_string
data = fileobj.read(20)
if len(data) != 20:
raise LAMEError("Not a lame header")
if not data.startswith((b"LAME", b"L3.99")):
raise LAMEError("Not a lame header")
data = data.lstrip(b"EMAL")
major, data = data[0:1], data[1:].lstrip(b".")
minor = b""
for c in iterbytes(data):
if not c.isdigit():
break
minor += c
data = data[len(minor):]
try:
major = int(major.decode("ascii"))
minor = int(minor.decode("ascii"))
except ValueError:
raise LAMEError
        # the extended header was added sometime in the 3.90 cycle
# e.g. "LAME3.90 (alpha)" should still stop here.
# (I have seen such a file)
if (major, minor) < (3, 90) or (
(major, minor) == (3, 90) and data[-11:-10] == b"("):
flag = data.strip(b"\x00").rstrip().decode("ascii")
return (major, minor), u"%d.%d%s" % (major, minor, flag), False
if len(data) < 11:
raise LAMEError("Invalid version: too long")
flag = data[:-11].rstrip(b"\x00")
flag_string = u""
patch = u""
if flag == b"a":
flag_string = u" (alpha)"
elif flag == b"b":
flag_string = u" (beta)"
elif flag == b"r":
patch = u".1+"
elif flag == b" ":
if (major, minor) > (3, 96):
patch = u".0"
else:
patch = u".0+"
elif flag == b"" or flag == b".":
patch = u".0+"
else:
flag_string = u" (?)"
# extended header, seek back to 9 bytes for the caller
fileobj.seek(-11, 1)
return (major, minor), \
u"%d.%d%s%s" % (major, minor, patch, flag_string), True
class XingHeaderError(Exception):
pass
class XingHeaderFlags(object):
FRAMES = 0x1
BYTES = 0x2
TOC = 0x4
VBR_SCALE = 0x8
class XingHeader(object):
frames = -1
"""Number of frames, -1 if unknown"""
bytes = -1
"""Number of bytes, -1 if unknown"""
toc = []
"""List of 100 file offsets in percent encoded as 0-255. E.g. entry
50 contains the file offset in percent at 50% play time.
Empty if unknown.
"""
vbr_scale = -1
"""VBR quality indicator 0-100. -1 if unknown"""
lame_header = None
"""A LAMEHeader instance or None"""
lame_version = (0, 0)
"""The LAME version as two element tuple (major, minor)"""
lame_version_desc = u""
"""The version of the LAME encoder e.g. '3.99.0'. Empty if unknown"""
is_info = False
"""If the header started with 'Info' and not 'Xing'"""
def __init__(self, fileobj):
"""Parses the Xing header or raises XingHeaderError.
        The file position is undefined after this returns.
"""
data = fileobj.read(8)
if len(data) != 8 or data[:4] not in (b"Xing", b"Info"):
raise XingHeaderError("Not a Xing header")
self.is_info = (data[:4] == b"Info")
flags = cdata.uint32_be_from(data, 4)[0]
if flags & XingHeaderFlags.FRAMES:
data = fileobj.read(4)
if len(data) != 4:
raise XingHeaderError("Xing header truncated")
self.frames = cdata.uint32_be(data)
if flags & XingHeaderFlags.BYTES:
data = fileobj.read(4)
if len(data) != 4:
raise XingHeaderError("Xing header truncated")
self.bytes = cdata.uint32_be(data)
if flags & XingHeaderFlags.TOC:
data = fileobj.read(100)
if len(data) != 100:
raise XingHeaderError("Xing header truncated")
self.toc = list(bytearray(data))
if flags & XingHeaderFlags.VBR_SCALE:
data = fileobj.read(4)
if len(data) != 4:
raise XingHeaderError("Xing header truncated")
self.vbr_scale = cdata.uint32_be(data)
try:
self.lame_version, self.lame_version_desc, has_header = \
LAMEHeader.parse_version(fileobj)
if has_header:
self.lame_header = LAMEHeader(self, fileobj)
except LAMEError:
pass
def get_encoder_settings(self):
"""Returns the guessed encoder settings"""
if self.lame_header is None:
return u""
return self.lame_header.guess_settings(*self.lame_version)
@classmethod
def get_offset(cls, info):
"""Calculate the offset to the Xing header from the start of the
MPEG header including sync based on the MPEG header's content.
"""
assert info.layer == 3
if info.version == 1:
if info.mode != 3:
return 36
else:
return 21
else:
if info.mode != 3:
return 21
else:
return 13
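# Illustrative sketch (editor's addition, not part of mutagen): get_offset()
# needs only three MPEG header fields, so a tiny stand-in object is enough to
# see where a Xing/Info header would sit. _FakeInfo is hypothetical, not a
# mutagen class.
if __name__ == "__main__":
    class _FakeInfo(object):
        layer = 3    # MPEG layer III
        version = 1  # MPEG-1
        mode = 0     # stereo, i.e. not mono (mode 3)
    print(XingHeader.get_offset(_FakeInfo()))  # 36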
class VBRIHeaderError(Exception):
pass
class VBRIHeader(object):
version = 0
"""VBRI header version"""
quality = 0
"""Quality indicator"""
bytes = 0
"""Number of bytes"""
frames = 0
"""Number of frames"""
toc_scale_factor = 0
"""Scale factor of TOC entries"""
toc_frames = 0
"""Number of frames per table entry"""
toc = []
"""TOC"""
def __init__(self, fileobj):
"""Reads the VBRI header or raises VBRIHeaderError.
The file position is undefined after this returns
"""
data = fileobj.read(26)
if len(data) != 26 or not data.startswith(b"VBRI"):
raise VBRIHeaderError("Not a VBRI header")
offset = 4
self.version, offset = cdata.uint16_be_from(data, offset)
if self.version != 1:
raise VBRIHeaderError(
"Unsupported header version: %r" % self.version)
        offset += 2  # 2 byte float16 delay value; struct can't unpack float16
self.quality, offset = cdata.uint16_be_from(data, offset)
self.bytes, offset = cdata.uint32_be_from(data, offset)
self.frames, offset = cdata.uint32_be_from(data, offset)
toc_num_entries, offset = cdata.uint16_be_from(data, offset)
self.toc_scale_factor, offset = cdata.uint16_be_from(data, offset)
toc_entry_size, offset = cdata.uint16_be_from(data, offset)
self.toc_frames, offset = cdata.uint16_be_from(data, offset)
toc_size = toc_entry_size * toc_num_entries
toc_data = fileobj.read(toc_size)
if len(toc_data) != toc_size:
raise VBRIHeaderError("VBRI header truncated")
self.toc = []
if toc_entry_size == 2:
unpack = partial(cdata.uint16_be_from, toc_data)
elif toc_entry_size == 4:
unpack = partial(cdata.uint32_be_from, toc_data)
else:
raise VBRIHeaderError("Invalid TOC entry size")
self.toc = [unpack(i)[0] for i in range(0, toc_size, toc_entry_size)]
@classmethod
def get_offset(cls, info):
"""Offset in bytes from the start of the MPEG header including sync"""
assert info.layer == 3
return 36
| 15,866 | Python | .py | 410 | 28.912195 | 78 | 0.55515 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,560 | __init__.py | rembo10_headphones/lib/mutagen/mp3/__init__.py |
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""MPEG audio stream information and tags."""
import struct
from mutagen import StreamInfo
from mutagen._util import MutagenError, enum, BitReader, BitReaderError, \
convert_error, intround, endswith
from mutagen.id3 import ID3FileType, delete
from mutagen.id3._util import BitPaddedInt
from ._util import XingHeader, XingHeaderError, VBRIHeader, VBRIHeaderError
__all__ = ["MP3", "Open", "delete"]
class error(MutagenError):
pass
class HeaderNotFoundError(error):
pass
class InvalidMPEGHeader(error):
pass
@enum
class BitrateMode(object):
UNKNOWN = 0
"""Probably a CBR file, but not sure"""
CBR = 1
"""Constant Bitrate"""
VBR = 2
"""Variable Bitrate"""
ABR = 3
"""Average Bitrate (a variant of VBR)"""
def _guess_xing_bitrate_mode(xing):
if xing.lame_header:
lame = xing.lame_header
if lame.vbr_method in (1, 8):
return BitrateMode.CBR
elif lame.vbr_method in (2, 9):
return BitrateMode.ABR
elif lame.vbr_method in (3, 4, 5, 6):
return BitrateMode.VBR
# everything else undefined, continue guessing
# info tags get only written by lame for cbr files
if xing.is_info:
return BitrateMode.CBR
# older lame and non-lame with some variant of vbr
if xing.vbr_scale != -1 or xing.lame_version_desc:
return BitrateMode.VBR
return BitrateMode.UNKNOWN
# Mode values.
STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4)
class MPEGFrame(object):
# Map (version, layer) tuples to bitrates.
__BITRATE = {
(1, 1): [0, 32, 64, 96, 128, 160, 192, 224,
256, 288, 320, 352, 384, 416, 448],
(1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384],
(1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112,
128, 160, 192, 224, 256, 320],
(2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128,
144, 160, 176, 192, 224, 256],
(2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64,
80, 96, 112, 128, 144, 160],
}
__BITRATE[(2, 3)] = __BITRATE[(2, 2)]
for i in range(1, 4):
__BITRATE[(2.5, i)] = __BITRATE[(2, i)]
# Map version to sample rates.
__RATES = {
1: [44100, 48000, 32000],
2: [22050, 24000, 16000],
2.5: [11025, 12000, 8000]
}
sketchy = False
def __init__(self, fileobj):
"""Raises HeaderNotFoundError"""
self.frame_offset = fileobj.tell()
r = BitReader(fileobj)
try:
if r.bits(11) != 0x7ff:
raise HeaderNotFoundError("invalid sync")
version = r.bits(2)
layer = r.bits(2)
protection = r.bits(1)
bitrate = r.bits(4)
sample_rate = r.bits(2)
padding = r.bits(1)
r.skip(1) # private
self.mode = r.bits(2)
r.skip(6)
except BitReaderError:
raise HeaderNotFoundError("truncated header")
assert r.get_position() == 32 and r.is_aligned()
        # try to be strict here to reduce the chance of a false positive
if version == 1 or layer == 0 or sample_rate == 0x3 or \
bitrate == 0xf or bitrate == 0:
raise HeaderNotFoundError("invalid header")
self.channels = 1 if self.mode == MONO else 2
self.version = [2.5, None, 2, 1][version]
self.layer = 4 - layer
self.protected = not protection
self.padding = bool(padding)
self.bitrate = self.__BITRATE[(self.version, self.layer)][bitrate]
self.bitrate *= 1000
self.sample_rate = self.__RATES[self.version][sample_rate]
if self.layer == 1:
frame_size = 384
slot = 4
elif self.version >= 2 and self.layer == 3:
frame_size = 576
slot = 1
else:
frame_size = 1152
slot = 1
frame_length = (
((frame_size // 8 * self.bitrate) // self.sample_rate) +
padding) * slot
self.sketchy = True
# Try to find/parse the Xing header, which trumps the above length
# and bitrate calculation.
if self.layer == 3:
self._parse_vbr_header(fileobj, self.frame_offset, frame_size,
frame_length)
fileobj.seek(self.frame_offset + frame_length, 0)
def _parse_vbr_header(self, fileobj, frame_offset, frame_size,
frame_length):
"""Does not raise"""
# Xing
xing_offset = XingHeader.get_offset(self)
fileobj.seek(frame_offset + xing_offset, 0)
try:
xing = XingHeader(fileobj)
except XingHeaderError:
pass
else:
lame = xing.lame_header
self.sketchy = False
self.bitrate_mode = _guess_xing_bitrate_mode(xing)
self.encoder_settings = xing.get_encoder_settings()
if xing.frames != -1:
samples = frame_size * xing.frames
if xing.bytes != -1 and samples > 0:
# the first frame is only included in xing.bytes but
# not in xing.frames, skip it.
audio_bytes = max(0, xing.bytes - frame_length)
self.bitrate = intround((
audio_bytes * 8 * self.sample_rate) / float(samples))
if lame is not None:
samples -= lame.encoder_delay_start
samples -= lame.encoder_padding_end
if samples < 0:
# older lame versions wrote bogus delay/padding for short
# files with low bitrate
samples = 0
self.length = float(samples) / self.sample_rate
if xing.lame_version_desc:
self.encoder_info = u"LAME %s" % xing.lame_version_desc
if lame is not None:
self.track_gain = lame.track_gain_adjustment
self.track_peak = lame.track_peak
self.album_gain = lame.album_gain_adjustment
return
# VBRI
vbri_offset = VBRIHeader.get_offset(self)
fileobj.seek(frame_offset + vbri_offset, 0)
try:
vbri = VBRIHeader(fileobj)
except VBRIHeaderError:
pass
else:
self.bitrate_mode = BitrateMode.VBR
self.encoder_info = u"FhG"
self.sketchy = False
self.length = float(frame_size * vbri.frames) / self.sample_rate
if self.length:
self.bitrate = int((vbri.bytes * 8) / self.length)
def skip_id3(fileobj):
"""Might raise IOError"""
# WMP writes multiple id3s, so skip as many as we find
while True:
idata = fileobj.read(10)
try:
id3, insize = struct.unpack('>3sxxx4s', idata)
except struct.error:
id3, insize = b'', 0
insize = BitPaddedInt(insize)
if id3 == b'ID3' and insize > 0:
fileobj.seek(insize, 1)
else:
fileobj.seek(-len(idata), 1)
break
def iter_sync(fileobj, max_read):
    """Iterate over a fileobj and yield at each MPEG sync.
    When yielding, the fileobj offset is right before the sync and can be
    changed between iterations without affecting the iteration process.
Might raise IOError.
"""
read = 0
size = 2
last_byte = b""
is_second = lambda b: ord(b) & 0xe0 == 0xe0
while read < max_read:
data_offset = fileobj.tell()
new_data = fileobj.read(min(max_read - read, size))
if not new_data:
return
read += len(new_data)
if last_byte == b"\xff" and is_second(new_data[0:1]):
fileobj.seek(data_offset - 1, 0)
yield
size *= 2
last_byte = new_data[-1:]
find_offset = 0
while True:
index = new_data.find(b"\xff", find_offset)
# if not found or the last byte -> read more
if index == -1 or index == len(new_data) - 1:
break
if is_second(new_data[index + 1:index + 2]):
fileobj.seek(data_offset + index, 0)
yield
find_offset = index + 1
fileobj.seek(data_offset + len(new_data), 0)
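# Illustrative sketch (editor's addition, not part of mutagen): iter_sync()
# leaves the file positioned right before each candidate 0xFFEx sync word.
# BytesIO stands in for a real MP3 file here.
if __name__ == "__main__":
    from io import BytesIO
    buf = BytesIO(b"\x00\xff\xfb\x90\x00")
    for _ in iter_sync(buf, 16):
        print(buf.tell())  # 1: offset of the 0xFF sync byte
        break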
class MPEGInfo(StreamInfo):
"""MPEGInfo()
MPEG audio stream information
Parse information about an MPEG audio file. This also reads the
Xing VBR header format.
This code was implemented based on the format documentation at
http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm.
Useful attributes:
Attributes:
length (`float`): audio length, in seconds
channels (`int`): number of audio channels
bitrate (`int`): audio bitrate, in bits per second.
In case :attr:`bitrate_mode` is :attr:`BitrateMode.UNKNOWN` the
bitrate is guessed based on the first frame.
sample_rate (`int`): audio sample rate, in Hz
encoder_info (`mutagen.text`): a string containing encoder name and
possibly version. In case a lame tag is present this will start
with ``"LAME "``, if unknown it is empty, otherwise the
text format is undefined.
encoder_settings (`mutagen.text`): a string containing a guess about
the settings used for encoding. The format is undefined and
depends on the encoder.
bitrate_mode (`BitrateMode`): a :class:`BitrateMode`
track_gain (`float` or `None`): replaygain track gain (89db) or None
track_peak (`float` or `None`): replaygain track peak or None
album_gain (`float` or `None`): replaygain album gain (89db) or None
Useless attributes:
Attributes:
version (`float`): MPEG version (1, 2, 2.5)
layer (`int`): 1, 2, or 3
mode (`int`): One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3)
protected (`bool`): whether or not the file is "protected"
sketchy (`bool`): if true, the file may not be valid MPEG audio
"""
sketchy = False
encoder_info = u""
encoder_settings = u""
bitrate_mode = BitrateMode.UNKNOWN
track_gain = track_peak = album_gain = album_peak = None
@convert_error(IOError, error)
def __init__(self, fileobj, offset=None):
"""Parse MPEG stream information from a file-like object.
If an offset argument is given, it is used to start looking
for stream information and Xing headers; otherwise, ID3v2 tags
will be skipped automatically. A correct offset can make
loading files significantly faster.
Raises HeaderNotFoundError, error
"""
if offset is None:
fileobj.seek(0, 0)
else:
fileobj.seek(offset, 0)
# skip anyway, because wmp stacks multiple id3 tags
skip_id3(fileobj)
# find a sync in the first 1024K, give up after some invalid syncs
max_read = 1024 * 1024
max_syncs = 1500
enough_frames = 4
min_frames = 2
self.sketchy = True
frames = []
first_frame = None
for _ in iter_sync(fileobj, max_read):
max_syncs -= 1
if max_syncs <= 0:
break
for _ in range(enough_frames):
try:
frame = MPEGFrame(fileobj)
except HeaderNotFoundError:
break
frames.append(frame)
if not frame.sketchy:
break
# if we have min frames, save it in case this is all we get
if len(frames) >= min_frames and first_frame is None:
first_frame = frames[0]
# if the last frame was a non-sketchy one (has a valid vbr header)
# we use that
if frames and not frames[-1].sketchy:
first_frame = frames[-1]
self.sketchy = False
break
# if we have enough valid frames, use the first
if len(frames) >= enough_frames:
first_frame = frames[0]
self.sketchy = False
break
# otherwise start over with the next sync
del frames[:]
if first_frame is None:
raise HeaderNotFoundError("can't sync to MPEG frame")
assert first_frame
self.length = -1
sketchy = self.sketchy
self.__dict__.update(first_frame.__dict__)
self.sketchy = sketchy
# no length, estimate based on file size
if self.length == -1:
fileobj.seek(0, 2)
content_size = fileobj.tell() - first_frame.frame_offset
self.length = 8 * content_size / float(self.bitrate)
def pprint(self):
info = str(self.bitrate_mode).split(".", 1)[-1]
if self.bitrate_mode == BitrateMode.UNKNOWN:
info = u"CBR?"
if self.encoder_info:
info += ", %s" % self.encoder_info
if self.encoder_settings:
info += ", %s" % self.encoder_settings
s = u"MPEG %s layer %d, %d bps (%s), %s Hz, %d chn, %.2f seconds" % (
self.version, self.layer, self.bitrate, info,
self.sample_rate, self.channels, self.length)
if self.sketchy:
s += u" (sketchy)"
return s
class MP3(ID3FileType):
"""MP3(filething)
An MPEG audio (usually MPEG-1 Layer 3) file.
Arguments:
filething (filething)
Attributes:
info (`MPEGInfo`)
tags (`mutagen.id3.ID3`)
"""
_Info = MPEGInfo
_mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"]
@property
def mime(self):
l = self.info.layer
return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime
@staticmethod
def score(filename, fileobj, header_data):
filename = filename.lower()
return (header_data.startswith(b"ID3") * 2 +
endswith(filename, b".mp3") +
endswith(filename, b".mp2") + endswith(filename, b".mpg") +
endswith(filename, b".mpeg"))
Open = MP3
class EasyMP3(MP3):
"""EasyMP3(filething)
Like MP3, but uses EasyID3 for tags.
Arguments:
filething (filething)
Attributes:
info (`MPEGInfo`)
tags (`mutagen.easyid3.EasyID3`)
"""
from mutagen.easyid3 import EasyID3 as ID3
ID3 = ID3
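# Illustrative usage sketch (editor's addition, not part of mutagen): the
# stream information collected by MPEGInfo, exposed via the MP3 FileType.
# "song.mp3" is a placeholder path.
if __name__ == "__main__":
    audio = MP3("song.mp3")
    print(audio.info.pprint())  # e.g. "MPEG 1 layer 3, 192000 bps (CBR?), ..."
    print(audio.info.bitrate_mode == BitrateMode.VBR)
    print(audio.info.sketchy)   # True if the file may not be valid MPEG audio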
| 14,893 | Python | .py | 374 | 29.799465 | 78 | 0.574561 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,561 | util.py | rembo10_headphones/lib/soupsieve/util.py |
"""Utility."""
from functools import wraps, lru_cache
import warnings
import re
from typing import Callable, Any, Optional, Tuple, List
DEBUG = 0x00001
RE_PATTERN_LINE_SPLIT = re.compile(r'(?:\r\n|(?!\r\n)[\n\r])|$')
UC_A = ord('A')
UC_Z = ord('Z')
@lru_cache(maxsize=512)
def lower(string: str) -> str:
"""Lower."""
new_string = []
for c in string:
o = ord(c)
new_string.append(chr(o + 32) if UC_A <= o <= UC_Z else c)
return ''.join(new_string)
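# Illustrative check (editor's addition, not part of soupsieve): unlike
# str.lower(), this helper folds only ASCII A-Z, which is what CSS
# case-insensitive matching requires.
if __name__ == "__main__":
    assert lower("DIV") == "div"
    assert lower("ÅÄÖ") == "ÅÄÖ"  # non-ASCII letters are left untouched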
class SelectorSyntaxError(Exception):
"""Syntax error in a CSS selector."""
def __init__(self, msg: str, pattern: Optional[str] = None, index: Optional[int] = None) -> None:
"""Initialize."""
self.line = None
self.col = None
self.context = None
if pattern is not None and index is not None:
# Format pattern to show line and column position
self.context, self.line, self.col = get_pattern_context(pattern, index)
msg = '{}\n line {}:\n{}'.format(msg, self.line, self.context)
super().__init__(msg)
def deprecated(message: str, stacklevel: int = 2) -> Callable[..., Any]: # pragma: no cover
"""
Raise a `DeprecationWarning` when wrapped function/method is called.
Usage:
@deprecated("This method will be removed in version X; use Y instead.")
        def some_method():
pass
"""
def _wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
@wraps(func)
def _deprecated_func(*args: Any, **kwargs: Any) -> Any:
warnings.warn(
f"'{func.__name__}' is deprecated. {message}",
category=DeprecationWarning,
stacklevel=stacklevel
)
return func(*args, **kwargs)
return _deprecated_func
return _wrapper
def warn_deprecated(message: str, stacklevel: int = 2) -> None: # pragma: no cover
"""Warn deprecated."""
warnings.warn(
message,
category=DeprecationWarning,
stacklevel=stacklevel
)
def get_pattern_context(pattern: str, index: int) -> Tuple[str, int, int]:
"""Get the pattern context."""
last = 0
current_line = 1
col = 1
text = [] # type: List[str]
line = 1
offset = None # type: Optional[int]
# Split pattern by newline and handle the text before the newline
for m in RE_PATTERN_LINE_SPLIT.finditer(pattern):
linetext = pattern[last:m.start(0)]
if not len(m.group(0)) and not len(text):
indent = ''
offset = -1
col = index - last + 1
elif last <= index < m.end(0):
indent = '--> '
offset = (-1 if index > m.start(0) else 0) + 3
col = index - last + 1
else:
indent = ' '
offset = None
if len(text):
# Regardless of whether we are presented with `\r\n`, `\r`, or `\n`,
# we will render the output with just `\n`. We will still log the column
# correctly though.
text.append('\n')
text.append('{}{}'.format(indent, linetext))
if offset is not None:
text.append('\n')
text.append(' ' * (col + offset) + '^')
line = current_line
current_line += 1
last = m.end(0)
return ''.join(text), line, col
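# Illustrative demo (editor's addition, not part of soupsieve):
# get_pattern_context() marks the offending line with "--> " and places a
# caret under the error column, which SelectorSyntaxError embeds in its
# message.
if __name__ == "__main__":
    context, line, col = get_pattern_context("div >\np |= 3", 6)
    print((line, col))  # (2, 1)
    print(context)
    #   div >
    # --> p |= 3
    #     ^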
| 3,371 | Python | .py | 90 | 29.366667 | 101 | 0.567127 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,562 | css_match.py | rembo10_headphones/lib/soupsieve/css_match.py |
"""CSS matcher."""
from datetime import datetime
from . import util
import re
from . import css_types as ct
import unicodedata
import bs4 # type: ignore[import]
from typing import Iterator, Iterable, List, Any, Optional, Tuple, Union, Dict, Callable, Sequence, cast
# Empty tag pattern (whitespace okay)
RE_NOT_EMPTY = re.compile('[^ \t\r\n\f]')
RE_NOT_WS = re.compile('[^ \t\r\n\f]+')
# Relationships
REL_PARENT = ' '
REL_CLOSE_PARENT = '>'
REL_SIBLING = '~'
REL_CLOSE_SIBLING = '+'
# Relationships for :has() (forward looking)
REL_HAS_PARENT = ': '
REL_HAS_CLOSE_PARENT = ':>'
REL_HAS_SIBLING = ':~'
REL_HAS_CLOSE_SIBLING = ':+'
NS_XHTML = 'http://www.w3.org/1999/xhtml'
NS_XML = 'http://www.w3.org/XML/1998/namespace'
DIR_FLAGS = ct.SEL_DIR_LTR | ct.SEL_DIR_RTL
RANGES = ct.SEL_IN_RANGE | ct.SEL_OUT_OF_RANGE
DIR_MAP = {
'ltr': ct.SEL_DIR_LTR,
'rtl': ct.SEL_DIR_RTL,
'auto': 0
}
RE_NUM = re.compile(r"^(?P<value>-?(?:[0-9]{1,}(\.[0-9]+)?|\.[0-9]+))$")
RE_TIME = re.compile(r'^(?P<hour>[0-9]{2}):(?P<minutes>[0-9]{2})$')
RE_MONTH = re.compile(r'^(?P<year>[0-9]{4,})-(?P<month>[0-9]{2})$')
RE_WEEK = re.compile(r'^(?P<year>[0-9]{4,})-W(?P<week>[0-9]{2})$')
RE_DATE = re.compile(r'^(?P<year>[0-9]{4,})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})$')
RE_DATETIME = re.compile(
r'^(?P<year>[0-9]{4,})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})T(?P<hour>[0-9]{2}):(?P<minutes>[0-9]{2})$'
)
RE_WILD_STRIP = re.compile(r'(?:(?:-\*-)(?:\*(?:-|$))*|-\*$)')
MONTHS_30 = (4, 6, 9, 11) # April, June, September, and November
FEB = 2
SHORT_MONTH = 30
LONG_MONTH = 31
FEB_MONTH = 28
FEB_LEAP_MONTH = 29
DAYS_IN_WEEK = 7
class _FakeParent:
"""
Fake parent class.
When we have a fragment with no `BeautifulSoup` document object,
we can't evaluate `nth` selectors properly. Create a temporary
fake parent so we can traverse the root element as a child.
"""
def __init__(self, element: 'bs4.Tag') -> None:
"""Initialize."""
self.contents = [element]
def __len__(self) -> int:
"""Length."""
return len(self.contents)
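# Minimal sketch of the intent: wrap a detached element so positional (`nth`)
# logic can treat it as the sole child of a parent.
#
#   import bs4
#   el = bs4.BeautifulSoup('<p>text</p>', 'html.parser').p
#   fake = _FakeParent(el)
#   # len(fake) == 1 and fake.contents[0] is el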
class _DocumentNav:
"""Navigate a Beautiful Soup document."""
@classmethod
def assert_valid_input(cls, tag: Any) -> None:
"""Check if valid input tag or document."""
# Fail on unexpected types.
if not cls.is_tag(tag):
raise TypeError("Expected a BeautifulSoup 'Tag', but instead received type {}".format(type(tag)))
@staticmethod
def is_doc(obj: 'bs4.Tag') -> bool:
"""Is `BeautifulSoup` object."""
return isinstance(obj, bs4.BeautifulSoup)
@staticmethod
def is_tag(obj: 'bs4.PageElement') -> bool:
"""Is tag."""
return isinstance(obj, bs4.Tag)
@staticmethod
def is_declaration(obj: 'bs4.PageElement') -> bool: # pragma: no cover
"""Is declaration."""
return isinstance(obj, bs4.Declaration)
@staticmethod
def is_cdata(obj: 'bs4.PageElement') -> bool:
"""Is CDATA."""
return isinstance(obj, bs4.CData)
@staticmethod
def is_processing_instruction(obj: 'bs4.PageElement') -> bool: # pragma: no cover
"""Is processing instruction."""
return isinstance(obj, bs4.ProcessingInstruction)
@staticmethod
def is_navigable_string(obj: 'bs4.PageElement') -> bool:
"""Is navigable string."""
return isinstance(obj, bs4.NavigableString)
@staticmethod
def is_special_string(obj: 'bs4.PageElement') -> bool:
"""Is special string."""
return isinstance(obj, (bs4.Comment, bs4.Declaration, bs4.CData, bs4.ProcessingInstruction, bs4.Doctype))
@classmethod
def is_content_string(cls, obj: 'bs4.PageElement') -> bool:
"""Check if node is content string."""
return cls.is_navigable_string(obj) and not cls.is_special_string(obj)
@staticmethod
def create_fake_parent(el: 'bs4.Tag') -> _FakeParent:
"""Create fake parent for a given element."""
return _FakeParent(el)
@staticmethod
def is_xml_tree(el: 'bs4.Tag') -> bool:
"""Check if element (or document) is from a XML tree."""
return bool(el._is_xml)
def is_iframe(self, el: 'bs4.Tag') -> bool:
"""Check if element is an `iframe`."""
return bool(
((el.name if self.is_xml_tree(el) else util.lower(el.name)) == 'iframe') and
self.is_html_tag(el) # type: ignore[attr-defined]
)
def is_root(self, el: 'bs4.Tag') -> bool:
"""
Return whether element is a root element.
We check that the element is the root of the tree (which we have already pre-calculated),
and we check if it is the root element under an `iframe`.
"""
root = self.root and self.root is el # type: ignore[attr-defined]
if not root:
parent = self.get_parent(el)
root = parent is not None and self.is_html and self.is_iframe(parent) # type: ignore[attr-defined]
return root
def get_contents(self, el: 'bs4.Tag', no_iframe: bool = False) -> Iterator['bs4.PageElement']:
"""Get contents or contents in reverse."""
if not no_iframe or not self.is_iframe(el):
for content in el.contents:
yield content
def get_children(
self,
el: 'bs4.Tag',
start: Optional[int] = None,
reverse: bool = False,
tags: bool = True,
no_iframe: bool = False
) -> Iterator['bs4.PageElement']:
"""Get children."""
if not no_iframe or not self.is_iframe(el):
last = len(el.contents) - 1
if start is None:
index = last if reverse else 0
else:
index = start
end = -1 if reverse else last + 1
incr = -1 if reverse else 1
if 0 <= index <= last:
while index != end:
node = el.contents[index]
index += incr
if not tags or self.is_tag(node):
yield node
def get_descendants(
self,
el: 'bs4.Tag',
tags: bool = True,
no_iframe: bool = False
) -> Iterator['bs4.PageElement']:
"""Get descendants."""
if not no_iframe or not self.is_iframe(el):
next_good = None
for child in el.descendants:
if next_good is not None:
if child is not next_good:
continue
next_good = None
is_tag = self.is_tag(child)
if no_iframe and is_tag and self.is_iframe(child):
if child.next_sibling is not None:
next_good = child.next_sibling
else:
last_child = child
while self.is_tag(last_child) and last_child.contents:
last_child = last_child.contents[-1]
next_good = last_child.next_element
yield child
if next_good is None:
break
# Coverage isn't seeing this even though it's executed
continue # pragma: no cover
if not tags or is_tag:
yield child
def get_parent(self, el: 'bs4.Tag', no_iframe: bool = False) -> 'bs4.Tag':
"""Get parent."""
parent = el.parent
if no_iframe and parent is not None and self.is_iframe(parent):
parent = None
return parent
@staticmethod
def get_tag_name(el: 'bs4.Tag') -> Optional[str]:
"""Get tag."""
return cast(Optional[str], el.name)
@staticmethod
def get_prefix_name(el: 'bs4.Tag') -> Optional[str]:
"""Get prefix."""
return cast(Optional[str], el.prefix)
@staticmethod
def get_uri(el: 'bs4.Tag') -> Optional[str]:
"""Get namespace `URI`."""
return cast(Optional[str], el.namespace)
@classmethod
def get_next(cls, el: 'bs4.Tag', tags: bool = True) -> 'bs4.PageElement':
"""Get next sibling tag."""
sibling = el.next_sibling
while tags and not cls.is_tag(sibling) and sibling is not None:
sibling = sibling.next_sibling
return sibling
@classmethod
def get_previous(cls, el: 'bs4.Tag', tags: bool = True) -> 'bs4.PageElement':
"""Get previous sibling tag."""
sibling = el.previous_sibling
while tags and not cls.is_tag(sibling) and sibling is not None:
sibling = sibling.previous_sibling
return sibling
@staticmethod
def has_html_ns(el: 'bs4.Tag') -> bool:
"""
Check if element has an HTML namespace.
This is a bit different from whether an element is treated as having an HTML namespace,
like we do in the case of `is_html_tag`.
"""
ns = getattr(el, 'namespace') if el else None
return bool(ns and ns == NS_XHTML)
@staticmethod
def split_namespace(el: 'bs4.Tag', attr_name: str) -> Tuple[Optional[str], Optional[str]]:
"""Return namespace and attribute name without the prefix."""
return getattr(attr_name, 'namespace', None), getattr(attr_name, 'name', None)
@classmethod
def normalize_value(cls, value: Any) -> Union[str, Sequence[str]]:
"""Normalize the value to be a string or list of strings."""
# Treat `None` as empty string.
if value is None:
return ''
# Pass through strings
if isinstance(value, str):
return value
# If it's a byte string, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# BeautifulSoup supports sequences of attribute values, so make sure the children are strings.
if isinstance(value, Sequence):
new_value = []
for v in value:
if not isinstance(v, (str, bytes)) and isinstance(v, Sequence):
# This is most certainly a user error and will crash and burn later.
# To keep things working, we'll do what we do with all objects,
# And convert them to strings.
new_value.append(str(v))
else:
# Convert the child to a string
new_value.append(cast(str, cls.normalize_value(v)))
return new_value
# Try and make anything else a string
return str(value)
@classmethod
def get_attribute_by_name(
cls,
el: 'bs4.Tag',
name: str,
default: Optional[Union[str, Sequence[str]]] = None
) -> Optional[Union[str, Sequence[str]]]:
"""Get attribute by name."""
value = default
if el._is_xml:
try:
value = cls.normalize_value(el.attrs[name])
except KeyError:
pass
else:
for k, v in el.attrs.items():
if util.lower(k) == name:
value = cls.normalize_value(v)
break
return value
@classmethod
def iter_attributes(cls, el: 'bs4.Tag') -> Iterator[Tuple[str, Optional[Union[str, Sequence[str]]]]]:
"""Iterate attributes."""
for k, v in el.attrs.items():
yield k, cls.normalize_value(v)
@classmethod
def get_classes(cls, el: 'bs4.Tag') -> Sequence[str]:
"""Get classes."""
classes = cls.get_attribute_by_name(el, 'class', [])
if isinstance(classes, str):
classes = RE_NOT_WS.findall(classes)
return cast(Sequence[str], classes)
def get_text(self, el: 'bs4.Tag', no_iframe: bool = False) -> str:
"""Get text."""
return ''.join(
[node for node in self.get_descendants(el, tags=False, no_iframe=no_iframe) if self.is_content_string(node)]
)
def get_own_text(self, el: 'bs4.Tag', no_iframe: bool = False) -> List[str]:
"""Get Own Text."""
return [node for node in self.get_contents(el, no_iframe=no_iframe) if self.is_content_string(node)]
class Inputs:
"""Class for parsing and validating input items."""
@staticmethod
def validate_day(year: int, month: int, day: int) -> bool:
"""Validate day."""
max_days = LONG_MONTH
if month == FEB:
max_days = FEB_LEAP_MONTH if ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0) else FEB_MONTH
elif month in MONTHS_30:
max_days = SHORT_MONTH
return 1 <= day <= max_days
@staticmethod
def validate_week(year: int, week: int) -> bool:
"""Validate week."""
max_week = datetime.strptime("{}-{}-{}".format(12, 31, year), "%m-%d-%Y").isocalendar()[1]
if max_week == 1:
max_week = 53
return 1 <= week <= max_week
@staticmethod
def validate_month(month: int) -> bool:
"""Validate month."""
return 1 <= month <= 12
@staticmethod
def validate_year(year: int) -> bool:
"""Validate year."""
return 1 <= year
@staticmethod
def validate_hour(hour: int) -> bool:
"""Validate hour."""
return 0 <= hour <= 23
@staticmethod
def validate_minutes(minutes: int) -> bool:
"""Validate minutes."""
return 0 <= minutes <= 59
@classmethod
def parse_value(cls, itype: str, value: Optional[str]) -> Optional[Tuple[float, ...]]:
"""Parse the input value."""
parsed = None # type: Optional[Tuple[float, ...]]
if value is None:
return value
if itype == "date":
m = RE_DATE.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
day = int(m.group('day'), 10)
if cls.validate_year(year) and cls.validate_month(month) and cls.validate_day(year, month, day):
parsed = (year, month, day)
elif itype == "month":
m = RE_MONTH.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
if cls.validate_year(year) and cls.validate_month(month):
parsed = (year, month)
elif itype == "week":
m = RE_WEEK.match(value)
if m:
year = int(m.group('year'), 10)
week = int(m.group('week'), 10)
if cls.validate_year(year) and cls.validate_week(year, week):
parsed = (year, week)
elif itype == "time":
m = RE_TIME.match(value)
if m:
hour = int(m.group('hour'), 10)
minutes = int(m.group('minutes'), 10)
if cls.validate_hour(hour) and cls.validate_minutes(minutes):
parsed = (hour, minutes)
elif itype == "datetime-local":
m = RE_DATETIME.match(value)
if m:
year = int(m.group('year'), 10)
month = int(m.group('month'), 10)
day = int(m.group('day'), 10)
hour = int(m.group('hour'), 10)
minutes = int(m.group('minutes'), 10)
if (
cls.validate_year(year) and cls.validate_month(month) and cls.validate_day(year, month, day) and
cls.validate_hour(hour) and cls.validate_minutes(minutes)
):
parsed = (year, month, day, hour, minutes)
elif itype in ("number", "range"):
m = RE_NUM.match(value)
if m:
parsed = (float(m.group('value')),)
return parsed
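# Illustrative results (a sketch): input values are parsed into comparable
# tuples, or `None` when the value is malformed or fails validation.
#
#   Inputs.parse_value('date', '2024-02-29')   # (2024, 2, 29)
#   Inputs.parse_value('date', '2023-02-29')   # None (2023 is not a leap year)
#   Inputs.parse_value('time', '13:30')        # (13, 30)
#   Inputs.parse_value('number', '-3.5')       # (-3.5,)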
class CSSMatch(_DocumentNav):
"""Perform CSS matching."""
def __init__(
self,
selectors: ct.SelectorList,
scope: 'bs4.Tag',
namespaces: Optional[ct.Namespaces],
flags: int
) -> None:
"""Initialize."""
self.assert_valid_input(scope)
self.tag = scope
self.cached_meta_lang = [] # type: List[Tuple[str, str]]
self.cached_default_forms = [] # type: List[Tuple['bs4.Tag', 'bs4.Tag']]
self.cached_indeterminate_forms = [] # type: List[Tuple['bs4.Tag', str, bool]]
self.selectors = selectors
self.namespaces = {} if namespaces is None else namespaces # type: Union[ct.Namespaces, Dict[str, str]]
self.flags = flags
self.iframe_restrict = False
# Find the root element for the whole tree
doc = scope
parent = self.get_parent(doc)
while parent:
doc = parent
parent = self.get_parent(doc)
root = None
if not self.is_doc(doc):
root = doc
else:
for child in self.get_children(doc):
root = child
break
self.root = root
self.scope = scope if scope is not doc else root
self.has_html_namespace = self.has_html_ns(root)
# A document can be both XML and HTML (XHTML)
self.is_xml = self.is_xml_tree(doc)
self.is_html = not self.is_xml or self.has_html_namespace
def supports_namespaces(self) -> bool:
"""Check if namespaces are supported in the HTML type."""
return self.is_xml or self.has_html_namespace
def get_tag_ns(self, el: 'bs4.Tag') -> str:
"""Get tag namespace."""
if self.supports_namespaces():
namespace = ''
ns = self.get_uri(el)
if ns:
namespace = ns
else:
namespace = NS_XHTML
return namespace
def is_html_tag(self, el: 'bs4.Tag') -> bool:
"""Check if tag is in HTML namespace."""
return self.get_tag_ns(el) == NS_XHTML
def get_tag(self, el: 'bs4.Tag') -> Optional[str]:
"""Get tag."""
name = self.get_tag_name(el)
return util.lower(name) if name is not None and not self.is_xml else name
def get_prefix(self, el: 'bs4.Tag') -> Optional[str]:
"""Get prefix."""
prefix = self.get_prefix_name(el)
return util.lower(prefix) if prefix is not None and not self.is_xml else prefix
def find_bidi(self, el: 'bs4.Tag') -> Optional[int]:
"""Get directionality from element text."""
for node in self.get_children(el, tags=False):
# Analyze child text nodes
if self.is_tag(node):
# Avoid analyzing certain elements specified in the specification.
direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
if (
self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or
not self.is_html_tag(node) or
direction is not None
):
continue # pragma: no cover
# Check directionality of this node's text
value = self.find_bidi(node)
if value is not None:
return value
# Direction could not be determined
continue # pragma: no cover
# Skip `doctype` comments, etc.
if self.is_special_string(node):
continue
# Analyze text nodes for directionality.
for c in node:
bidi = unicodedata.bidirectional(c)
if bidi in ('AL', 'R', 'L'):
return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
return None
def extended_language_filter(self, lang_range: str, lang_tag: str) -> bool:
"""Filter the language tags."""
match = True
lang_range = RE_WILD_STRIP.sub('-', lang_range).lower()
ranges = lang_range.split('-')
subtags = lang_tag.lower().split('-')
length = len(ranges)
rindex = 0
sindex = 0
r = ranges[rindex]
s = subtags[sindex]
# Primary tag needs to match
if r != '*' and r != s:
match = False
rindex += 1
sindex += 1
# Match until we run out of ranges
while match and rindex < length:
r = ranges[rindex]
try:
s = subtags[sindex]
except IndexError:
# Ran out of subtags,
# but we still have ranges
match = False
continue
# Empty range
if not r:
match = False
continue
# Matched range
elif s == r:
rindex += 1
# Implicit wildcard cannot match
# singletons
elif len(s) == 1:
match = False
continue
# Implicitly matched, so grab next subtag
sindex += 1
return match
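# Illustrative matches (a sketch of the RFC 4647 "extended filtering" semantics
# implemented above, assuming `matcher` is a `CSSMatch` instance):
#
#   matcher.extended_language_filter('de', 'de-CH')            # True (prefix match)
#   matcher.extended_language_filter('de-CH', 'de')            # False (range has extra subtags)
#   matcher.extended_language_filter('*-CH', 'de-CH')          # True (wildcard primary tag)
#   matcher.extended_language_filter('de-*-DE', 'de-Latn-DE')  # True (wildcards collapse)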
def match_attribute_name(
self,
el: 'bs4.Tag',
attr: str,
prefix: Optional[str]
) -> Optional[Union[str, Sequence[str]]]:
"""Match attribute name and return value if it exists."""
value = None
if self.supports_namespaces():
value = None
# If we have not defined namespaces, we can't very well find them, so don't bother trying.
if prefix:
ns = self.namespaces.get(prefix)
if ns is None and prefix != '*':
return None
else:
ns = None
for k, v in self.iter_attributes(el):
# Get attribute parts
namespace, name = self.split_namespace(el, k)
# Can't match a prefix attribute as we haven't specified one to match
# Try to match it normally as a whole `p:a` as selector may be trying `p\:a`.
if ns is None:
if (self.is_xml and attr == k) or (not self.is_xml and util.lower(attr) == util.lower(k)):
value = v
break
# Coverage is not finding this even though it is executed.
# Adding a print statement before this (and erasing coverage) causes coverage to find the line.
# Ignore the false positive message.
continue # pragma: no cover
# We can't match our desired prefix attribute as the attribute doesn't have a prefix
if namespace is None or ns != namespace and prefix != '*':
continue
# The attribute doesn't match.
if (util.lower(attr) != util.lower(name)) if not self.is_xml else (attr != name):
continue
value = v
break
else:
for k, v in self.iter_attributes(el):
if util.lower(attr) != util.lower(k):
continue
value = v
break
return value
def match_namespace(self, el: 'bs4.Tag', tag: ct.SelectorTag) -> bool:
"""Match the namespace of the element."""
match = True
namespace = self.get_tag_ns(el)
default_namespace = self.namespaces.get('')
tag_ns = '' if tag.prefix is None else self.namespaces.get(tag.prefix)
# We must match the default namespace if one is not provided
if tag.prefix is None and (default_namespace is not None and namespace != default_namespace):
match = False
# If we specified `|tag`, we must not have a namespace.
elif (tag.prefix is not None and tag.prefix == '' and namespace):
match = False
# Verify prefix matches
elif (
tag.prefix and
tag.prefix != '*' and (tag_ns is None or namespace != tag_ns)
):
match = False
return match
def match_attributes(self, el: 'bs4.Tag', attributes: Tuple[ct.SelectorAttribute, ...]) -> bool:
"""Match attributes."""
match = True
if attributes:
for a in attributes:
temp = self.match_attribute_name(el, a.attribute, a.prefix)
pattern = a.xml_type_pattern if self.is_xml and a.xml_type_pattern else a.pattern
if temp is None:
match = False
break
value = temp if isinstance(temp, str) else ' '.join(temp)
if pattern is None:
continue
elif pattern.match(value) is None:
match = False
break
return match
def match_tagname(self, el: 'bs4.Tag', tag: ct.SelectorTag) -> bool:
"""Match tag name."""
name = (util.lower(tag.name) if not self.is_xml and tag.name is not None else tag.name)
return not (
name is not None and
name not in (self.get_tag(el), '*')
)
def match_tag(self, el: 'bs4.Tag', tag: Optional[ct.SelectorTag]) -> bool:
"""Match the tag."""
match = True
if tag is not None:
# Verify namespace
if not self.match_namespace(el, tag):
match = False
if not self.match_tagname(el, tag):
match = False
return match
def match_past_relations(self, el: 'bs4.Tag', relation: ct.SelectorList) -> bool:
"""Match past relationship."""
found = False
# I don't think this can ever happen, but it makes `mypy` happy
if isinstance(relation[0], ct.SelectorNull): # pragma: no cover
return found
if relation[0].rel_type == REL_PARENT:
parent = self.get_parent(el, no_iframe=self.iframe_restrict)
while not found and parent:
found = self.match_selectors(parent, relation)
parent = self.get_parent(parent, no_iframe=self.iframe_restrict)
elif relation[0].rel_type == REL_CLOSE_PARENT:
parent = self.get_parent(el, no_iframe=self.iframe_restrict)
if parent:
found = self.match_selectors(parent, relation)
elif relation[0].rel_type == REL_SIBLING:
sibling = self.get_previous(el)
while not found and sibling:
found = self.match_selectors(sibling, relation)
sibling = self.get_previous(sibling)
elif relation[0].rel_type == REL_CLOSE_SIBLING:
sibling = self.get_previous(el)
if sibling and self.is_tag(sibling):
found = self.match_selectors(sibling, relation)
return found
def match_future_child(self, parent: 'bs4.Tag', relation: ct.SelectorList, recursive: bool = False) -> bool:
"""Match future child."""
match = False
if recursive:
children = self.get_descendants # type: Callable[..., Iterator['bs4.Tag']]
else:
children = self.get_children
for child in children(parent, no_iframe=self.iframe_restrict):
match = self.match_selectors(child, relation)
if match:
break
return match
def match_future_relations(self, el: 'bs4.Tag', relation: ct.SelectorList) -> bool:
"""Match future relationship."""
found = False
# I don't think this can ever happen, but it makes `mypy` happy
if isinstance(relation[0], ct.SelectorNull): # pragma: no cover
return found
if relation[0].rel_type == REL_HAS_PARENT:
found = self.match_future_child(el, relation, True)
elif relation[0].rel_type == REL_HAS_CLOSE_PARENT:
found = self.match_future_child(el, relation)
elif relation[0].rel_type == REL_HAS_SIBLING:
sibling = self.get_next(el)
while not found and sibling:
found = self.match_selectors(sibling, relation)
sibling = self.get_next(sibling)
elif relation[0].rel_type == REL_HAS_CLOSE_SIBLING:
sibling = self.get_next(el)
if sibling and self.is_tag(sibling):
found = self.match_selectors(sibling, relation)
return found
def match_relations(self, el: 'bs4.Tag', relation: ct.SelectorList) -> bool:
"""Match relationship to other elements."""
found = False
if isinstance(relation[0], ct.SelectorNull) or relation[0].rel_type is None:
return found
if relation[0].rel_type.startswith(':'):
found = self.match_future_relations(el, relation)
else:
found = self.match_past_relations(el, relation)
return found
def match_id(self, el: 'bs4.Tag', ids: Tuple[str, ...]) -> bool:
"""Match element's ID."""
found = True
for i in ids:
if i != self.get_attribute_by_name(el, 'id', ''):
found = False
break
return found
def match_classes(self, el: 'bs4.Tag', classes: Tuple[str, ...]) -> bool:
"""Match element's classes."""
current_classes = self.get_classes(el)
found = True
for c in classes:
if c not in current_classes:
found = False
break
return found
def match_root(self, el: 'bs4.Tag') -> bool:
"""Match element as root."""
is_root = self.is_root(el)
if is_root:
sibling = self.get_previous(el, tags=False)
while is_root and sibling is not None:
if (
self.is_tag(sibling) or (self.is_content_string(sibling) and sibling.strip()) or
self.is_cdata(sibling)
):
is_root = False
else:
sibling = self.get_previous(sibling, tags=False)
if is_root:
sibling = self.get_next(el, tags=False)
while is_root and sibling is not None:
if (
self.is_tag(sibling) or (self.is_content_string(sibling) and sibling.strip()) or
self.is_cdata(sibling)
):
is_root = False
else:
sibling = self.get_next(sibling, tags=False)
return is_root
def match_scope(self, el: 'bs4.Tag') -> bool:
"""Match element as scope."""
return self.scope is el
def match_nth_tag_type(self, el: 'bs4.Tag', child: 'bs4.Tag') -> bool:
"""Match tag type for `nth` matches."""
return (
(self.get_tag(child) == self.get_tag(el)) and
(self.get_tag_ns(child) == self.get_tag_ns(el))
)
def match_nth(self, el: 'bs4.Tag', nth: 'bs4.Tag') -> bool:
"""Match `nth` elements."""
matched = True
for n in nth:
matched = False
if n.selectors and not self.match_selectors(el, n.selectors):
break
parent = self.get_parent(el)
if parent is None:
parent = self.create_fake_parent(el)
last = n.last
last_index = len(parent) - 1
index = last_index if last else 0
relative_index = 0
a = n.a
b = n.b
var = n.n
count = 0
count_incr = 1
factor = -1 if last else 1
idx = last_idx = a * count + b if var else a
# We can only adjust bounds within a variable index
if var:
# Abort if our nth index is out of bounds and only getting further out of bounds as we increment.
# Otherwise, increment to try to get in bounds.
adjust = None
while idx < 1 or idx > last_index:
if idx < 0:
diff_low = 0 - idx
if adjust is not None and adjust == 1:
break
adjust = -1
count += count_incr
idx = last_idx = a * count + b if var else a
diff = 0 - idx
if diff >= diff_low:
break
else:
diff_high = idx - last_index
if adjust is not None and adjust == -1:
break
adjust = 1
count += count_incr
idx = last_idx = a * count + b if var else a
diff = idx - last_index
if diff >= diff_high:
break
diff_high = diff
# If a < 0, our count is working backwards, so floor the index by increasing the count.
# Find the count that yields the lowest, in bound value and use that.
# Lastly reverse count increment so that we'll increase our index.
lowest = count
if a < 0:
while idx >= 1:
lowest = count
count += count_incr
idx = last_idx = a * count + b if var else a
count_incr = -1
count = lowest
idx = last_idx = a * count + b if var else a
# Evaluate elements while our calculated nth index is still in range
while 1 <= idx <= last_index + 1:
child = None
# Evaluate while our child index is still in range.
for child in self.get_children(parent, start=index, reverse=factor < 0, tags=False):
index += factor
if not self.is_tag(child):
continue
# Handle `of S` in `nth-child`
if n.selectors and not self.match_selectors(child, n.selectors):
continue
# Handle `of-type`
if n.of_type and not self.match_nth_tag_type(el, child):
continue
relative_index += 1
if relative_index == idx:
if child is el:
matched = True
else:
break
if child is el:
break
if child is el:
break
last_idx = idx
count += count_incr
if count < 0:
# Count is counting down and has now ventured into invalid territory.
break
idx = a * count + b if var else a
if last_idx == idx:
break
if not matched:
break
return matched
def match_empty(self, el: 'bs4.Tag') -> bool:
"""Check if element is empty (if requested)."""
is_empty = True
for child in self.get_children(el, tags=False):
if self.is_tag(child):
is_empty = False
break
elif self.is_content_string(child) and RE_NOT_EMPTY.search(child):
is_empty = False
break
return is_empty
def match_subselectors(self, el: 'bs4.Tag', selectors: Tuple[ct.SelectorList, ...]) -> bool:
"""Match selectors."""
match = True
for sel in selectors:
if not self.match_selectors(el, sel):
match = False
return match
def match_contains(self, el: 'bs4.Tag', contains: Tuple[ct.SelectorContains, ...]) -> bool:
"""Match element if it contains text."""
match = True
content = None # type: Optional[Union[str, Sequence[str]]]
for contain_list in contains:
if content is None:
if contain_list.own:
content = self.get_own_text(el, no_iframe=self.is_html)
else:
content = self.get_text(el, no_iframe=self.is_html)
found = False
for text in contain_list.text:
if contain_list.own:
for c in content:
if text in c:
found = True
break
if found:
break
else:
if text in content:
found = True
break
if not found:
match = False
return match
def match_default(self, el: 'bs4.Tag') -> bool:
"""Match default."""
match = False
# Find this input's form
form = None
parent = self.get_parent(el, no_iframe=True)
while parent and form is None:
if self.get_tag(parent) == 'form' and self.is_html_tag(parent):
form = parent
else:
parent = self.get_parent(parent, no_iframe=True)
# Look in form cache to see if we've already located its default button
found_form = False
for f, t in self.cached_default_forms:
if f is form:
found_form = True
if t is el:
match = True
break
# We didn't have the form cached, so look for its default button
if not found_form:
for child in self.get_descendants(form, no_iframe=True):
name = self.get_tag(child)
# Can't do nested forms (haven't figured out why we never hit this)
if name == 'form': # pragma: no cover
break
if name in ('input', 'button'):
v = self.get_attribute_by_name(child, 'type', '')
if v and util.lower(v) == 'submit':
self.cached_default_forms.append((form, child))
if el is child:
match = True
break
return match
def match_indeterminate(self, el: 'bs4.Tag') -> bool:
"""Match default."""
match = False
name = cast(str, self.get_attribute_by_name(el, 'name'))
def get_parent_form(el: 'bs4.Tag') -> Optional['bs4.Tag']:
"""Find this input's form."""
form = None
parent = self.get_parent(el, no_iframe=True)
while form is None:
if self.get_tag(parent) == 'form' and self.is_html_tag(parent):
form = parent
break
last_parent = parent
parent = self.get_parent(parent, no_iframe=True)
if parent is None:
form = last_parent
break
return form
form = get_parent_form(el)
# Look in form cache to see if we've already evaluated that its fellow radio buttons are indeterminate
found_form = False
for f, n, i in self.cached_indeterminate_forms:
if f is form and n == name:
found_form = True
if i is True:
match = True
break
# We didn't have the form cached, so validate that the radio button is indeterminate
if not found_form:
checked = False
for child in self.get_descendants(form, no_iframe=True):
if child is el:
continue
tag_name = self.get_tag(child)
if tag_name == 'input':
is_radio = False
check = False
has_name = False
for k, v in self.iter_attributes(child):
if util.lower(k) == 'type' and util.lower(v) == 'radio':
is_radio = True
elif util.lower(k) == 'name' and v == name:
has_name = True
elif util.lower(k) == 'checked':
check = True
if is_radio and check and has_name and get_parent_form(child) is form:
checked = True
break
if checked:
break
if not checked:
match = True
self.cached_indeterminate_forms.append((form, name, match))
return match
def match_lang(self, el: 'bs4.Tag', langs: Tuple[ct.SelectorLang, ...]) -> bool:
"""Match languages."""
match = False
has_ns = self.supports_namespaces()
root = self.root
has_html_namespace = self.has_html_namespace
# Walk parents looking for `lang` (HTML) or `xml:lang` XML property.
parent = el
found_lang = None
last = None
while not found_lang:
has_html_ns = self.has_html_ns(parent)
for k, v in self.iter_attributes(parent):
attr_ns, attr = self.split_namespace(parent, k)
if (
((not has_ns or has_html_ns) and (util.lower(k) if not self.is_xml else k) == 'lang') or
(
has_ns and not has_html_ns and attr_ns == NS_XML and
(util.lower(attr) if not self.is_xml and attr is not None else attr) == 'lang'
)
):
found_lang = v
break
last = parent
parent = self.get_parent(parent, no_iframe=self.is_html)
if parent is None:
root = last
has_html_namespace = self.has_html_ns(root)
parent = last
break
# Use cached meta language.
if not found_lang and self.cached_meta_lang:
for cache in self.cached_meta_lang:
if root is cache[0]:
found_lang = cache[1]
# If we couldn't find a language, and the document is HTML, look to meta to determine language.
if found_lang is None and (not self.is_xml or (has_html_namespace and root.name == 'html')):
# Find head
found = False
for tag in ('html', 'head'):
found = False
for child in self.get_children(parent, no_iframe=self.is_html):
if self.get_tag(child) == tag and self.is_html_tag(child):
found = True
parent = child
break
if not found: # pragma: no cover
break
# Search meta tags
if found:
for child in parent:
if self.is_tag(child) and self.get_tag(child) == 'meta' and self.is_html_tag(parent):
c_lang = False
content = None
for k, v in self.iter_attributes(child):
if util.lower(k) == 'http-equiv' and util.lower(v) == 'content-language':
c_lang = True
if util.lower(k) == 'content':
content = v
if c_lang and content:
found_lang = content
self.cached_meta_lang.append((cast(str, root), cast(str, found_lang)))
break
if found_lang:
break
if not found_lang:
self.cached_meta_lang.append((cast(str, root), ''))
# If we determined a language, compare.
if found_lang:
for patterns in langs:
match = False
for pattern in patterns:
if self.extended_language_filter(pattern, cast(str, found_lang)):
match = True
if not match:
break
return match
def match_dir(self, el: 'bs4.Tag', directionality: int) -> bool:
"""Check directionality."""
# If we have to match both left and right, we can't match either.
if directionality & ct.SEL_DIR_LTR and directionality & ct.SEL_DIR_RTL:
return False
if el is None or not self.is_html_tag(el):
return False
# Element has defined direction of left to right or right to left
direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(el, 'dir', '')), None)
if direction not in (None, 0):
return direction == directionality
# Element is the document element (the root) and no direction assigned, assume left to right.
is_root = self.is_root(el)
if is_root and direction is None:
return ct.SEL_DIR_LTR == directionality
# If `input[type=telephone]` and no direction is assigned, assume left to right.
name = self.get_tag(el)
is_input = name == 'input'
is_textarea = name == 'textarea'
is_bdi = name == 'bdi'
itype = util.lower(self.get_attribute_by_name(el, 'type', '')) if is_input else ''
if is_input and itype == 'tel' and direction is None:
return ct.SEL_DIR_LTR == directionality
# Auto handling for text inputs
if ((is_input and itype in ('text', 'search', 'tel', 'url', 'email')) or is_textarea) and direction == 0:
if is_textarea:
temp = []
for node in self.get_contents(el, no_iframe=True):
if self.is_content_string(node):
temp.append(node)
value = ''.join(temp)
else:
value = cast(str, self.get_attribute_by_name(el, 'value', ''))
if value:
for c in value:
bidi = unicodedata.bidirectional(c)
if bidi in ('AL', 'R', 'L'):
direction = ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
return direction == directionality
# Assume left to right
return ct.SEL_DIR_LTR == directionality
elif is_root:
return ct.SEL_DIR_LTR == directionality
return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
# Auto handling for `bdi` and other non text inputs.
if (is_bdi and direction is None) or direction == 0:
direction = self.find_bidi(el)
if direction is not None:
return direction == directionality
elif is_root:
return ct.SEL_DIR_LTR == directionality
return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
# Match parents direction
return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
def match_range(self, el: 'bs4.Tag', condition: int) -> bool:
"""
Match range.
Behavior is modeled after what we see in browsers. Browsers seem to evaluate
if the value is out of range, and if not, it is in range. So a missing value
will not evaluate as out of range; therefore, the value is in range. Personally, I
feel like this should evaluate as neither in nor out of range.
"""
out_of_range = False
itype = util.lower(self.get_attribute_by_name(el, 'type'))
mn = Inputs.parse_value(itype, cast(str, self.get_attribute_by_name(el, 'min', None)))
mx = Inputs.parse_value(itype, cast(str, self.get_attribute_by_name(el, 'max', None)))
# There is no valid min or max, so we cannot evaluate a range
if mn is None and mx is None:
return False
value = Inputs.parse_value(itype, cast(str, self.get_attribute_by_name(el, 'value', None)))
if value is not None:
if itype in ("date", "datetime-local", "month", "week", "number", "range"):
if mn is not None and value < mn:
out_of_range = True
if not out_of_range and mx is not None and value > mx:
out_of_range = True
elif itype == "time":
if mn is not None and mx is not None and mn > mx:
# Time is periodic, so this is a reversed/discontinuous range
if value < mn and value > mx:
out_of_range = True
else:
if mn is not None and value < mn:
out_of_range = True
if not out_of_range and mx is not None and value > mx:
out_of_range = True
return not out_of_range if condition & ct.SEL_IN_RANGE else out_of_range
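# Illustrative semantics (a sketch of the behavior implemented above):
#
#   # <input type="number" min="1" max="5" value="3">  -> :in-range
#   # <input type="number" min="1" max="5" value="7">  -> :out-of-range
#   # <input type="number" min="1" max="5">            -> :in-range (missing value)
#   # <input type="time" min="22:00" max="02:00" value="23:30">
#   #                                                  -> :in-range (period wraps midnight)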
def match_defined(self, el: 'bs4.Tag') -> bool:
"""
Match defined.
`:defined` is related to custom elements in a browser.
- If the document is XML (not XHTML), all tags will match.
- Tags that are not custom (don't have a hyphen) are marked defined.
- If the tag has a prefix (with or without a namespace), it will not match.
This of course requires the parser to provide us with the proper prefix and namespace info;
if it doesn't, there is nothing we can do.
"""
name = self.get_tag(el)
return (
name is not None and (
name.find('-') == -1 or
name.find(':') != -1 or
self.get_prefix(el) is not None
)
)
def match_placeholder_shown(self, el: 'bs4.Tag') -> bool:
"""
Match placeholder shown according to HTML spec.
- Text areas should be checked for whether they have content; a single newline does not count as content.
"""
match = False
content = self.get_text(el)
if content in ('', '\n'):
match = True
return match
def match_selectors(self, el: 'bs4.Tag', selectors: ct.SelectorList) -> bool:
"""Check if element matches one of the selectors."""
match = False
is_not = selectors.is_not
is_html = selectors.is_html
# Internal selector lists that use the HTML flag will automatically get the `html` namespace.
if is_html:
namespaces = self.namespaces
iframe_restrict = self.iframe_restrict
self.namespaces = {'html': NS_XHTML}
self.iframe_restrict = True
if not is_html or self.is_html:
for selector in selectors:
match = is_not
# We have an un-matchable situation (like `:focus`, as you can't focus an element in this environment)
if isinstance(selector, ct.SelectorNull):
continue
# Verify tag matches
if not self.match_tag(el, selector.tag):
continue
# Verify tag is defined
if selector.flags & ct.SEL_DEFINED and not self.match_defined(el):
continue
# Verify element is root
if selector.flags & ct.SEL_ROOT and not self.match_root(el):
continue
# Verify element is scope
if selector.flags & ct.SEL_SCOPE and not self.match_scope(el):
continue
# Verify element has placeholder shown
if selector.flags & ct.SEL_PLACEHOLDER_SHOWN and not self.match_placeholder_shown(el):
continue
# Verify `nth` matches
if not self.match_nth(el, selector.nth):
continue
if selector.flags & ct.SEL_EMPTY and not self.match_empty(el):
continue
# Verify id matches
if selector.ids and not self.match_id(el, selector.ids):
continue
# Verify classes match
if selector.classes and not self.match_classes(el, selector.classes):
continue
# Verify attribute(s) match
if not self.match_attributes(el, selector.attributes):
continue
# Verify ranges
if selector.flags & RANGES and not self.match_range(el, selector.flags & RANGES):
continue
# Verify language patterns
if selector.lang and not self.match_lang(el, selector.lang):
continue
# Verify pseudo selector patterns
if selector.selectors and not self.match_subselectors(el, selector.selectors):
continue
# Verify relationship selectors
if selector.relation and not self.match_relations(el, selector.relation):
continue
# Validate that the current default selector match corresponds to the first submit button in the form
if selector.flags & ct.SEL_DEFAULT and not self.match_default(el):
continue
# Validate that the unset radio button is among radio buttons with the same name in a form that are
# also not set.
if selector.flags & ct.SEL_INDETERMINATE and not self.match_indeterminate(el):
continue
# Validate element directionality
if selector.flags & DIR_FLAGS and not self.match_dir(el, selector.flags & DIR_FLAGS):
continue
# Validate that the tag contains the specified text.
if selector.contains and not self.match_contains(el, selector.contains):
continue
match = not is_not
break
# Restore actual namespaces being used for external selector lists
if is_html:
self.namespaces = namespaces
self.iframe_restrict = iframe_restrict
return match
def select(self, limit: int = 0) -> Iterator['bs4.Tag']:
"""Match all tags under the targeted tag."""
lim = None if limit < 1 else limit
for child in self.get_descendants(self.tag):
if self.match(child):
yield child
if lim is not None:
lim -= 1
if lim < 1:
break
def closest(self) -> Optional['bs4.Tag']:
"""Match closest ancestor."""
current = self.tag
closest = None
while closest is None and current is not None:
if self.match(current):
closest = current
else:
current = self.get_parent(current)
return closest
def filter(self) -> List['bs4.Tag']: # noqa A001
"""Filter tag's children."""
return [tag for tag in self.get_contents(self.tag) if not self.is_navigable_string(tag) and self.match(tag)]
def match(self, el: 'bs4.Tag') -> bool:
"""Match."""
return not self.is_doc(el) and self.is_tag(el) and self.match_selectors(el, self.selectors)
class SoupSieve(ct.Immutable):
"""Compiled Soup Sieve selector matching object."""
pattern: str
selectors: ct.SelectorList
namespaces: Optional[ct.Namespaces]
custom: Dict[str, str]
flags: int
__slots__ = ("pattern", "selectors", "namespaces", "custom", "flags", "_hash")
def __init__(
self,
pattern: str,
selectors: ct.SelectorList,
namespaces: Optional[ct.Namespaces],
custom: Optional[ct.CustomSelectors],
flags: int
):
"""Initialize."""
super().__init__(
pattern=pattern,
selectors=selectors,
namespaces=namespaces,
custom=custom,
flags=flags
)
def match(self, tag: 'bs4.Tag') -> bool:
"""Match."""
return CSSMatch(self.selectors, tag, self.namespaces, self.flags).match(tag)
def closest(self, tag: 'bs4.Tag') -> Optional['bs4.Tag']:
"""Match closest ancestor."""
return CSSMatch(self.selectors, tag, self.namespaces, self.flags).closest()
def filter(self, iterable: Iterable['bs4.Tag']) -> List['bs4.Tag']: # noqa A001
"""
Filter.
`CSSMatch` can cache certain searches for tags of the same document,
so if we are given a tag, all tags are from the same document,
and we can take advantage of the optimization.
Any other kind of iterable could have tags from different documents or detached tags,
so for those, we use a new `CSSMatch` for each item in the iterable.
"""
if CSSMatch.is_tag(iterable):
return CSSMatch(self.selectors, iterable, self.namespaces, self.flags).filter()
else:
return [node for node in iterable if not CSSMatch.is_navigable_string(node) and self.match(node)]
def select_one(self, tag: 'bs4.Tag') -> Optional['bs4.Tag']:
"""Select a single tag."""
tags = self.select(tag, limit=1)
return tags[0] if tags else None
def select(self, tag: 'bs4.Tag', limit: int = 0) -> List['bs4.Tag']:
"""Select the specified tags."""
return list(self.iselect(tag, limit))
def iselect(self, tag: 'bs4.Tag', limit: int = 0) -> Iterator['bs4.Tag']:
"""Iterate the specified tags."""
for el in CSSMatch(self.selectors, tag, self.namespaces, self.flags).select(limit):
yield el
def __repr__(self) -> str: # pragma: no cover
"""Representation."""
return "SoupSieve(pattern={!r}, namespaces={!r}, custom={!r}, flags={!r})".format(
self.pattern,
self.namespaces,
self.custom,
self.flags
)
__str__ = __repr__
ct.pickle_register(SoupSieve)
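# Illustrative end-to-end usage (a sketch; callers normally go through the
# package-level helpers such as `soupsieve.compile`, `select`, and `match`
# rather than instantiating `SoupSieve` directly):
#
#   import bs4
#   import soupsieve as sv
#   soup = bs4.BeautifulSoup('<div><p class="a">one</p><p>two</p></div>', 'html.parser')
#   sv.select('p.a', soup)        # [<p class="a">one</p>]
#   sv.match('div > p', soup.p)   # True
#   sv.closest('div', soup.p)     # the enclosing <div>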
| size: 58,167 | language: Python | extension: .py | total_lines: 1,314 | avg_line_length: 31.350837 | max_line_length: 120 | alphanum_fraction: 0.534843 | repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| id: 8,563 | file_name: css_parser.py | file_path: rembo10_headphones/lib/soupsieve/css_parser.py |
"""CSS selector parser."""
import re
from functools import lru_cache
from . import util
from . import css_match as cm
from . import css_types as ct
from .util import SelectorSyntaxError
import warnings
from typing import Optional, Dict, Match, Tuple, Type, Any, List, Union, Iterator, cast
UNICODE_REPLACEMENT_CHAR = 0xFFFD
# Simple pseudo classes that take no parameters
PSEUDO_SIMPLE = {
":any-link",
":empty",
":first-child",
":first-of-type",
":in-range",
":out-of-range",
":last-child",
":last-of-type",
":link",
":only-child",
":only-of-type",
":root",
':checked',
':default',
':disabled',
':enabled',
':indeterminate',
':optional',
':placeholder-shown',
':read-only',
':read-write',
':required',
':scope',
':defined'
}
# Supported, simple pseudo classes that match nothing in the Soup Sieve environment
PSEUDO_SIMPLE_NO_MATCH = {
':active',
':current',
':focus',
':focus-visible',
':focus-within',
':future',
':host',
':hover',
':local-link',
':past',
':paused',
':playing',
':target',
':target-within',
':user-invalid',
':visited'
}
# Complex pseudo classes that take selector lists
PSEUDO_COMPLEX = {
':contains',
':-soup-contains',
':-soup-contains-own',
':has',
':is',
':matches',
':not',
':where'
}
PSEUDO_COMPLEX_NO_MATCH = {
':current',
':host',
':host-context'
}
# Complex pseudo classes that take very specific parameters and are handled special
PSEUDO_SPECIAL = {
':dir',
':lang',
':nth-child',
':nth-last-child',
':nth-last-of-type',
':nth-of-type'
}
PSEUDO_SUPPORTED = PSEUDO_SIMPLE | PSEUDO_SIMPLE_NO_MATCH | PSEUDO_COMPLEX | PSEUDO_COMPLEX_NO_MATCH | PSEUDO_SPECIAL
# Sub-patterns parts
# Whitespace
NEWLINE = r'(?:\r\n|(?!\r\n)[\n\f\r])'
WS = r'(?:[ \t]|{})'.format(NEWLINE)
# Comments
COMMENTS = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
# Whitespace with comments included
WSC = r'(?:{ws}|{comments})'.format(ws=WS, comments=COMMENTS)
# CSS escapes
CSS_ESCAPES = r'(?:\\(?:[a-f0-9]{{1,6}}{ws}?|[^\r\n\f]|$))'.format(ws=WS)
CSS_STRING_ESCAPES = r'(?:\\(?:[a-f0-9]{{1,6}}{ws}?|[^\r\n\f]|$|{nl}))'.format(ws=WS, nl=NEWLINE)
# CSS Identifier
IDENTIFIER = r'''
(?:(?:-?(?:[^\x00-\x2f\x30-\x40\x5B-\x5E\x60\x7B-\x9f]|{esc})+|--)
(?:[^\x00-\x2c\x2e\x2f\x3A-\x40\x5B-\x5E\x60\x7B-\x9f]|{esc})*)
'''.format(esc=CSS_ESCAPES)
# `nth` content
NTH = r'(?:[-+])?(?:[0-9]+n?|n)(?:(?<=n){ws}*(?:[-+]){ws}*(?:[0-9]+))?'.format(ws=WSC)
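# Illustrative strings the `nth` sub-pattern accepts (a sketch): '5', 'n',
# '2n', '2n+1', '-n+3', '+3n - 2'. The `even`/`odd` keywords are handled
# separately in the pseudo-class patterns below.
#
#   re.compile(NTH, re.X).fullmatch('2n+1') is not None   # True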
# Value: quoted string or identifier
VALUE = r'''
(?:"(?:\\(?:.|{nl})|[^\\"\r\n\f]+)*?"|'(?:\\(?:.|{nl})|[^\\'\r\n\f]+)*?'|{ident}+)
'''.format(nl=NEWLINE, ident=IDENTIFIER)
# Attribute value comparison. `!=` is handled special as it is non-standard.
ATTR = r'''
(?:{ws}*(?P<cmp>[!~^|*$]?=){ws}*(?P<value>{value})(?:{ws}+(?P<case>[is]))?)?{ws}*\]
'''.format(ws=WSC, value=VALUE)
# Selector patterns
# IDs (`#id`)
PAT_ID = r'\#{ident}'.format(ident=IDENTIFIER)
# Classes (`.class`)
PAT_CLASS = r'\.{ident}'.format(ident=IDENTIFIER)
# Prefix:Tag (`prefix|tag`)
PAT_TAG = r'(?P<tag_ns>(?:{ident}|\*)?\|)?(?P<tag_name>{ident}|\*)'.format(ident=IDENTIFIER)
# Attributes (`[attr]`, `[attr=value]`, etc.)
PAT_ATTR = r'''
\[{ws}*(?P<attr_ns>(?:{ident}|\*)?\|)?(?P<attr_name>{ident}){attr}
'''.format(ws=WSC, ident=IDENTIFIER, attr=ATTR)
# Pseudo class (`:pseudo-class`, `:pseudo-class(`)
PAT_PSEUDO_CLASS = r'(?P<name>:{ident})(?P<open>\({ws}*)?'.format(ws=WSC, ident=IDENTIFIER)
# Pseudo class special patterns. Matches `:pseudo-class(` for special case pseudo classes.
PAT_PSEUDO_CLASS_SPECIAL = r'(?P<name>:{ident})(?P<open>\({ws}*)'.format(ws=WSC, ident=IDENTIFIER)
# Custom pseudo class (`:--custom-pseudo`)
PAT_PSEUDO_CLASS_CUSTOM = r'(?P<name>:(?=--){ident})'.format(ident=IDENTIFIER)
# Closing pseudo group (`)`)
PAT_PSEUDO_CLOSE = r'{ws}*\)'.format(ws=WSC)
# Pseudo element (`::pseudo-element`)
PAT_PSEUDO_ELEMENT = r':{}'.format(PAT_PSEUDO_CLASS)
# At rule (`@page`, etc.) (not supported)
PAT_AT_RULE = r'@P{ident}'.format(ident=IDENTIFIER)
# Pseudo class `nth-child` (`:nth-child(an+b [of S]?)`, `:first-child`, etc.)
PAT_PSEUDO_NTH_CHILD = r'''
(?P<pseudo_nth_child>{name}
(?P<nth_child>{nth}|even|odd))(?:{wsc}*\)|(?P<of>{comments}*{ws}{wsc}*of{comments}*{ws}{wsc}*))
'''.format(name=PAT_PSEUDO_CLASS_SPECIAL, wsc=WSC, comments=COMMENTS, ws=WS, nth=NTH)
# Pseudo class `nth-of-type` (`:nth-of-type(an+b)`, `:first-of-type`, etc.)
PAT_PSEUDO_NTH_TYPE = r'''
(?P<pseudo_nth_type>{name}
(?P<nth_type>{nth}|even|odd)){ws}*\)
'''.format(name=PAT_PSEUDO_CLASS_SPECIAL, ws=WSC, nth=NTH)
# Pseudo class language (`:lang("*-de", en)`)
PAT_PSEUDO_LANG = r'{name}(?P<values>{value}(?:{ws}*,{ws}*{value})*){ws}*\)'.format(
name=PAT_PSEUDO_CLASS_SPECIAL, ws=WSC, value=VALUE
)
# Pseudo class direction (`:dir(ltr)`)
PAT_PSEUDO_DIR = r'{name}(?P<dir>ltr|rtl){ws}*\)'.format(name=PAT_PSEUDO_CLASS_SPECIAL, ws=WSC)
# Combining characters (`>`, `~`, ` `, `+`, `,`)
PAT_COMBINE = r'{wsc}*?(?P<relation>[,+>~]|{ws}(?![,+>~])){wsc}*'.format(ws=WS, wsc=WSC)
# Extra: Contains (`:contains(text)`)
PAT_PSEUDO_CONTAINS = r'{name}(?P<values>{value}(?:{ws}*,{ws}*{value})*){ws}*\)'.format(
name=PAT_PSEUDO_CLASS_SPECIAL, ws=WSC, value=VALUE
)
# Regular expressions
# CSS escape pattern
RE_CSS_ESC = re.compile(r'(?:(\\[a-f0-9]{{1,6}}{ws}?)|(\\[^\r\n\f])|(\\$))'.format(ws=WSC), re.I)
RE_CSS_STR_ESC = re.compile(
r'(?:(\\[a-f0-9]{{1,6}}{ws}?)|(\\[^\r\n\f])|(\\$)|(\\{nl}))'.format(ws=WS, nl=NEWLINE), re.I
)
# Pattern to break up `nth` specifiers
RE_NTH = re.compile(
r'(?P<s1>[-+])?(?P<a>[0-9]+n?|n)(?:(?<=n){ws}*(?P<s2>[-+]){ws}*(?P<b>[0-9]+))?'.format(ws=WSC),
re.I
)
# Pattern to iterate multiple values.
RE_VALUES = re.compile(r'(?:(?P<value>{value})|(?P<split>{ws}*,{ws}*))'.format(ws=WSC, value=VALUE), re.X)
# Whitespace checks
RE_WS = re.compile(WS)
RE_WS_BEGIN = re.compile('^{}*'.format(WSC))
RE_WS_END = re.compile('{}*$'.format(WSC))
RE_CUSTOM = re.compile(r'^{}$'.format(PAT_PSEUDO_CLASS_CUSTOM), re.X)
# Constants
# List split token
COMMA_COMBINATOR = ','
# Relation token for descendant
WS_COMBINATOR = " "
# Parse flags
FLG_PSEUDO = 0x01
FLG_NOT = 0x02
FLG_RELATIVE = 0x04
FLG_DEFAULT = 0x08
FLG_HTML = 0x10
FLG_INDETERMINATE = 0x20
FLG_OPEN = 0x40
FLG_IN_RANGE = 0x80
FLG_OUT_OF_RANGE = 0x100
FLG_PLACEHOLDER_SHOWN = 0x200
FLG_FORGIVE = 0x400
# Maximum cached patterns to store
_MAXCACHE = 500
@lru_cache(maxsize=_MAXCACHE)
def _cached_css_compile(
pattern: str,
namespaces: Optional[ct.Namespaces],
custom: Optional[ct.CustomSelectors],
flags: int
) -> cm.SoupSieve:
"""Cached CSS compile."""
custom_selectors = process_custom(custom)
return cm.SoupSieve(
pattern,
CSSParser(
pattern,
custom=custom_selectors,
flags=flags
).process_selectors(),
namespaces,
custom,
flags
)
def _purge_cache() -> None:
"""Purge the cache."""
_cached_css_compile.cache_clear()
def process_custom(custom: Optional[ct.CustomSelectors]) -> Dict[str, Union[str, ct.SelectorList]]:
"""Process custom."""
custom_selectors = {}
if custom is not None:
for key, value in custom.items():
name = util.lower(key)
if RE_CUSTOM.match(name) is None:
raise SelectorSyntaxError("The name '{}' is not a valid custom pseudo-class name".format(name))
if name in custom_selectors:
raise KeyError("The custom selector '{}' has already been registered".format(name))
custom_selectors[css_unescape(name)] = value
return custom_selectors
def css_unescape(content: str, string: bool = False) -> str:
"""
Unescape CSS value.
Strings allow for spanning the value on multiple strings by escaping a new line.
"""
def replace(m: Match[str]) -> str:
"""Replace with the appropriate substitute."""
if m.group(1):
codepoint = int(m.group(1)[1:], 16)
if codepoint == 0:
codepoint = UNICODE_REPLACEMENT_CHAR
value = chr(codepoint)
elif m.group(2):
value = m.group(2)[1:]
elif m.group(3):
value = '\ufffd'
else:
value = ''
return value
return (RE_CSS_ESC if not string else RE_CSS_STR_ESC).sub(replace, content)
def escape(ident: str) -> str:
"""Escape identifier."""
string = []
length = len(ident)
start_dash = length > 0 and ident[0] == '-'
if length == 1 and start_dash:
# Need to escape identifier that is a single `-` with no other characters
string.append('\\{}'.format(ident))
else:
for index, c in enumerate(ident):
codepoint = ord(c)
if codepoint == 0x00:
string.append('\ufffd')
elif (0x01 <= codepoint <= 0x1F) or codepoint == 0x7F:
string.append('\\{:x} '.format(codepoint))
elif (index == 0 or (start_dash and index == 1)) and (0x30 <= codepoint <= 0x39):
string.append('\\{:x} '.format(codepoint))
elif (
codepoint in (0x2D, 0x5F) or codepoint >= 0x80 or (0x30 <= codepoint <= 0x39) or
(0x30 <= codepoint <= 0x39) or (0x41 <= codepoint <= 0x5A) or (0x61 <= codepoint <= 0x7A)
):
string.append(c)
else:
string.append('\\{}'.format(c))
return ''.join(string)
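# Illustrative escapes (a sketch):
#
#   escape('1st')   # -> '\\31 st' (leading digit hex-escaped)
#   escape('-')     # -> '\\-'     (a lone dash must be escaped)
#   escape('a.b')   # -> 'a\\.b'   (punctuation backslash-escaped)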
class SelectorPattern:
"""Selector pattern."""
def __init__(self, name: str, pattern: str) -> None:
"""Initialize."""
self.name = name
self.re_pattern = re.compile(pattern, re.I | re.X | re.U)
def get_name(self) -> str:
"""Get name."""
return self.name
def match(self, selector: str, index: int, flags: int) -> Optional[Match[str]]:
"""Match the selector."""
return self.re_pattern.match(selector, index)
class SpecialPseudoPattern(SelectorPattern):
"""Selector pattern."""
def __init__(self, patterns: Tuple[Tuple[str, Tuple[str, ...], str, Type[SelectorPattern]], ...]) -> None:
"""Initialize."""
self.patterns = {}
for p in patterns:
name = p[0]
pattern = p[3](name, p[2])
for pseudo in p[1]:
self.patterns[pseudo] = pattern
self.matched_name = None # type: Optional[SelectorPattern]
self.re_pseudo_name = re.compile(PAT_PSEUDO_CLASS_SPECIAL, re.I | re.X | re.U)
def get_name(self) -> str:
"""Get name."""
return '' if self.matched_name is None else self.matched_name.get_name()
def match(self, selector: str, index: int, flags: int) -> Optional[Match[str]]:
"""Match the selector."""
pseudo = None
m = self.re_pseudo_name.match(selector, index)
if m:
name = util.lower(css_unescape(m.group('name')))
pattern = self.patterns.get(name)
if pattern:
pseudo = pattern.match(selector, index, flags)
if pseudo:
self.matched_name = pattern
return pseudo
class _Selector:
"""
Intermediate selector class.
This stores selector data for a compound selector as we are acquiring them.
Once we are done collecting the data for a compound selector, we freeze
the data in an object that can be pickled and hashed.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initialize."""
self.tag = kwargs.get('tag', None) # type: Optional[ct.SelectorTag]
self.ids = kwargs.get('ids', []) # type: List[str]
self.classes = kwargs.get('classes', []) # type: List[str]
self.attributes = kwargs.get('attributes', []) # type: List[ct.SelectorAttribute]
self.nth = kwargs.get('nth', []) # type: List[ct.SelectorNth]
self.selectors = kwargs.get('selectors', []) # type: List[ct.SelectorList]
self.relations = kwargs.get('relations', []) # type: List[_Selector]
self.rel_type = kwargs.get('rel_type', None) # type: Optional[str]
self.contains = kwargs.get('contains', []) # type: List[ct.SelectorContains]
self.lang = kwargs.get('lang', []) # type: List[ct.SelectorLang]
self.flags = kwargs.get('flags', 0) # type: int
self.no_match = kwargs.get('no_match', False) # type: bool
def _freeze_relations(self, relations: List['_Selector']) -> ct.SelectorList:
"""Freeze relation."""
if relations:
sel = relations[0]
sel.relations.extend(relations[1:])
return ct.SelectorList([sel.freeze()])
else:
return ct.SelectorList()
def freeze(self) -> Union[ct.Selector, ct.SelectorNull]:
"""Freeze self."""
if self.no_match:
return ct.SelectorNull()
else:
return ct.Selector(
self.tag,
tuple(self.ids),
tuple(self.classes),
tuple(self.attributes),
tuple(self.nth),
tuple(self.selectors),
self._freeze_relations(self.relations),
self.rel_type,
tuple(self.contains),
tuple(self.lang),
self.flags
)
def __str__(self) -> str: # pragma: no cover
"""String representation."""
return (
'_Selector(tag={!r}, ids={!r}, classes={!r}, attributes={!r}, nth={!r}, selectors={!r}, '
'relations={!r}, rel_type={!r}, contains={!r}, lang={!r}, flags={!r}, no_match={!r})'
).format(
self.tag, self.ids, self.classes, self.attributes, self.nth, self.selectors,
self.relations, self.rel_type, self.contains, self.lang, self.flags, self.no_match
)
__repr__ = __str__
class CSSParser:
"""Parse CSS selectors."""
css_tokens = (
SelectorPattern("pseudo_close", PAT_PSEUDO_CLOSE),
SpecialPseudoPattern(
(
(
"pseudo_contains",
(':contains', ':-soup-contains', ':-soup-contains-own'),
PAT_PSEUDO_CONTAINS,
SelectorPattern
),
("pseudo_nth_child", (':nth-child', ':nth-last-child'), PAT_PSEUDO_NTH_CHILD, SelectorPattern),
("pseudo_nth_type", (':nth-of-type', ':nth-last-of-type'), PAT_PSEUDO_NTH_TYPE, SelectorPattern),
("pseudo_lang", (':lang',), PAT_PSEUDO_LANG, SelectorPattern),
("pseudo_dir", (':dir',), PAT_PSEUDO_DIR, SelectorPattern)
)
),
SelectorPattern("pseudo_class_custom", PAT_PSEUDO_CLASS_CUSTOM),
SelectorPattern("pseudo_class", PAT_PSEUDO_CLASS),
SelectorPattern("pseudo_element", PAT_PSEUDO_ELEMENT),
SelectorPattern("at_rule", PAT_AT_RULE),
SelectorPattern("id", PAT_ID),
SelectorPattern("class", PAT_CLASS),
SelectorPattern("tag", PAT_TAG),
SelectorPattern("attribute", PAT_ATTR),
SelectorPattern("combine", PAT_COMBINE)
)
def __init__(
self,
selector: str,
custom: Optional[Dict[str, Union[str, ct.SelectorList]]] = None,
flags: int = 0
) -> None:
"""Initialize."""
self.pattern = selector.replace('\x00', '\ufffd')
self.flags = flags
self.debug = self.flags & util.DEBUG
self.custom = {} if custom is None else custom
def parse_attribute_selector(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""Create attribute selector from the returned regex match."""
inverse = False
op = m.group('cmp')
case = util.lower(m.group('case')) if m.group('case') else None
ns = css_unescape(m.group('attr_ns')[:-1]) if m.group('attr_ns') else ''
attr = css_unescape(m.group('attr_name'))
is_type = False
pattern2 = None
value = ''
if case:
flags = (re.I if case == 'i' else 0) | re.DOTALL
elif util.lower(attr) == 'type':
flags = re.I | re.DOTALL
is_type = True
else:
flags = re.DOTALL
if op:
if m.group('value').startswith(('"', "'")):
value = css_unescape(m.group('value')[1:-1], True)
else:
value = css_unescape(m.group('value'))
if not op:
# Attribute name
pattern = None
elif op.startswith('^'):
# Value start with
pattern = re.compile(r'^%s.*' % re.escape(value), flags)
elif op.startswith('$'):
# Value ends with
pattern = re.compile(r'.*?%s$' % re.escape(value), flags)
elif op.startswith('*'):
# Value contains
pattern = re.compile(r'.*?%s.*' % re.escape(value), flags)
elif op.startswith('~'):
# Value contains word within space separated list
# `~=` should match nothing if it is empty or contains whitespace,
# so if either of these cases is present, use `[^\s\S]` which cannot be matched.
value = r'[^\s\S]' if not value or RE_WS.search(value) else re.escape(value)
pattern = re.compile(r'.*?(?:(?<=^)|(?<=[ \t\r\n\f]))%s(?=(?:[ \t\r\n\f]|$)).*' % value, flags)
elif op.startswith('|'):
# Value starts with word in dash separated list
pattern = re.compile(r'^%s(?:-.*)?$' % re.escape(value), flags)
else:
# Value matches
pattern = re.compile(r'^%s$' % re.escape(value), flags)
if op.startswith('!'):
# Equivalent to `:not([attr=value])`
inverse = True
if is_type and pattern:
pattern2 = re.compile(pattern.pattern)
# Append the attribute selector
sel_attr = ct.SelectorAttribute(attr, ns, pattern, pattern2)
if inverse:
# If we are using `!=`, we need to nest the pattern under a `:not()`.
sub_sel = _Selector()
sub_sel.attributes.append(sel_attr)
not_list = ct.SelectorList([sub_sel.freeze()], True, False)
sel.selectors.append(not_list)
else:
sel.attributes.append(sel_attr)
has_selector = True
return has_selector
def parse_tag_pattern(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""Parse tag pattern from regex match."""
prefix = css_unescape(m.group('tag_ns')[:-1]) if m.group('tag_ns') else None
tag = css_unescape(m.group('tag_name'))
sel.tag = ct.SelectorTag(tag, prefix)
has_selector = True
return has_selector
def parse_pseudo_class_custom(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""
Parse custom pseudo class alias.
Compile custom selectors as we need them. When compiling a custom selector,
set it to `None` in the dictionary so we can avoid an infinite loop.
"""
pseudo = util.lower(css_unescape(m.group('name')))
selector = self.custom.get(pseudo)
if selector is None:
raise SelectorSyntaxError(
"Undefined custom selector '{}' found at postion {}".format(pseudo, m.end(0)),
self.pattern,
m.end(0)
)
if not isinstance(selector, ct.SelectorList):
del self.custom[pseudo]
selector = CSSParser(
selector, custom=self.custom, flags=self.flags
).process_selectors(flags=FLG_PSEUDO)
self.custom[pseudo] = selector
sel.selectors.append(selector)
has_selector = True
return has_selector
def parse_pseudo_class(
self,
sel: _Selector,
m: Match[str],
has_selector: bool,
iselector: Iterator[Tuple[str, Match[str]]],
is_html: bool
) -> Tuple[bool, bool]:
"""Parse pseudo class."""
complex_pseudo = False
pseudo = util.lower(css_unescape(m.group('name')))
if m.group('open'):
complex_pseudo = True
if complex_pseudo and pseudo in PSEUDO_COMPLEX:
has_selector = self.parse_pseudo_open(sel, pseudo, has_selector, iselector, m.end(0))
elif not complex_pseudo and pseudo in PSEUDO_SIMPLE:
if pseudo == ':root':
sel.flags |= ct.SEL_ROOT
elif pseudo == ':defined':
sel.flags |= ct.SEL_DEFINED
is_html = True
elif pseudo == ':scope':
sel.flags |= ct.SEL_SCOPE
elif pseudo == ':empty':
sel.flags |= ct.SEL_EMPTY
elif pseudo in (':link', ':any-link'):
sel.selectors.append(CSS_LINK)
elif pseudo == ':checked':
sel.selectors.append(CSS_CHECKED)
elif pseudo == ':default':
sel.selectors.append(CSS_DEFAULT)
elif pseudo == ':indeterminate':
sel.selectors.append(CSS_INDETERMINATE)
elif pseudo == ":disabled":
sel.selectors.append(CSS_DISABLED)
elif pseudo == ":enabled":
sel.selectors.append(CSS_ENABLED)
elif pseudo == ":required":
sel.selectors.append(CSS_REQUIRED)
elif pseudo == ":optional":
sel.selectors.append(CSS_OPTIONAL)
elif pseudo == ":read-only":
sel.selectors.append(CSS_READ_ONLY)
elif pseudo == ":read-write":
sel.selectors.append(CSS_READ_WRITE)
elif pseudo == ":in-range":
sel.selectors.append(CSS_IN_RANGE)
elif pseudo == ":out-of-range":
sel.selectors.append(CSS_OUT_OF_RANGE)
elif pseudo == ":placeholder-shown":
sel.selectors.append(CSS_PLACEHOLDER_SHOWN)
elif pseudo == ':first-child':
sel.nth.append(ct.SelectorNth(1, False, 0, False, False, ct.SelectorList()))
elif pseudo == ':last-child':
sel.nth.append(ct.SelectorNth(1, False, 0, False, True, ct.SelectorList()))
elif pseudo == ':first-of-type':
sel.nth.append(ct.SelectorNth(1, False, 0, True, False, ct.SelectorList()))
elif pseudo == ':last-of-type':
sel.nth.append(ct.SelectorNth(1, False, 0, True, True, ct.SelectorList()))
elif pseudo == ':only-child':
sel.nth.extend(
[
ct.SelectorNth(1, False, 0, False, False, ct.SelectorList()),
ct.SelectorNth(1, False, 0, False, True, ct.SelectorList())
]
)
elif pseudo == ':only-of-type':
sel.nth.extend(
[
ct.SelectorNth(1, False, 0, True, False, ct.SelectorList()),
ct.SelectorNth(1, False, 0, True, True, ct.SelectorList())
]
)
has_selector = True
elif complex_pseudo and pseudo in PSEUDO_COMPLEX_NO_MATCH:
self.parse_selectors(iselector, m.end(0), FLG_PSEUDO | FLG_OPEN)
sel.no_match = True
has_selector = True
elif not complex_pseudo and pseudo in PSEUDO_SIMPLE_NO_MATCH:
sel.no_match = True
has_selector = True
elif pseudo in PSEUDO_SUPPORTED:
raise SelectorSyntaxError(
"Invalid syntax for pseudo class '{}'".format(pseudo),
self.pattern,
m.start(0)
)
else:
raise NotImplementedError(
"'{}' pseudo-class is not implemented at this time".format(pseudo)
)
return has_selector, is_html
def parse_pseudo_nth(
self,
sel: _Selector,
m: Match[str],
has_selector: bool,
iselector: Iterator[Tuple[str, Match[str]]]
) -> bool:
"""Parse `nth` pseudo."""
mdict = m.groupdict()
if mdict.get('pseudo_nth_child'):
postfix = '_child'
else:
postfix = '_type'
mdict['name'] = util.lower(css_unescape(mdict['name']))
content = util.lower(mdict.get('nth' + postfix))
if content == 'even':
# 2n
s1 = 2
s2 = 0
var = True
elif content == 'odd':
# 2n+1
s1 = 2
s2 = 1
var = True
else:
nth_parts = cast(Match[str], RE_NTH.match(content))
_s1 = '-' if nth_parts.group('s1') and nth_parts.group('s1') == '-' else ''
a = nth_parts.group('a')
var = a.endswith('n')
if a.startswith('n'):
_s1 += '1'
elif var:
_s1 += a[:-1]
else:
_s1 += a
_s2 = '-' if nth_parts.group('s2') and nth_parts.group('s2') == '-' else ''
if nth_parts.group('b'):
_s2 += nth_parts.group('b')
else:
_s2 = '0'
s1 = int(_s1, 10)
s2 = int(_s2, 10)
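        # Worked example (illustrative comment, not in the original code):
        # for `:nth-child(-2n+3)` the regex yields a='2n' with sign '-' and
        # b='3', so s1=-2 and s2=3; the matcher then selects positions
        # -2n+3 for n=0,1,... i.e. the 3rd and 1st children.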
pseudo_sel = mdict['name']
if postfix == '_child':
if m.group('of'):
# Parse the rest of `of S`.
nth_sel = self.parse_selectors(iselector, m.end(0), FLG_PSEUDO | FLG_OPEN)
else:
# Use default `*|*` for `of S`.
nth_sel = CSS_NTH_OF_S_DEFAULT
if pseudo_sel == ':nth-child':
sel.nth.append(ct.SelectorNth(s1, var, s2, False, False, nth_sel))
elif pseudo_sel == ':nth-last-child':
sel.nth.append(ct.SelectorNth(s1, var, s2, False, True, nth_sel))
else:
if pseudo_sel == ':nth-of-type':
sel.nth.append(ct.SelectorNth(s1, var, s2, True, False, ct.SelectorList()))
elif pseudo_sel == ':nth-last-of-type':
sel.nth.append(ct.SelectorNth(s1, var, s2, True, True, ct.SelectorList()))
has_selector = True
return has_selector
def parse_pseudo_open(
self,
sel: _Selector,
name: str,
has_selector: bool,
iselector: Iterator[Tuple[str, Match[str]]],
index: int
) -> bool:
"""Parse pseudo with opening bracket."""
flags = FLG_PSEUDO | FLG_OPEN
if name == ':not':
flags |= FLG_NOT
elif name == ':has':
flags |= FLG_RELATIVE | FLG_FORGIVE
elif name in (':where', ':is'):
flags |= FLG_FORGIVE
sel.selectors.append(self.parse_selectors(iselector, index, flags))
has_selector = True
return has_selector
def parse_has_combinator(
self,
sel: _Selector,
m: Match[str],
has_selector: bool,
selectors: List[_Selector],
rel_type: str,
index: int
) -> Tuple[bool, _Selector, str]:
"""Parse combinator tokens."""
combinator = m.group('relation').strip()
if not combinator:
combinator = WS_COMBINATOR
if combinator == COMMA_COMBINATOR:
if not has_selector:
                # If we've not captured any selector parts, the comma is either at the beginning of the pattern
                # or following another comma, both of which are unexpected, but it shouldn't fail the pseudo-class.
sel.no_match = True
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
rel_type = ":" + WS_COMBINATOR
selectors.append(_Selector())
else:
if has_selector:
# End the current selector and associate the leading combinator with this selector.
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
elif rel_type[1:] != WS_COMBINATOR:
# It's impossible to have two whitespace combinators after each other as the patterns
# will gobble up trailing whitespace. It is also impossible to have a whitespace
# combinator after any other kind for the same reason. But we could have
# multiple non-whitespace combinators. So if the current combinator is not a whitespace,
# then we've hit the multiple combinator case, so we should fail.
raise SelectorSyntaxError(
                    'Multiple combinators at position {}'.format(index),
self.pattern,
index
)
# Set the leading combinator for the next selector.
rel_type = ':' + combinator
sel = _Selector()
has_selector = False
return has_selector, sel, rel_type
def parse_combinator(
self,
sel: _Selector,
m: Match[str],
has_selector: bool,
selectors: List[_Selector],
relations: List[_Selector],
is_pseudo: bool,
is_forgive: bool,
index: int
) -> Tuple[bool, _Selector]:
"""Parse combinator tokens."""
combinator = m.group('relation').strip()
if not combinator:
combinator = WS_COMBINATOR
if not has_selector:
if not is_forgive or combinator != COMMA_COMBINATOR:
raise SelectorSyntaxError(
"The combinator '{}' at postion {}, must have a selector before it".format(combinator, index),
self.pattern,
index
)
# If we are in a forgiving pseudo class, just make the selector a "no match"
if combinator == COMMA_COMBINATOR:
sel.no_match = True
del relations[:]
selectors.append(sel)
else:
if combinator == COMMA_COMBINATOR:
if not sel.tag and not is_pseudo:
# Implied `*`
sel.tag = ct.SelectorTag('*', None)
sel.relations.extend(relations)
selectors.append(sel)
del relations[:]
else:
sel.relations.extend(relations)
sel.rel_type = combinator
del relations[:]
relations.append(sel)
sel = _Selector()
has_selector = False
return has_selector, sel
def parse_class_id(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""Parse HTML classes and ids."""
selector = m.group(0)
if selector.startswith('.'):
sel.classes.append(css_unescape(selector[1:]))
else:
sel.ids.append(css_unescape(selector[1:]))
has_selector = True
return has_selector
def parse_pseudo_contains(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""Parse contains."""
pseudo = util.lower(css_unescape(m.group('name')))
if pseudo == ":contains":
warnings.warn(
"The pseudo class ':contains' is deprecated, ':-soup-contains' should be used moving forward.",
FutureWarning
)
contains_own = pseudo == ":-soup-contains-own"
values = css_unescape(m.group('values'))
patterns = []
for token in RE_VALUES.finditer(values):
if token.group('split'):
continue
value = token.group('value')
if value.startswith(("'", '"')):
value = css_unescape(value[1:-1], True)
else:
value = css_unescape(value)
patterns.append(value)
sel.contains.append(ct.SelectorContains(patterns, contains_own))
has_selector = True
return has_selector
def parse_pseudo_lang(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""Parse pseudo language."""
values = m.group('values')
patterns = []
for token in RE_VALUES.finditer(values):
if token.group('split'):
continue
value = token.group('value')
if value.startswith(('"', "'")):
value = css_unescape(value[1:-1], True)
else:
value = css_unescape(value)
patterns.append(value)
sel.lang.append(ct.SelectorLang(patterns))
has_selector = True
return has_selector
def parse_pseudo_dir(self, sel: _Selector, m: Match[str], has_selector: bool) -> bool:
"""Parse pseudo direction."""
value = ct.SEL_DIR_LTR if util.lower(m.group('dir')) == 'ltr' else ct.SEL_DIR_RTL
sel.flags |= value
has_selector = True
return has_selector
def parse_selectors(
self,
iselector: Iterator[Tuple[str, Match[str]]],
index: int = 0,
flags: int = 0
) -> ct.SelectorList:
"""Parse selectors."""
# Initialize important variables
sel = _Selector()
selectors = []
has_selector = False
closed = False
relations = [] # type: List[_Selector]
rel_type = ":" + WS_COMBINATOR
# Setup various flags
is_open = bool(flags & FLG_OPEN)
is_pseudo = bool(flags & FLG_PSEUDO)
is_relative = bool(flags & FLG_RELATIVE)
is_not = bool(flags & FLG_NOT)
is_html = bool(flags & FLG_HTML)
is_default = bool(flags & FLG_DEFAULT)
is_indeterminate = bool(flags & FLG_INDETERMINATE)
is_in_range = bool(flags & FLG_IN_RANGE)
is_out_of_range = bool(flags & FLG_OUT_OF_RANGE)
is_placeholder_shown = bool(flags & FLG_PLACEHOLDER_SHOWN)
is_forgive = bool(flags & FLG_FORGIVE)
# Print out useful debug stuff
if self.debug: # pragma: no cover
if is_pseudo:
print(' is_pseudo: True')
if is_open:
print(' is_open: True')
if is_relative:
print(' is_relative: True')
if is_not:
print(' is_not: True')
if is_html:
print(' is_html: True')
if is_default:
print(' is_default: True')
if is_indeterminate:
print(' is_indeterminate: True')
if is_in_range:
print(' is_in_range: True')
if is_out_of_range:
print(' is_out_of_range: True')
if is_placeholder_shown:
print(' is_placeholder_shown: True')
if is_forgive:
print(' is_forgive: True')
        # The algorithm for relative selectors requires an initial selector in the selector list.
if is_relative:
selectors.append(_Selector())
try:
while True:
key, m = next(iselector)
# Handle parts
if key == "at_rule":
raise NotImplementedError("At-rules found at position {}".format(m.start(0)))
elif key == 'pseudo_class_custom':
has_selector = self.parse_pseudo_class_custom(sel, m, has_selector)
elif key == 'pseudo_class':
has_selector, is_html = self.parse_pseudo_class(sel, m, has_selector, iselector, is_html)
elif key == 'pseudo_element':
raise NotImplementedError("Pseudo-element found at position {}".format(m.start(0)))
elif key == 'pseudo_contains':
has_selector = self.parse_pseudo_contains(sel, m, has_selector)
elif key in ('pseudo_nth_type', 'pseudo_nth_child'):
has_selector = self.parse_pseudo_nth(sel, m, has_selector, iselector)
elif key == 'pseudo_lang':
has_selector = self.parse_pseudo_lang(sel, m, has_selector)
elif key == 'pseudo_dir':
has_selector = self.parse_pseudo_dir(sel, m, has_selector)
# Currently only supports HTML
is_html = True
elif key == 'pseudo_close':
if not has_selector:
if not is_forgive:
raise SelectorSyntaxError(
"Expected a selector at postion {}".format(m.start(0)),
self.pattern,
m.start(0)
)
sel.no_match = True
if is_open:
closed = True
break
else:
raise SelectorSyntaxError(
"Unmatched pseudo-class close at postion {}".format(m.start(0)),
self.pattern,
m.start(0)
)
elif key == 'combine':
if is_relative:
has_selector, sel, rel_type = self.parse_has_combinator(
sel, m, has_selector, selectors, rel_type, index
)
else:
has_selector, sel = self.parse_combinator(
sel, m, has_selector, selectors, relations, is_pseudo, is_forgive, index
)
elif key == 'attribute':
has_selector = self.parse_attribute_selector(sel, m, has_selector)
elif key == 'tag':
if has_selector:
raise SelectorSyntaxError(
"Tag name found at position {} instead of at the start".format(m.start(0)),
self.pattern,
m.start(0)
)
has_selector = self.parse_tag_pattern(sel, m, has_selector)
elif key in ('class', 'id'):
has_selector = self.parse_class_id(sel, m, has_selector)
index = m.end(0)
except StopIteration:
pass
# Handle selectors that are not closed
if is_open and not closed:
raise SelectorSyntaxError(
"Unclosed pseudo-class at position {}".format(index),
self.pattern,
index
)
# Cleanup completed selector piece
if has_selector:
if not sel.tag and not is_pseudo:
# Implied `*`
sel.tag = ct.SelectorTag('*', None)
if is_relative:
sel.rel_type = rel_type
selectors[-1].relations.append(sel)
else:
sel.relations.extend(relations)
del relations[:]
selectors.append(sel)
# Forgive empty slots in pseudo-classes that have lists (and are forgiving)
elif is_forgive:
if is_relative:
# Handle relative selectors pseudo-classes with empty slots like `:has()`
if selectors and selectors[-1].rel_type is None and rel_type == ': ':
sel.rel_type = rel_type
sel.no_match = True
selectors[-1].relations.append(sel)
has_selector = True
else:
# Handle normal pseudo-classes with empty slots
if not selectors or not relations:
# Others like `:is()` etc.
sel.no_match = True
del relations[:]
selectors.append(sel)
has_selector = True
if not has_selector:
            # We will always need to finish a selector when `:has()` is used, as it leads with a combinator.
            # This may apply to others as well.
raise SelectorSyntaxError(
'Expected a selector at position {}'.format(index),
self.pattern,
index
)
# Some patterns require additional logic, such as default. We try to make these the
# last pattern, and append the appropriate flag to that selector which communicates
# to the matcher what additional logic is required.
if is_default:
selectors[-1].flags = ct.SEL_DEFAULT
if is_indeterminate:
selectors[-1].flags = ct.SEL_INDETERMINATE
if is_in_range:
selectors[-1].flags = ct.SEL_IN_RANGE
if is_out_of_range:
selectors[-1].flags = ct.SEL_OUT_OF_RANGE
if is_placeholder_shown:
selectors[-1].flags = ct.SEL_PLACEHOLDER_SHOWN
# Return selector list
return ct.SelectorList([s.freeze() for s in selectors], is_not, is_html)
def selector_iter(self, pattern: str) -> Iterator[Tuple[str, Match[str]]]:
"""Iterate selector tokens."""
# Ignore whitespace and comments at start and end of pattern
m = RE_WS_BEGIN.search(pattern)
index = m.end(0) if m else 0
m = RE_WS_END.search(pattern)
end = (m.start(0) - 1) if m else (len(pattern) - 1)
if self.debug: # pragma: no cover
print('## PARSING: {!r}'.format(pattern))
while index <= end:
m = None
for v in self.css_tokens:
m = v.match(pattern, index, self.flags)
if m:
name = v.get_name()
if self.debug: # pragma: no cover
print("TOKEN: '{}' --> {!r} at position {}".format(name, m.group(0), m.start(0)))
index = m.end(0)
yield name, m
break
if m is None:
c = pattern[index]
# If the character represents the start of one of the known selector types,
# throw an exception mentioning that the known selector type is in error;
# otherwise, report the invalid character.
if c == '[':
msg = "Malformed attribute selector at position {}".format(index)
elif c == '.':
msg = "Malformed class selector at position {}".format(index)
elif c == '#':
msg = "Malformed id selector at position {}".format(index)
elif c == ':':
msg = "Malformed pseudo-class selector at position {}".format(index)
else:
msg = "Invalid character {!r} position {}".format(c, index)
raise SelectorSyntaxError(msg, self.pattern, index)
if self.debug: # pragma: no cover
print('## END PARSING')
def process_selectors(self, index: int = 0, flags: int = 0) -> ct.SelectorList:
"""Process selectors."""
return self.parse_selectors(self.selector_iter(self.pattern), index, flags)
# Precompile CSS selector lists for pseudo-classes (additional logic may be required beyond the pattern)
# A few patterns are order dependent as they use previously compiled patterns.
# CSS pattern for `:link` and `:any-link`
CSS_LINK = CSSParser(
'html|*:is(a, area)[href]'
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:checked`
CSS_CHECKED = CSSParser(
'''
html|*:is(input[type=checkbox], input[type=radio])[checked], html|option[selected]
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:default` (must compile CSS_CHECKED first)
CSS_DEFAULT = CSSParser(
'''
:checked,
/*
This pattern must be at the end.
Special logic is applied to the last selector.
*/
html|form html|*:is(button, input)[type="submit"]
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML | FLG_DEFAULT)
# CSS pattern for `:indeterminate`
CSS_INDETERMINATE = CSSParser(
'''
html|input[type="checkbox"][indeterminate],
html|input[type="radio"]:is(:not([name]), [name=""]):not([checked]),
html|progress:not([value]),
/*
This pattern must be at the end.
Special logic is applied to the last selector.
*/
html|input[type="radio"][name]:not([name='']):not([checked])
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML | FLG_INDETERMINATE)
# CSS pattern for `:disabled`
CSS_DISABLED = CSSParser(
'''
html|*:is(input:not([type=hidden]), button, select, textarea, fieldset, optgroup, option, fieldset)[disabled],
html|optgroup[disabled] > html|option,
html|fieldset[disabled] > html|*:is(input:not([type=hidden]), button, select, textarea, fieldset),
html|fieldset[disabled] >
html|*:not(legend:nth-of-type(1)) html|*:is(input:not([type=hidden]), button, select, textarea, fieldset)
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:enabled`
CSS_ENABLED = CSSParser(
'''
html|*:is(input:not([type=hidden]), button, select, textarea, fieldset, optgroup, option, fieldset):not(:disabled)
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:required`
CSS_REQUIRED = CSSParser(
'html|*:is(input, textarea, select)[required]'
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:optional`
CSS_OPTIONAL = CSSParser(
'html|*:is(input, textarea, select):not([required])'
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:placeholder-shown`
CSS_PLACEHOLDER_SHOWN = CSSParser(
'''
html|input:is(
:not([type]),
[type=""],
[type=text],
[type=search],
[type=url],
[type=tel],
[type=email],
[type=password],
[type=number]
)[placeholder]:not([placeholder='']):is(:not([value]), [value=""]),
html|textarea[placeholder]:not([placeholder=''])
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML | FLG_PLACEHOLDER_SHOWN)
# CSS pattern default for `:nth-child` "of S" feature
CSS_NTH_OF_S_DEFAULT = CSSParser(
'*|*'
).process_selectors(flags=FLG_PSEUDO)
# CSS pattern for `:read-write` (CSS_DISABLED must be compiled first)
CSS_READ_WRITE = CSSParser(
'''
html|*:is(
textarea,
input:is(
:not([type]),
[type=""],
[type=text],
[type=search],
[type=url],
[type=tel],
[type=email],
[type=number],
[type=password],
[type=date],
[type=datetime-local],
[type=month],
[type=time],
[type=week]
)
):not([readonly], :disabled),
html|*:is([contenteditable=""], [contenteditable="true" i])
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:read-only`
CSS_READ_ONLY = CSSParser(
'''
html|*:not(:read-write)
'''
).process_selectors(flags=FLG_PSEUDO | FLG_HTML)
# CSS pattern for `:in-range`
CSS_IN_RANGE = CSSParser(
'''
html|input:is(
[type="date"],
[type="month"],
[type="week"],
[type="time"],
[type="datetime-local"],
[type="number"],
[type="range"]
):is(
[min],
[max]
)
'''
).process_selectors(flags=FLG_PSEUDO | FLG_IN_RANGE | FLG_HTML)
# CSS pattern for `:out-of-range`
CSS_OUT_OF_RANGE = CSSParser(
'''
html|input:is(
[type="date"],
[type="month"],
[type="week"],
[type="time"],
[type="datetime-local"],
[type="number"],
[type="range"]
):is(
[min],
[max]
)
'''
).process_selectors(flags=FLG_PSEUDO | FLG_OUT_OF_RANGE | FLG_HTML)
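# A minimal, self-contained sketch (added for illustration; not part of the
# original module). Running this file directly parses a selector with the
# `CSSParser` above and prints the resulting frozen `SelectorList`.
if __name__ == '__main__':  # pragma: no cover
    _example = CSSParser('div > p.note').process_selectors()
    print(_example)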
| 47,903 | Python | .py | 1,174 | 30.641397 | 118 | 0.551134 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,564 | __init__.py | rembo10_headphones/lib/soupsieve/__init__.py |
"""
Soup Sieve.
A CSS selector filter for BeautifulSoup4.
MIT License
Copyright (c) 2018 Isaac Muse
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .__meta__ import __version__, __version_info__ # noqa: F401
from . import css_parser as cp
from . import css_match as cm
from . import css_types as ct
from .util import DEBUG, SelectorSyntaxError # noqa: F401
import bs4 # type: ignore[import]
from typing import Dict, Optional, Any, List, Iterator, Iterable
__all__ = (
'DEBUG', 'SelectorSyntaxError', 'SoupSieve',
'closest', 'compile', 'filter', 'iselect',
'match', 'select', 'select_one'
)
SoupSieve = cm.SoupSieve
def compile( # noqa: A001
pattern: str,
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> cm.SoupSieve:
"""Compile CSS pattern."""
ns = ct.Namespaces(namespaces) if namespaces is not None else namespaces # type: Optional[ct.Namespaces]
cs = ct.CustomSelectors(custom) if custom is not None else custom # type: Optional[ct.CustomSelectors]
if isinstance(pattern, SoupSieve):
if flags:
raise ValueError("Cannot process 'flags' argument on a compiled selector list")
elif namespaces is not None:
raise ValueError("Cannot process 'namespaces' argument on a compiled selector list")
elif custom is not None:
raise ValueError("Cannot process 'custom' argument on a compiled selector list")
return pattern
return cp._cached_css_compile(pattern, ns, cs, flags)
def purge() -> None:
"""Purge cached patterns."""
cp._purge_cache()
def closest(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> 'bs4.Tag':
"""Match closest ancestor."""
return compile(select, namespaces, flags, **kwargs).closest(tag)
def match(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> bool:
"""Match node."""
return compile(select, namespaces, flags, **kwargs).match(tag)
def filter( # noqa: A001
select: str,
iterable: Iterable['bs4.Tag'],
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> List['bs4.Tag']:
"""Filter list of nodes."""
return compile(select, namespaces, flags, **kwargs).filter(iterable)
def select_one(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> 'bs4.Tag':
"""Select a single tag."""
return compile(select, namespaces, flags, **kwargs).select_one(tag)
def select(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
limit: int = 0,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> List['bs4.Tag']:
"""Select the specified tags."""
return compile(select, namespaces, flags, **kwargs).select(tag, limit)
def iselect(
select: str,
tag: 'bs4.Tag',
namespaces: Optional[Dict[str, str]] = None,
limit: int = 0,
flags: int = 0,
*,
custom: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> Iterator['bs4.Tag']:
"""Iterate the specified tags."""
    yield from compile(select, namespaces, flags, **kwargs).iselect(tag, limit)
def escape(ident: str) -> str:
"""Escape identifier."""
return cp.escape(ident)
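# A minimal usage sketch (added for illustration; not part of the original
# module). Assumes the stdlib-backed 'html.parser' builder that ships with
# BeautifulSoup; run this file directly to see it in action.
if __name__ == '__main__':  # pragma: no cover
    _soup = bs4.BeautifulSoup('<div><p class="a">x</p><p>y</p></div>', 'html.parser')
    print(select('p.a', _soup))   # -> [<p class="a">x</p>]
    print(match('p.a', _soup.p))  # -> True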
| 4,717 | Python | .py | 129 | 32.395349 | 109 | 0.683366 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,565 | __meta__.py | rembo10_headphones/lib/soupsieve/__meta__.py |
"""Meta related things."""
from collections import namedtuple
import re
RE_VER = re.compile(
r'''(?x)
(?P<major>\d+)(?:\.(?P<minor>\d+))?(?:\.(?P<micro>\d+))?
(?:(?P<type>a|b|rc)(?P<pre>\d+))?
(?:\.post(?P<post>\d+))?
(?:\.dev(?P<dev>\d+))?
'''
)
REL_MAP = {
".dev": "",
".dev-alpha": "a",
".dev-beta": "b",
".dev-candidate": "rc",
"alpha": "a",
"beta": "b",
"candidate": "rc",
"final": ""
}
DEV_STATUS = {
".dev": "2 - Pre-Alpha",
".dev-alpha": "2 - Pre-Alpha",
".dev-beta": "2 - Pre-Alpha",
".dev-candidate": "2 - Pre-Alpha",
"alpha": "3 - Alpha",
"beta": "4 - Beta",
"candidate": "4 - Beta",
"final": "5 - Production/Stable"
}
PRE_REL_MAP = {"a": 'alpha', "b": 'beta', "rc": 'candidate'}
class Version(namedtuple("Version", ["major", "minor", "micro", "release", "pre", "post", "dev"])):
"""
Get the version (PEP 440).
A biased approach to the PEP 440 semantic version.
Provides a tuple structure which is sorted for comparisons `v1 > v2` etc.
(major, minor, micro, release type, pre-release build, post-release build, development release build)
    Release types are named in such a way that they are easily comparable.
    Provides accessors to check whether the build is a development, pre-release, or post-release build,
    and an accessor to get the development status for setup files.
How it works (currently):
- You must specify a release type as either `final`, `alpha`, `beta`, or `candidate`.
- To define a development release, you can use either `.dev`, `.dev-alpha`, `.dev-beta`, or `.dev-candidate`.
The dot is used to ensure all development specifiers are sorted before `alpha`.
You can specify a `dev` number for development builds, but do not have to as implicit development releases
are allowed.
- You must specify a `pre` value greater than zero if using a prerelease as this project (not PEP 440) does not
allow implicit prereleases.
- You can optionally set `post` to a value greater than zero to make the build a post release. While post releases
are technically allowed in prereleases, it is strongly discouraged, so we are rejecting them. It should be
noted that we do not allow `post0` even though PEP 440 does not restrict this. This project specifically
does not allow implicit post releases.
- It should be noted that we do not support epochs `1!` or local versions `+some-custom.version-1`.
Acceptable version releases:
```
Version(1, 0, 0, "final") 1.0
Version(1, 2, 0, "final") 1.2
Version(1, 2, 3, "final") 1.2.3
Version(1, 2, 0, ".dev-alpha", pre=4) 1.2a4
Version(1, 2, 0, ".dev-beta", pre=4) 1.2b4
Version(1, 2, 0, ".dev-candidate", pre=4) 1.2rc4
Version(1, 2, 0, "final", post=1) 1.2.post1
Version(1, 2, 3, ".dev") 1.2.3.dev0
Version(1, 2, 3, ".dev", dev=1) 1.2.3.dev1
```
"""
def __new__(
cls,
major: int, minor: int, micro: int, release: str = "final",
pre: int = 0, post: int = 0, dev: int = 0
) -> "Version":
"""Validate version info."""
# Ensure all parts are positive integers.
for value in (major, minor, micro, pre, post):
if not (isinstance(value, int) and value >= 0):
raise ValueError("All version parts except 'release' should be integers.")
if release not in REL_MAP:
raise ValueError("'{}' is not a valid release type.".format(release))
# Ensure valid pre-release (we do not allow implicit pre-releases).
if ".dev-candidate" < release < "final":
if pre == 0:
raise ValueError("Implicit pre-releases not allowed.")
elif dev:
raise ValueError("Version is not a development release.")
elif post:
raise ValueError("Post-releases are not allowed with pre-releases.")
# Ensure valid development or development/pre release
elif release < "alpha":
if release > ".dev" and pre == 0:
raise ValueError("Implicit pre-release not allowed.")
elif post:
raise ValueError("Post-releases are not allowed with pre-releases.")
# Ensure a valid normal release
else:
if pre:
raise ValueError("Version is not a pre-release.")
elif dev:
raise ValueError("Version is not a development release.")
return super(Version, cls).__new__(cls, major, minor, micro, release, pre, post, dev)
def _is_pre(self) -> bool:
"""Is prerelease."""
return bool(self.pre > 0)
def _is_dev(self) -> bool:
"""Is development."""
return bool(self.release < "alpha")
def _is_post(self) -> bool:
"""Is post."""
return bool(self.post > 0)
def _get_dev_status(self) -> str: # pragma: no cover
"""Get development status string."""
return DEV_STATUS[self.release]
def _get_canonical(self) -> str:
"""Get the canonical output string."""
        # Assemble the major, minor, and micro version and append `pre`, `post`, or `dev` if needed.
if self.micro == 0:
ver = "{}.{}".format(self.major, self.minor)
else:
ver = "{}.{}.{}".format(self.major, self.minor, self.micro)
if self._is_pre():
ver += '{}{}'.format(REL_MAP[self.release], self.pre)
if self._is_post():
ver += ".post{}".format(self.post)
if self._is_dev():
ver += ".dev{}".format(self.dev)
return ver
def parse_version(ver: str) -> Version:
"""Parse version into a comparable Version tuple."""
m = RE_VER.match(ver)
if m is None:
raise ValueError("'{}' is not a valid version".format(ver))
# Handle major, minor, micro
major = int(m.group('major'))
minor = int(m.group('minor')) if m.group('minor') else 0
micro = int(m.group('micro')) if m.group('micro') else 0
# Handle pre releases
if m.group('type'):
release = PRE_REL_MAP[m.group('type')]
pre = int(m.group('pre'))
else:
release = "final"
pre = 0
# Handle development releases
    if m.group('dev'):
        dev = int(m.group('dev'))
        release = '.dev-' + release if pre else '.dev'
    else:
        dev = 0
# Handle post
post = int(m.group('post')) if m.group('post') else 0
return Version(major, minor, micro, release, pre, post, dev)
__version_info__ = Version(2, 3, 1, "final")
__version__ = __version_info__._get_canonical()
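# A small usage sketch (added for illustration; not part of the original
# module). Run this file directly to see how version strings round-trip
# through `parse_version`.
if __name__ == '__main__':  # pragma: no cover
    for _ver in ('1.0', '1.2b4', '1.2.post1', '1.2.3.dev1'):
        print(_ver, '->', parse_version(_ver))
    assert parse_version('1.2b4') < parse_version('1.2')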
| 6,809 | Python | .py | 154 | 36.863636 | 118 | 0.584304 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,566 | pretty.py | rembo10_headphones/lib/soupsieve/pretty.py |
"""
Format a pretty string of a `SoupSieve` object for easy debugging.
This won't necessarily support all types and such, and it definitely
does not support custom outputs.
It is mainly geared towards our types, as the `SelectorList`
object is a beast to look at without some indentation and newlines.
The format and the various output types are fairly well known (though they
haven't been tested extensively enough to be sure we aren't missing corner cases).
Example:
```
>>> import soupsieve as sv
>>> sv.compile('this > that.class[name=value]').selectors.pretty()
SelectorList(
selectors=(
Selector(
tag=SelectorTag(
name='that',
prefix=None),
ids=(),
classes=(
'class',
),
attributes=(
SelectorAttribute(
attribute='name',
prefix='',
pattern=re.compile(
'^value$'),
xml_type_pattern=None),
),
nth=(),
selectors=(),
relation=SelectorList(
selectors=(
Selector(
tag=SelectorTag(
name='this',
prefix=None),
ids=(),
classes=(),
attributes=(),
nth=(),
selectors=(),
relation=SelectorList(
selectors=(),
is_not=False,
is_html=False),
rel_type='>',
contains=(),
lang=(),
flags=0),
),
is_not=False,
is_html=False),
rel_type=None,
contains=(),
lang=(),
flags=0),
),
is_not=False,
is_html=False)
```
"""
import re
from typing import Any
RE_CLASS = re.compile(r'(?i)[a-z_][_a-z\d\.]+\(')
RE_PARAM = re.compile(r'(?i)[_a-z][_a-z\d]+=')
RE_EMPTY = re.compile(r'\(\)|\[\]|\{\}')
RE_LSTRT = re.compile(r'\[')
RE_DSTRT = re.compile(r'\{')
RE_TSTRT = re.compile(r'\(')
RE_LEND = re.compile(r'\]')
RE_DEND = re.compile(r'\}')
RE_TEND = re.compile(r'\)')
RE_INT = re.compile(r'\d+')
RE_KWORD = re.compile(r'(?i)[_a-z][_a-z\d]+')
RE_DQSTR = re.compile(r'"(?:\\.|[^"\\])*"')
RE_SQSTR = re.compile(r"'(?:\\.|[^'\\])*'")
RE_SEP = re.compile(r'\s*(,)\s*')
RE_DSEP = re.compile(r'\s*(:)\s*')
TOKENS = {
'class': RE_CLASS,
'param': RE_PARAM,
'empty': RE_EMPTY,
'lstrt': RE_LSTRT,
'dstrt': RE_DSTRT,
'tstrt': RE_TSTRT,
'lend': RE_LEND,
'dend': RE_DEND,
'tend': RE_TEND,
'sqstr': RE_SQSTR,
'sep': RE_SEP,
'dsep': RE_DSEP,
'int': RE_INT,
'kword': RE_KWORD,
'dqstr': RE_DQSTR
}
def pretty(obj: Any) -> str: # pragma: no cover
"""Make the object output string pretty."""
sel = str(obj)
index = 0
end = len(sel) - 1
indent = 0
output = []
while index <= end:
m = None
for k, v in TOKENS.items():
m = v.match(sel, index)
if m:
name = k
index = m.end(0)
if name in ('class', 'lstrt', 'dstrt', 'tstrt'):
indent += 4
output.append('{}\n{}'.format(m.group(0), " " * indent))
elif name in ('param', 'int', 'kword', 'sqstr', 'dqstr', 'empty'):
output.append(m.group(0))
elif name in ('lend', 'dend', 'tend'):
indent -= 4
output.append(m.group(0))
elif name in ('sep',):
output.append('{}\n{}'.format(m.group(1), " " * indent))
elif name in ('dsep',):
output.append('{} '.format(m.group(1)))
break
return ''.join(output)
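# A tiny sketch (added for illustration; not part of the original module):
# feed `pretty()` a repr-style string to see the indentation it applies.
if __name__ == '__main__':  # pragma: no cover
    print(pretty("SelectorList(selectors=(Selector(tag='div', ids=()),), is_not=False)"))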
| 4,018 | Python | .py | 125 | 21.032 | 82 | 0.449884 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,567 | css_types.py | rembo10_headphones/lib/soupsieve/css_types.py |
"""CSS selector structure items."""
import copyreg
from .pretty import pretty
from typing import Any, Type, Tuple, Union, Dict, Iterator, Hashable, Optional, Pattern, Iterable, Mapping
__all__ = (
'Selector',
'SelectorNull',
'SelectorTag',
'SelectorAttribute',
'SelectorContains',
'SelectorNth',
'SelectorLang',
'SelectorList',
'Namespaces',
'CustomSelectors'
)
SEL_EMPTY = 0x1
SEL_ROOT = 0x2
SEL_DEFAULT = 0x4
SEL_INDETERMINATE = 0x8
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
SEL_PLACEHOLDER_SHOWN = 0x400
class Immutable:
"""Immutable."""
__slots__: Tuple[str, ...] = ('_hash',)
_hash: int
def __init__(self, **kwargs: Any) -> None:
"""Initialize."""
temp = []
for k, v in kwargs.items():
temp.append(type(v))
temp.append(v)
super(Immutable, self).__setattr__(k, v)
super(Immutable, self).__setattr__('_hash', hash(tuple(temp)))
@classmethod
def __base__(cls) -> "Type[Immutable]":
"""Get base class."""
return cls
def __eq__(self, other: Any) -> bool:
"""Equal."""
return (
isinstance(other, self.__base__()) and
all([getattr(other, key) == getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __ne__(self, other: Any) -> bool:
"""Equal."""
return (
not isinstance(other, self.__base__()) or
any([getattr(other, key) != getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __hash__(self) -> int:
"""Hash."""
return self._hash
def __setattr__(self, name: str, value: Any) -> None:
"""Prevent mutability."""
raise AttributeError("'{}' is immutable".format(self.__class__.__name__))
def __repr__(self) -> str: # pragma: no cover
"""Representation."""
return "{}({})".format(
self.__class__.__name__, ', '.join(["{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1]])
)
__str__ = __repr__
def pretty(self) -> None: # pragma: no cover
"""Pretty print."""
print(pretty(self))
class ImmutableDict(Mapping[Any, Any]):
"""Hashable, immutable dictionary."""
def __init__(
self,
arg: Union[Dict[Any, Any], Iterable[Tuple[Any, Any]]]
) -> None:
"""Initialize."""
self._validate(arg)
self._d = dict(arg)
self._hash = hash(tuple([(type(x), x, type(y), y) for x, y in sorted(self._d.items())]))
def _validate(self, arg: Union[Dict[Any, Any], Iterable[Tuple[Any, Any]]]) -> None:
"""Validate arguments."""
if isinstance(arg, dict):
if not all([isinstance(v, Hashable) for v in arg.values()]):
raise TypeError('{} values must be hashable'.format(self.__class__.__name__))
elif not all([isinstance(k, Hashable) and isinstance(v, Hashable) for k, v in arg]):
            raise TypeError('{} keys and values must be hashable'.format(self.__class__.__name__))
def __iter__(self) -> Iterator[Any]:
"""Iterator."""
return iter(self._d)
def __len__(self) -> int:
"""Length."""
return len(self._d)
def __getitem__(self, key: Any) -> Any:
"""Get item: `namespace['key']`."""
return self._d[key]
def __hash__(self) -> int:
"""Hash."""
return self._hash
def __repr__(self) -> str: # pragma: no cover
"""Representation."""
return "{!r}".format(self._d)
__str__ = __repr__
class Namespaces(ImmutableDict):
"""Namespaces."""
def __init__(self, arg: Union[Dict[str, str], Iterable[Tuple[str, str]]]) -> None:
"""Initialize."""
super().__init__(arg)
def _validate(self, arg: Union[Dict[str, str], Iterable[Tuple[str, str]]]) -> None:
"""Validate arguments."""
if isinstance(arg, dict):
if not all([isinstance(v, str) for v in arg.values()]):
                raise TypeError('{} values must be Unicode strings'.format(self.__class__.__name__))
elif not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('{} keys and values must be Unicode strings'.format(self.__class__.__name__))
class CustomSelectors(ImmutableDict):
"""Custom selectors."""
def __init__(self, arg: Union[Dict[str, str], Iterable[Tuple[str, str]]]) -> None:
"""Initialize."""
super().__init__(arg)
def _validate(self, arg: Union[Dict[str, str], Iterable[Tuple[str, str]]]) -> None:
"""Validate arguments."""
if isinstance(arg, dict):
if not all([isinstance(v, str) for v in arg.values()]):
                raise TypeError('{} values must be Unicode strings'.format(self.__class__.__name__))
elif not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('{} keys and values must be Unicode strings'.format(self.__class__.__name__))
class Selector(Immutable):
"""Selector."""
__slots__ = (
'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
)
tag: Optional['SelectorTag']
ids: Tuple[str, ...]
classes: Tuple[str, ...]
attributes: Tuple['SelectorAttribute', ...]
nth: Tuple['SelectorNth', ...]
selectors: Tuple['SelectorList', ...]
relation: 'SelectorList'
rel_type: Optional[str]
contains: Tuple['SelectorContains', ...]
lang: Tuple['SelectorLang', ...]
flags: int
def __init__(
self,
tag: Optional['SelectorTag'],
ids: Tuple[str, ...],
classes: Tuple[str, ...],
attributes: Tuple['SelectorAttribute', ...],
nth: Tuple['SelectorNth', ...],
selectors: Tuple['SelectorList', ...],
relation: 'SelectorList',
rel_type: Optional[str],
contains: Tuple['SelectorContains', ...],
lang: Tuple['SelectorLang', ...],
flags: int
):
"""Initialize."""
super().__init__(
tag=tag,
ids=ids,
classes=classes,
attributes=attributes,
nth=nth,
selectors=selectors,
relation=relation,
rel_type=rel_type,
contains=contains,
lang=lang,
flags=flags
)
class SelectorNull(Immutable):
"""Null Selector."""
def __init__(self) -> None:
"""Initialize."""
super().__init__()
class SelectorTag(Immutable):
"""Selector tag."""
__slots__ = ("name", "prefix", "_hash")
name: str
prefix: Optional[str]
def __init__(self, name: str, prefix: Optional[str]) -> None:
"""Initialize."""
super().__init__(name=name, prefix=prefix)
class SelectorAttribute(Immutable):
"""Selector attribute rule."""
__slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")
attribute: str
prefix: str
pattern: Optional[Pattern[str]]
xml_type_pattern: Optional[Pattern[str]]
def __init__(
self,
attribute: str,
prefix: str,
pattern: Optional[Pattern[str]],
xml_type_pattern: Optional[Pattern[str]]
) -> None:
"""Initialize."""
super().__init__(
attribute=attribute,
prefix=prefix,
pattern=pattern,
xml_type_pattern=xml_type_pattern
)
class SelectorContains(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "own", "_hash")
text: Tuple[str, ...]
own: bool
def __init__(self, text: Iterable[str], own: bool) -> None:
"""Initialize."""
super().__init__(text=tuple(text), own=own)
class SelectorNth(Immutable):
"""Selector nth type."""
__slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")
a: int
n: bool
b: int
of_type: bool
last: bool
selectors: 'SelectorList'
def __init__(self, a: int, n: bool, b: int, of_type: bool, last: bool, selectors: 'SelectorList') -> None:
"""Initialize."""
super().__init__(
a=a,
n=n,
b=b,
of_type=of_type,
last=last,
selectors=selectors
)
class SelectorLang(Immutable):
"""Selector language rules."""
__slots__ = ("languages", "_hash",)
languages: Tuple[str, ...]
def __init__(self, languages: Iterable[str]):
"""Initialize."""
super().__init__(languages=tuple(languages))
def __iter__(self) -> Iterator[str]:
"""Iterator."""
return iter(self.languages)
def __len__(self) -> int: # pragma: no cover
"""Length."""
return len(self.languages)
def __getitem__(self, index: int) -> str: # pragma: no cover
"""Get item."""
return self.languages[index]
class SelectorList(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
selectors: Tuple[Union['Selector', 'SelectorNull'], ...]
is_not: bool
is_html: bool
def __init__(
self,
selectors: Optional[Iterable[Union['Selector', 'SelectorNull']]] = None,
is_not: bool = False,
is_html: bool = False
) -> None:
"""Initialize."""
super().__init__(
selectors=tuple(selectors) if selectors is not None else tuple(),
is_not=is_not,
is_html=is_html
)
def __iter__(self) -> Iterator[Union['Selector', 'SelectorNull']]:
"""Iterator."""
return iter(self.selectors)
def __len__(self) -> int:
"""Length."""
return len(self.selectors)
def __getitem__(self, index: int) -> Union['Selector', 'SelectorNull']:
"""Get item."""
return self.selectors[index]
def _pickle(p: Any) -> Any:
return p.__base__(), tuple([getattr(p, s) for s in p.__slots__[:-1]])
def pickle_register(obj: Any) -> None:
"""Allow object to be pickled."""
copyreg.pickle(obj, _pickle)
pickle_register(Selector)
pickle_register(SelectorNull)
pickle_register(SelectorTag)
pickle_register(SelectorAttribute)
pickle_register(SelectorContains)
pickle_register(SelectorNth)
pickle_register(SelectorLang)
pickle_register(SelectorList)
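# A brief sketch (added for illustration; not part of the original module):
# the `Immutable` machinery gives value semantics and blocks mutation.
if __name__ == '__main__':  # pragma: no cover
    _tag = SelectorTag('div', None)
    print(_tag == SelectorTag('div', None))  # -> True (structural equality)
    try:
        _tag.name = 'span'
    except AttributeError as _e:
        print(_e)  # -> 'SelectorTag' is immutable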
| 10,479 | Python | .py | 289 | 28.761246 | 116 | 0.563245 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,568 | _cpmodpy.py | rembo10_headphones/lib/cherrypy/_cpmodpy.py |
"""Native adapter for serving CherryPy via mod_python.
Basic usage:
##########################################
# Application in a module called myapp.py
##########################################
import cherrypy
class Root:
@cherrypy.expose
def index(self):
return 'Hi there, Ho there, Hey there'
# We will use this method from the mod_python configuration
# as the entry point to our application
def setup_server():
cherrypy.tree.mount(Root())
cherrypy.config.update({'environment': 'production',
'log.screen': False,
'show_tracebacks': False})
##########################################
# mod_python settings for apache2
# This should reside in your httpd.conf
# or a file that will be loaded at
# apache startup
##########################################
# Start
DocumentRoot "/"
Listen 8080
LoadModule python_module /usr/lib/apache2/modules/mod_python.so
<Location "/">
PythonPath "sys.path+['/path/to/my/application']"
SetHandler python-program
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup myapp::setup_server
PythonDebug On
</Location>
# End
The actual path to your mod_python.so is dependent on your
environment. In this case we suppose a global mod_python
installation on a Linux distribution such as Ubuntu.
We do set the PythonPath configuration setting so that
your application can be found by the user running
the apache2 instance. Of course, if your application
resides in the global site-packages, this won't be needed.
Then restart apache2 and access http://127.0.0.1:8080
"""
import io
import logging
import os
import re
import sys
from more_itertools import always_iterable
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
# ------------------------------ Request-handling
def setup(req):
from mod_python import apache
# Run any setup functions defined by a "PythonOption cherrypy.setup"
# directive.
options = req.get_options()
if 'cherrypy.setup' in options:
for function in options['cherrypy.setup'].split():
atoms = function.split('::', 1)
if len(atoms) == 1:
mod = __import__(atoms[0], globals(), locals())
else:
modname, fname = atoms
mod = __import__(modname, globals(), locals(), [fname])
func = getattr(mod, fname)
func()
cherrypy.config.update({'log.screen': False,
'tools.ignore_headers.on': True,
'tools.ignore_headers.headers': ['Range'],
})
engine = cherrypy.engine
if hasattr(engine, 'signal_handler'):
engine.signal_handler.unsubscribe()
if hasattr(engine, 'console_control_handler'):
engine.console_control_handler.unsubscribe()
engine.autoreload.unsubscribe()
cherrypy.server.unsubscribe()
@engine.subscribe('log')
def _log(msg, level):
newlevel = apache.APLOG_ERR
if logging.DEBUG >= level:
newlevel = apache.APLOG_DEBUG
elif logging.INFO >= level:
newlevel = apache.APLOG_INFO
elif logging.WARNING >= level:
newlevel = apache.APLOG_WARNING
# On Windows, req.server is required or the msg will vanish. See
# http://www.modpython.org/pipermail/mod_python/2003-October/014291.html
# Also, "When server is not specified...LogLevel does not apply..."
apache.log_error(msg, newlevel, req.server)
engine.start()
def cherrypy_cleanup(data):
engine.exit()
try:
# apache.register_cleanup wasn't available until 3.1.4.
apache.register_cleanup(cherrypy_cleanup)
except AttributeError:
req.server.register_cleanup(req, cherrypy_cleanup)
class _ReadOnlyRequest:
expose = ('read', 'readline', 'readlines')
def __init__(self, req):
for method in self.expose:
self.__dict__[method] = getattr(req, method)
recursive = False
_isSetUp = False
def handler(req):
from mod_python import apache
try:
global _isSetUp
if not _isSetUp:
setup(req)
_isSetUp = True
# Obtain a Request object from CherryPy
local = req.connection.local_addr
local = httputil.Host(
local[0], local[1], req.connection.local_host or '')
remote = req.connection.remote_addr
remote = httputil.Host(
remote[0], remote[1], req.connection.remote_host or '')
scheme = req.parsed_uri[0] or 'http'
req.get_basic_auth_pw()
try:
# apache.mpm_query only became available in mod_python 3.1
q = apache.mpm_query
threaded = q(apache.AP_MPMQ_IS_THREADED)
forked = q(apache.AP_MPMQ_IS_FORKED)
except AttributeError:
bad_value = ("You must provide a PythonOption '%s', "
"either 'on' or 'off', when running a version "
'of mod_python < 3.1')
options = req.get_options()
threaded = options.get('multithread', '').lower()
if threaded == 'on':
threaded = True
elif threaded == 'off':
threaded = False
else:
raise ValueError(bad_value % 'multithread')
forked = options.get('multiprocess', '').lower()
if forked == 'on':
forked = True
elif forked == 'off':
forked = False
else:
raise ValueError(bad_value % 'multiprocess')
sn = cherrypy.tree.script_name(req.uri or '/')
if sn is None:
send_response(req, '404 Not Found', [], '')
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.uri
qs = req.args or ''
reqproto = req.protocol
headers = list(req.headers_in.copy().items())
rfile = _ReadOnlyRequest(req)
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(local, remote, scheme,
'HTTP/1.1')
request.login = req.user
request.multithread = bool(threaded)
request.multiprocess = bool(forked)
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, reqproto, headers, rfile)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same URL '
'twice: %r' % ir.path)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
send_response(
req, response.output_status, response.header_list,
response.body, response.stream)
finally:
app.release_serving()
except Exception:
tb = format_exc()
cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
s, h, b = bare_error()
send_response(req, s, h, b)
return apache.OK
def send_response(req, status, headers, body, stream=False):
# Set response status
req.status = int(status[:3])
# Set response headers
req.content_type = 'text/plain'
for header, value in headers:
if header.lower() == 'content-type':
req.content_type = value
continue
req.headers_out.add(header, value)
if stream:
# Flush now so the status and headers are sent immediately.
req.flush()
# Set response body
for seg in always_iterable(body):
req.write(seg)
# --------------- Startup tools for CherryPy + mod_python --------------- #
try:
import subprocess
def popen(fullcmd):
p = subprocess.Popen(fullcmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
return p.stdout
except ImportError:
def popen(fullcmd):
pipein, pipeout = os.popen4(fullcmd)
return pipeout
def read_process(cmd, args=''):
fullcmd = '%s %s' % (cmd, args)
pipeout = popen(fullcmd)
try:
firstline = pipeout.readline()
cmd_not_found = re.search(
b'(not recognized|No such file|not found)',
firstline,
re.IGNORECASE
)
if cmd_not_found:
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
class ModPythonServer(object):
template = """
# Apache2 server configuration file for running CherryPy with mod_python.
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
<Location %(loc)s>
SetHandler python-program
PythonHandler %(handler)s
PythonDebug On
%(opts)s
</Location>
"""
def __init__(self, loc='/', port=80, opts=None, apache_path='apache',
handler='cherrypy._cpmodpy::handler'):
self.loc = loc
self.port = port
        self.opts = opts or []  # guard against the default None so start() can iterate
self.apache_path = apache_path
self.handler = handler
def start(self):
opts = ''.join([' PythonOption %s %s\n' % (k, v)
for k, v in self.opts])
conf_data = self.template % {'port': self.port,
'loc': self.loc,
'opts': opts,
'handler': self.handler,
}
mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')
        with open(mpconf, 'w') as f:  # conf_data is a str, so write in text mode
f.write(conf_data)
response = read_process(self.apache_path, '-k start -f %s' % mpconf)
self.ready = True
return response
def stop(self):
os.popen('apache -k stop')
self.ready = False
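# A hypothetical sketch (added for illustration; not part of the original
# module): render the Apache config text that `ModPythonServer.start()`
# would write, without touching Apache or the filesystem. The
# 'myapp::setup_server' option value is a made-up placeholder.
if __name__ == '__main__':  # pragma: no cover
    _srv = ModPythonServer(loc='/', port=8080,
                           opts=[('cherrypy.setup', 'myapp::setup_server')])
    _opts = ''.join(' PythonOption %s %s\n' % kv for kv in _srv.opts)
    print(_srv.template % {'port': _srv.port, 'loc': _srv.loc,
                           'opts': _opts, 'handler': _srv.handler})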
| 11,097 | Python | .py | 286 | 27.821678 | 80 | 0.551368 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,569 | _cpchecker.py | rembo10_headphones/lib/cherrypy/_cpchecker.py |
"""Checker for CherryPy sites and mounted apps."""
import os
import warnings
import builtins
import cherrypy
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
When this object is called at engine startup, it executes each
of its own methods whose names start with ``check_``. If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False::
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace ``check_*`` methods in this way.
"""
on = True
"""If True (the default), run all checks; if False, turn off all checks."""
def __init__(self):
"""Initialize Checker instance."""
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith('check_'):
method = getattr(self, name)
if method and hasattr(method, '__call__'):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Format a warning."""
return 'CherryPy Checker:\n%s\n\n' % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
"""Check for App config with sections that repeat script_name."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip('/').split('/')
for key in app.config.keys():
key_atoms = key.strip('/').split('/')
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
'The application mounted at %r has config '
'entries that start with its script name: %r' % (sn,
key))
def check_site_config_entries_in_app_config(self):
"""Check for mounted Applications that have site-scoped config."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in app.config.items():
if section.startswith('/'):
for key, value in entries.items():
for n in ('engine.', 'server.', 'tree.', 'checker.'):
if key.startswith(n):
msg.append('[%s] %s = %s' %
(section, key, value))
if msg:
msg.insert(0,
'The application mounted at %r contains the '
'following config entries, which are only allowed '
'in site-wide config. Move them to a [global] '
'section and pass them to cherrypy.config.update() '
'instead of tree.mount().' % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
"""Check for mounted Applications that have no config."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = 'The Application mounted at %r has an empty config.' % sn
if self.global_config_contained_paths:
msg += (' It looks like the config you passed to '
'cherrypy.config.update() contains application-'
'specific sections. You must explicitly pass '
'application config via '
'cherrypy.tree.mount(..., config=app_config)')
warnings.warn(msg)
return
def check_app_config_brackets(self):
"""Check for App config with extraneous brackets in section names."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith('[') or key.endswith(']'):
warnings.warn(
'The application mounted at %r has config '
'section names with extraneous brackets: %r. '
'Config *files* need brackets; config *dicts* '
'(e.g. passed to tree.mount) do not.' % (sn, key))
def check_static_paths(self):
"""Check Application config for incorrect static paths."""
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + '/dummy.html')
conf = request.config.get
if conf('tools.staticdir.on', False):
msg = ''
root = conf('tools.staticdir.root')
dir = conf('tools.staticdir.dir')
if dir is None:
msg = 'tools.staticdir.dir is not set.'
else:
fulldir = ''
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ('dir is an absolute path, even '
'though a root is provided.')
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += (
'\nIf you meant to serve the '
'filesystem folder at %r, remove the '
'leading slash from dir.' % (testdir,))
else:
if not root:
msg = (
'dir is a relative path and '
'no root provided.')
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = ('%r is not an absolute path.' % (
fulldir,))
if fulldir and not os.path.exists(fulldir):
if msg:
msg += '\n'
msg += ('%r (root + dir) is not an existing '
'filesystem path.' % fulldir)
if msg:
warnings.warn('%s\nsection: [%s]\nroot: %r\ndir: %r'
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k in conf:
if k in self.obsolete:
warnings.warn('%r is obsolete. Use %r instead.\n'
'section: [%s]' %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn('%r is deprecated. Use %r instead.\n'
'section: [%s]' %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn('%r is obsolete. Use %r instead.'
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn('%r is deprecated. Use %r instead.'
% (section, self.deprecated[section]))
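# Illustrative sketch (hypothetical call, using the cherrypy.checker
# instance): feeding _compat a config dict containing an obsolete key
# emits a warning that points at the replacement.
#
#     cherrypy.checker._compat({'global': {'log_file': 'site.log'}})
#     # UserWarning: 'log_file' is obsolete. Use 'log.error_file' instead.
#     #              section: [global]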
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ['wsgi']
ns.extend(app.toolboxes)
ns.extend(app.namespaces)
ns.extend(app.request_class.namespaces)
ns.extend(cherrypy.config.namespaces)
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith('/')
if is_path_section and isinstance(conf, dict):
for k in conf:
atoms = k.split('.')
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if atoms[0] == 'cherrypy' and atoms[1] in ns:
msg = (
'The config entry %r is invalid; '
'try %r instead.\nsection: [%s]'
% (k, '.'.join(atoms[1:]), section))
else:
msg = (
'The config entry %r is invalid, '
'because the %r config namespace '
'is unknown.\n'
'section: [%s]' % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == 'tools':
if atoms[1] not in dir(cherrypy.tools):
msg = (
'The config entry %r may be invalid, '
'because the %r tool was not found.\n'
'section: [%s]' % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + '.' + name] = vtype
traverse(cherrypy.request, 'request')
traverse(cherrypy.response, 'response')
traverse(cherrypy.server, 'server')
traverse(cherrypy.engine, 'engine')
traverse(cherrypy.log, 'log')
def _known_types(self, config):
msg = ('The config entry %r in section %r is of type %r, '
'which does not match the expected type %r.')
for section, conf in config.items():
if not isinstance(conf, dict):
conf = {section: conf}
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'.
See #711.
"""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
'cause problems on newer systems, since '
"'localhost' can map to either an IPv4 or an "
"IPv6 address. You should use '127.0.0.1' "
"or '[::1]' instead.")
| 14,584 | Python | .py | 289 | 32.294118 | 79 | 0.472927 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,570 | _cpcompat.py | rembo10_headphones/lib/cherrypy/_cpcompat.py |
"""Compatibility code for using CherryPy with various versions of Python.
To retain compatibility with older Python versions, this module provides a
useful abstraction over the differences between Python versions, sometimes by
preferring a newer idiom, sometimes an older one, and sometimes a custom one.
In particular, the native 'str' type (and '' literals) is a byte string on
Python 2 but a unicode string on Python 3. We will call each of these the
'native string' type for each version. Because of this major difference,
this module provides two functions: 'ntob', which translates native strings
(of type 'str') into byte strings regardless of Python version, and 'ntou',
which translates native strings to unicode strings.
Try not to use the compatibility functions 'ntob', 'ntou', 'tonative'.
They were created with Python 2.3-2.5 compatibility in mind.
Instead, use unicode literals (from __future__) and bytes literals
and their .encode/.decode methods as needed.
"""
import http.client
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
assert_native(n)
# In Python 3, the native string type is unicode
return n.encode(encoding)
def ntou(n, encoding='ISO-8859-1'):
"""Return the given native string as a unicode string with the given
encoding.
"""
assert_native(n)
# In Python 3, the native string type is unicode
return n
def tonative(n, encoding='ISO-8859-1'):
"""Return the given string as a native string in the given encoding."""
# In Python 3, the native string type is unicode
if isinstance(n, bytes):
return n.decode(encoding)
return n
def assert_native(n):
if not isinstance(n, str):
raise TypeError('n must be a native str (got %s)' % type(n).__name__)
# Some platforms don't expose HTTPSConnection, so handle it separately
HTTPSConnection = getattr(http.client, 'HTTPSConnection', None)
text_or_bytes = str, bytes
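# Minimal usage sketch (illustrative, not part of the module):
#
#     ntob('abc')           # -> b'abc' (native str encoded, ISO-8859-1)
#     ntou('abc')           # -> 'abc' (already unicode on Python 3)
#     tonative(b'abc')      # -> 'abc' (bytes decoded to the native str)
#     assert_native(b'abc')  # TypeError: n must be a native str (got bytes)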
| 1,992 | Python | .py | 43 | 43 | 78 | 0.74806 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,571 | __main__.py | rembo10_headphones/lib/cherrypy/__main__.py |
"""CherryPy'd cherryd daemon runner."""
from cherrypy.daemon import run
__name__ == '__main__' and run()
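# The one-liner above is a terse equivalent of the usual guard:
#
#     if __name__ == '__main__':
#         run()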
| 107 | Python | .py | 3 | 34 | 39 | 0.676471 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,572 | _cprequest.py | rembo10_headphones/lib/cherrypy/_cprequest.py |
import sys
import time
import collections
import operator
from http.cookies import SimpleCookie, CookieError
import uuid
from more_itertools import consume
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy import _cpreqbody
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil, reprconf, encoding
class Hook(object):
"""A callback and its metadata: failsafe, priority, and kwargs."""
callback = None
"""
The bare callable that this Hook object is wrapping, which will
be called when the Hook is called."""
failsafe = False
"""
If True, the callback is guaranteed to run even if other callbacks
from the same call point raise exceptions."""
priority = 50
"""Defines the order of execution for a list of Hooks.
Priority numbers should be limited to the closed interval [0, 100],
but values outside this range are acceptable, as are fractional
values.
"""
kwargs = {}
"""
A set of keyword arguments that will be passed to the
callable on each call."""
def __init__(self, callback, failsafe=None, priority=None, **kwargs):
self.callback = callback
if failsafe is None:
failsafe = getattr(callback, 'failsafe', False)
self.failsafe = failsafe
if priority is None:
priority = getattr(callback, 'priority', 50)
self.priority = priority
self.kwargs = kwargs
def __lt__(self, other):
"""
Hooks sort by priority, ascending, such that
hooks of lower priority are run first.
"""
return self.priority < other.priority
def __call__(self):
"""Run self.callback(**self.kwargs)."""
return self.callback(**self.kwargs)
def __repr__(self):
cls = self.__class__
return ('%s.%s(callback=%r, failsafe=%r, priority=%r, %s)'
% (cls.__module__, cls.__name__, self.callback,
self.failsafe, self.priority,
', '.join(['%s=%r' % (k, v)
for k, v in self.kwargs.items()])))
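# Illustrative sketch: Hooks order by priority, so sorted() yields the
# execution order. The callbacks below are hypothetical examples.
#
#     early = Hook(lambda: print('auth'), priority=10)
#     late = Hook(lambda: print('render'), priority=90)
#     for hook in sorted([late, early]):
#         hook()   # prints 'auth' first, then 'render'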
class HookMap(dict):
"""A map of call points to lists of callbacks (Hook objects)."""
def __new__(cls, points=None):
d = dict.__new__(cls)
for p in points or []:
d[p] = []
return d
def __init__(self, *a, **kw):
pass
def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
"""Append a new Hook made from the supplied arguments."""
self[point].append(Hook(callback, failsafe, priority, **kwargs))
def run(self, point):
"""Execute all registered Hooks (callbacks) for the given point."""
self.run_hooks(iter(sorted(self[point])))
@classmethod
def run_hooks(cls, hooks):
"""Execute the indicated hooks, trapping errors.
Hooks with ``.failsafe == True`` are guaranteed to run
even if others at the same hookpoint fail. In this case,
log the failure and proceed on to the next hook. The only
way to stop all processing from one of these hooks is
to raise a BaseException like SystemExit or
KeyboardInterrupt and stop the whole server.
"""
assert isinstance(hooks, collections.abc.Iterator)
quiet_errors = (
cherrypy.HTTPError,
cherrypy.HTTPRedirect,
cherrypy.InternalRedirect,
)
safe = filter(operator.attrgetter('failsafe'), hooks)
for hook in hooks:
try:
hook()
except quiet_errors:
cls.run_hooks(safe)
raise
except Exception:
cherrypy.log(traceback=True, severity=40)
cls.run_hooks(safe)
raise
def __copy__(self):
newmap = self.__class__()
# We can't just use 'update' because we want copies of the
# mutable values (each is a list) as well.
for k, v in self.items():
newmap[k] = v[:]
return newmap
copy = __copy__
def __repr__(self):
cls = self.__class__
return '%s.%s(points=%r)' % (
cls.__module__,
cls.__name__,
list(self)
)
# Config namespace handlers
def hooks_namespace(k, v):
"""Attach bare hooks declared in config."""
# Use split again to allow multiple hooks for a single
# hookpoint per path (e.g. "hooks.before_handler.1").
# Little-known fact you only get from reading source ;)
hookpoint = k.split('.', 1)[0]
if isinstance(v, str):
v = cherrypy.lib.reprconf.attributes(v)
if not isinstance(v, Hook):
v = Hook(v)
cherrypy.serving.request.hooks[hookpoint].append(v)
def request_namespace(k, v):
"""Attach request attributes declared in config."""
# Provides config entries to set request.body attrs (like
# attempt_charsets).
if k[:5] == 'body.':
setattr(cherrypy.serving.request.body, k[5:], v)
else:
setattr(cherrypy.serving.request, k, v)
def response_namespace(k, v):
"""Attach response attributes declared in config."""
# Provides config entries to set default response headers
# http://cherrypy.dev/ticket/889
if k[:8] == 'headers.':
cherrypy.serving.response.headers[k.split('.', 1)[1]] = v
else:
setattr(cherrypy.serving.response, k, v)
def error_page_namespace(k, v):
"""Attach error pages declared in config."""
if k != 'default':
k = int(k)
cherrypy.serving.request.error_page[k] = v
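# Illustrative config sketch exercising the namespace handlers above;
# my_auth_check, Root and the 404 path are hypothetical application values.
#
#     config = {'/': {
#         'hooks.before_handler': my_auth_check,
#         'request.show_tracebacks': False,
#         'response.headers.X-Frame-Options': 'DENY',
#         'error_page.404': '/path/to/errors/404.html',
#     }}
#     cherrypy.tree.mount(Root(), '/', config)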
hookpoints = ['on_start_resource', 'before_request_body',
'before_handler', 'before_finalize',
'on_end_resource', 'on_end_request',
'before_error_response', 'after_error_response']
class Request(object):
"""An HTTP request.
This object represents the metadata of an HTTP request message; that
is, it contains attributes which describe the environment in which
the request URL, headers, and body were sent (if you want tools to
interpret the headers and body, those are elsewhere, mostly in
Tools). This 'metadata' consists of socket data, transport
characteristics, and the Request-Line. This object also contains
data regarding the configuration in effect for the given URL, and
the execution plan for generating a response.
"""
prev = None
"""The previous Request object (if any).
This should be None unless we are processing an InternalRedirect.
"""
# Conversation/connection attributes
local = httputil.Host('127.0.0.1', 80)
'An httputil.Host(ip, port, hostname) object for the server socket.'
remote = httputil.Host('127.0.0.1', 1111)
'An httputil.Host(ip, port, hostname) object for the client socket.'
scheme = 'http'
"""The protocol used between client and server.
In most cases, this will be either 'http' or 'https'.
"""
server_protocol = 'HTTP/1.1'
"""
The HTTP version for which the HTTP server is at least
conditionally compliant."""
base = ''
"""The (scheme://host) portion of the requested URL.
In some cases (e.g. when proxying via mod_rewrite), this may contain
path segments which cherrypy.url uses when constructing url's, but
which otherwise are ignored by CherryPy. Regardless, this value MUST
NOT end in a slash.
"""
# Request-Line attributes
request_line = ''
"""The complete Request-Line received from the client.
This is a single string consisting of the request method, URI, and
protocol version (joined by spaces). Any final CRLF is removed.
"""
method = 'GET'
"""Indicates the HTTP method to be performed on the resource identified by
the Request-URI.
Common methods include GET, HEAD, POST, PUT, and DELETE. CherryPy
allows any extension method; however, various HTTP servers and
gateways may restrict the set of allowable methods. CherryPy
applications SHOULD restrict the set (on a per-URI basis).
"""
query_string = ''
"""
The query component of the Request-URI, a string of information to be
interpreted by the resource. The query portion of a URI follows the
path component, and is separated by a '?'. For example, the URI
'http://www.cherrypy.dev/wiki?a=3&b=4' has the query component,
'a=3&b=4'."""
query_string_encoding = 'utf8'
"""
The encoding expected for query string arguments after % HEX HEX decoding.
If a query string is provided that cannot be decoded with this encoding,
404 is raised (since technically it's a different URI). If you want
arbitrary encodings to not error, set this to 'Latin-1'; you can then
encode back to bytes and re-decode to whatever encoding you like later.
"""
protocol = (1, 1)
"""The HTTP protocol version corresponding to the set
of features which should be allowed in the response. If BOTH
the client's request message AND the server's level of HTTP
compliance is HTTP/1.1, this attribute will be the tuple (1, 1).
If either is 1.0, this attribute will be the tuple (1, 0).
Lower HTTP protocol versions are not explicitly supported."""
params = {}
"""
A dict which combines query string (GET) and request entity (POST)
variables. This is populated in two stages: GET params are added
before the 'on_start_resource' hook, and POST params are added
between the 'before_request_body' and 'before_handler' hooks.
"""
# Message attributes
header_list = []
"""A list of the HTTP request headers as (name, value) tuples.
In general, you should use request.headers (a dict) instead.
"""
headers = httputil.HeaderMap()
"""A dict-like object containing the request headers.
Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to :rfc:`2047` if necessary). See also:
httputil.HeaderMap, httputil.HeaderElement.
"""
cookie = SimpleCookie()
"""See help(Cookie)."""
rfile = None
"""
If the request included an entity (body), it will be available
as a stream in this attribute. However, the rfile will normally
be read for you between the 'before_request_body' hook and the
'before_handler' hook, and the resulting string is placed into
either request.params or the request.body attribute.
You may disable the automatic consumption of the rfile by setting
request.process_request_body to False, either in config for the desired
path, or in an 'on_start_resource' or 'before_request_body' hook.
WARNING: In almost every case, you should not attempt to read from the
rfile stream after CherryPy's automatic mechanism has read it. If you
turn off the automatic parsing of rfile, you should read exactly the
number of bytes specified in request.headers['Content-Length'].
Ignoring either of these warnings may result in a hung request thread
or in corruption of the next (pipelined) request.
"""
process_request_body = True
"""
If True, the rfile (if any) is automatically read and parsed,
and the result placed into request.params or request.body."""
methods_with_bodies = ('POST', 'PUT', 'PATCH')
"""
A sequence of HTTP methods for which CherryPy will automatically
attempt to read a body from the rfile. If you are going to change
this property, modify it on the configuration (recommended)
or on the "hook point" `on_start_resource`.
"""
body = None
"""
If the request Content-Type is 'application/x-www-form-urlencoded'
or multipart, this will be None. Otherwise, this will be an instance
of :class:`RequestBody<cherrypy._cpreqbody.RequestBody>` (which you
can .read()); this value is set between the 'before_request_body' and
'before_handler' hooks (assuming that process_request_body is True).
"""
# Dispatch attributes
dispatch = cherrypy.dispatch.Dispatcher()
"""
The object which looks up the 'page handler' callable and collects
config for the current request based on the path_info, other
request attributes, and the application architecture. The core
calls the dispatcher as early as possible, passing it a 'path_info'
argument.
The default dispatcher discovers the page handler by matching
path_info to a hierarchical arrangement of objects, starting at
request.app.root. See help(cherrypy.dispatch) for more information.
"""
script_name = ''
"""The 'mount point' of the application which is handling this request.
This attribute MUST NOT end in a slash. If the script_name refers to
the root of the URI, it MUST be an empty string (not "/").
"""
path_info = '/'
"""The 'relative path' portion of the Request-URI.
This is relative to the script_name ('mount point') of the
application which is handling this request.
"""
login = None
"""
When authentication is used during the request processing, this is set
to 'False' if it failed and to the 'username' value if it succeeded.
The default 'None' implies that no authentication happened."""
# Note that cherrypy.url uses "if request.app:" to determine whether
# the call is during a real HTTP request or not. So leave this None.
app = None
"""The cherrypy.Application object which is handling this request."""
handler = None
"""
The function, method, or other callable which CherryPy will call to
produce the response. The discovery of the handler and the arguments
it will receive are determined by the request.dispatch object.
By default, the handler is discovered by walking a tree of objects
starting at request.app.root, and is then passed all HTTP params
(from the query string and POST body) as keyword arguments."""
toolmaps = {}
"""
A nested dict of all Toolboxes and Tools in effect for this request,
of the form: {Toolbox.namespace: {Tool.name: config dict}}."""
config = None
"""A flat dict of all configuration entries which apply to the current
request.
These entries are collected from global config, application config
(based on request.path_info), and from handler config (exactly how
is governed by the request.dispatch object in effect for this
request; by default, handler config can be attached anywhere in the
tree between request.app.root and the final handler, and inherits
downward).
"""
is_index = None
"""
This will be True if the current request is mapped to an 'index'
resource handler (also, a 'default' handler if path_info ends with
a slash). The value may be used to automatically redirect the
user-agent to a 'more canonical' URL which either adds or removes
the trailing slash. See cherrypy.tools.trailing_slash."""
hooks = HookMap(hookpoints)
"""A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}.
Each key is a str naming the hook point, and each value is a list
of hooks which will be called at that hook point during this request.
The list of hooks is generally populated as early as possible (mostly
from Tools specified in config), but may be extended at any time.
See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools.
"""
error_response = cherrypy.HTTPError(500).set_response
"""
The no-arg callable which will handle unexpected, untrapped errors
during request processing. This is not used for expected exceptions
(like NotFound, HTTPError, or HTTPRedirect) which are raised in
response to expected conditions (those should be customized either
via request.error_page or by overriding HTTPError.set_response).
By default, error_response uses HTTPError(500) to return a generic
error response to the user-agent."""
error_page = {}
"""A dict of {error code: response filename or callable} pairs.
The error code must be an int representing a given HTTP error code,
or the string 'default', which will be used if no matching entry is
found for a given numeric code.
If a filename is provided, the file should contain a Python string-
formatting template, and can expect by default to receive format
values with the mapping keys %(status)s, %(message)s, %(traceback)s,
and %(version)s. The set of format mappings can be extended by
overriding HTTPError.set_response.
If a callable is provided, it will be called by default with keyword
arguments 'status', 'message', 'traceback', and 'version', as for a
string-formatting template. The callable must return a string or
iterable of strings which will be set to response.body. It may also
override headers or perform any other processing.
If no entry is given for an error code, and no 'default' entry
exists, a default template will be used.
"""
show_tracebacks = True
"""
If True, unexpected errors encountered during request processing will
include a traceback in the response body."""
show_mismatched_params = True
"""
If True, mismatched parameters encountered during PageHandler invocation
processing will be included in the response body."""
throws = (KeyboardInterrupt, SystemExit, cherrypy.InternalRedirect)
"""The sequence of exceptions which Request.run does not trap."""
throw_errors = False
"""
If True, Request.run will not trap any errors (except HTTPRedirect and
HTTPError, which are more properly called 'exceptions', not errors)."""
closed = False
"""True once the close method has been called, False otherwise."""
stage = None
"""A string containing the stage reached in the request-handling process.
This is useful when debugging a live server with hung requests.
"""
unique_id = None
"""A lazy object generating and memorizing UUID4 on ``str()`` render."""
namespaces = reprconf.NamespaceSet(
**{'hooks': hooks_namespace,
'request': request_namespace,
'response': response_namespace,
'error_page': error_page_namespace,
'tools': cherrypy.tools,
})
def __init__(self, local_host, remote_host, scheme='http',
server_protocol='HTTP/1.1'):
"""Populate a new Request object.
local_host should be an httputil.Host object with the server
info. remote_host should be an httputil.Host object with the
client info. scheme should be a string, either "http" or
"https".
"""
self.local = local_host
self.remote = remote_host
self.scheme = scheme
self.server_protocol = server_protocol
self.closed = False
# Put a *copy* of the class error_page into self.
self.error_page = self.error_page.copy()
# Put a *copy* of the class namespaces into self.
self.namespaces = self.namespaces.copy()
self.stage = None
self.unique_id = LazyUUID4()
def close(self):
"""Run cleanup code.
(Core)
"""
if not self.closed:
self.closed = True
self.stage = 'on_end_request'
self.hooks.run('on_end_request')
self.stage = 'close'
def run(self, method, path, query_string, req_protocol, headers, rfile):
r"""Process the Request. (Core)
method, path, query_string, and req_protocol should be pulled directly
from the Request-Line (e.g. "GET /path?key=val HTTP/1.0").
path
This should be %XX-unquoted, but query_string should not be.
When using Python 2, they both MUST be byte strings,
not unicode strings.
When using Python 3, they both MUST be unicode strings,
not byte strings, and preferably not bytes \x00-\xFF
disguised as unicode.
headers
A list of (name, value) tuples.
rfile
A file-like object containing the HTTP request entity.
When run() is done, the returned object should have 3 attributes:
* status, e.g. "200 OK"
* header_list, a list of (name, value) tuples
* body, an iterable yielding strings
Consumer code (HTTP servers) should then access these response
attributes to build the outbound stream.
"""
response = cherrypy.serving.response
self.stage = 'run'
try:
self.error_response = cherrypy.HTTPError(500).set_response
self.method = method
path = path or '/'
self.query_string = query_string or ''
self.params = {}
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
sp = int(self.server_protocol[5]), int(self.server_protocol[7])
self.protocol = min(rp, sp)
response.headers.protocol = self.protocol
# Rebuild first line of the request (e.g. "GET /path HTTP/1.0").
url = path
if query_string:
url += '?' + query_string
self.request_line = '%s %s %s' % (method, url, req_protocol)
self.header_list = list(headers)
self.headers = httputil.HeaderMap()
self.rfile = rfile
self.body = None
self.cookie = SimpleCookie()
self.handler = None
# path_info should be the path from the
# app root (script_name) to the handler.
self.script_name = self.app.script_name
self.path_info = pi = path[len(self.script_name):]
self.stage = 'respond'
self.respond(pi)
except self.throws:
raise
except Exception:
if self.throw_errors:
raise
else:
# Failure in setup, error handler or finalize. Bypass them.
# Can't use handle_error because we may not have hooks yet.
cherrypy.log(traceback=True, severity=40)
if self.show_tracebacks:
body = format_exc()
else:
body = ''
r = bare_error(body)
response.output_status, response.header_list, response.body = r
if self.method == 'HEAD':
# HEAD requests MUST NOT return a message-body in the response.
response.body = []
try:
cherrypy.log.access()
except Exception:
cherrypy.log.error(traceback=True)
return response
def respond(self, path_info):
"""Generate a response for the resource at self.path_info.
(Core)
"""
try:
try:
try:
self._do_respond(path_info)
except (cherrypy.HTTPRedirect, cherrypy.HTTPError):
inst = sys.exc_info()[1]
inst.set_response()
self.stage = 'before_finalize (HTTPError)'
self.hooks.run('before_finalize')
cherrypy.serving.response.finalize()
finally:
self.stage = 'on_end_resource'
self.hooks.run('on_end_resource')
except self.throws:
raise
except Exception:
if self.throw_errors:
raise
self.handle_error()
def _do_respond(self, path_info):
response = cherrypy.serving.response
if self.app is None:
raise cherrypy.NotFound()
self.hooks = self.__class__.hooks.copy()
self.toolmaps = {}
# Get the 'Host' header, so we can HTTPRedirect properly.
self.stage = 'process_headers'
self.process_headers()
self.stage = 'get_resource'
self.get_resource(path_info)
self.body = _cpreqbody.RequestBody(
self.rfile, self.headers, request_params=self.params)
self.namespaces(self.config)
self.stage = 'on_start_resource'
self.hooks.run('on_start_resource')
# Parse the querystring
self.stage = 'process_query_string'
self.process_query_string()
# Process the body
if self.process_request_body:
if self.method not in self.methods_with_bodies:
self.process_request_body = False
self.stage = 'before_request_body'
self.hooks.run('before_request_body')
if self.process_request_body:
self.body.process()
# Run the handler
self.stage = 'before_handler'
self.hooks.run('before_handler')
if self.handler:
self.stage = 'handler'
response.body = self.handler()
# Finalize
self.stage = 'before_finalize'
self.hooks.run('before_finalize')
response.finalize()
def process_query_string(self):
"""Parse the query string into Python structures.
(Core)
"""
try:
p = httputil.parse_query_string(
self.query_string, encoding=self.query_string_encoding)
except UnicodeDecodeError:
raise cherrypy.HTTPError(
404, 'The given query string could not be processed. Query '
'strings for this resource must be encoded with %r.' %
self.query_string_encoding)
self.params.update(p)
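# Illustrative sketch of the helper used above (hypothetical input):
#
#     from cherrypy.lib import httputil
#     httputil.parse_query_string('a=3&b=4')   # -> {'a': '3', 'b': '4'}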
def process_headers(self):
"""Parse HTTP header data into Python structures.
(Core)
"""
# Process the headers into self.headers
headers = self.headers
for name, value in self.header_list:
# Call title() now (and use dict.__method__(headers))
# so title doesn't have to be called twice.
name = name.title()
value = value.strip()
headers[name] = httputil.decode_TEXT_maybe(value)
# Some clients, notably Konqueror, supply multiple
# cookies on different lines with the same key. To
# handle this case, store all cookies in self.cookie.
if name == 'Cookie':
try:
self.cookie.load(value)
except CookieError as exc:
raise cherrypy.HTTPError(400, str(exc))
if not dict.__contains__(headers, 'Host'):
# All Internet-based HTTP/1.1 servers MUST respond with a 400
# (Bad Request) status code to any HTTP/1.1 request message
# which lacks a Host header field.
if self.protocol >= (1, 1):
msg = "HTTP/1.1 requires a 'Host' request header."
raise cherrypy.HTTPError(400, msg)
else:
headers['Host'] = httputil.SanitizedHost(dict.get(headers, 'Host'))
host = dict.get(headers, 'Host')
if not host:
host = self.local.name or self.local.ip
self.base = '%s://%s' % (self.scheme, host)
def get_resource(self, path):
"""Call a dispatcher (which sets self.handler and .config).
(Core)
"""
# First, see if there is a custom dispatch at this URI. Custom
# dispatchers can only be specified in app.config, not in _cp_config
# (since custom dispatchers may not even have an app.root).
dispatch = self.app.find_config(
path, 'request.dispatch', self.dispatch)
# dispatch() should set self.handler and self.config
dispatch(path)
def handle_error(self):
"""Handle the last unanticipated exception.
(Core)
"""
try:
self.hooks.run('before_error_response')
if self.error_response:
self.error_response()
self.hooks.run('after_error_response')
cherrypy.serving.response.finalize()
except cherrypy.HTTPRedirect:
inst = sys.exc_info()[1]
inst.set_response()
cherrypy.serving.response.finalize()
class ResponseBody(object):
"""The body of the HTTP response (the response entity)."""
unicode_err = ('Page handlers MUST return bytes. Use tools.encode '
'if you wish to return unicode.')
def __get__(self, obj, objclass=None):
if obj is None:
# When calling on the class instead of an instance...
return self
else:
return obj._body
def __set__(self, obj, value):
# Convert the given value to an iterable object.
if isinstance(value, str):
raise ValueError(self.unicode_err)
elif isinstance(value, list):
# every item in a list must be bytes...
if any(isinstance(item, str) for item in value):
raise ValueError(self.unicode_err)
obj._body = encoding.prepare_iter(value)
class Response(object):
"""An HTTP Response, including status, headers, and body."""
status = ''
"""The HTTP Status-Code and Reason-Phrase."""
header_list = []
"""A list of the HTTP response headers as (name, value) tuples.
In general, you should use response.headers (a dict) instead. This
attribute is generated from response.headers and is not valid until
after the finalize phase.
"""
headers = httputil.HeaderMap()
"""
A dict-like object containing the response headers. Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to :rfc:`2047` if necessary).
.. seealso:: classes :class:`HeaderMap`, :class:`HeaderElement`
"""
cookie = SimpleCookie()
"""See help(Cookie)."""
body = ResponseBody()
"""The body (entity) of the HTTP response."""
time = None
"""The value of time.time() when created.
Use in HTTP dates.
"""
stream = False
"""If False, buffer the response body."""
def __init__(self):
self.status = None
self.header_list = None
self._body = []
self.time = time.time()
self.headers = httputil.HeaderMap()
# Since we know all our keys are titled strings, we can
# bypass HeaderMap.update and get a big speed boost.
dict.update(self.headers, {
'Content-Type': 'text/html',
'Server': 'CherryPy/' + cherrypy.__version__,
'Date': httputil.HTTPDate(self.time),
})
self.cookie = SimpleCookie()
def collapse_body(self):
"""Collapse self.body to a single string; replace it and return it."""
new_body = b''.join(self.body)
self.body = new_body
return new_body
def _flush_body(self):
"""Discard self.body but consume any generator such that any
finalization can occur, such as is required by caching.tee_output()."""
consume(iter(self.body))
def finalize(self):
"""Transform headers (and cookies) into self.header_list.
(Core)
"""
try:
code, reason, _ = httputil.valid_status(self.status)
except ValueError:
raise cherrypy.HTTPError(500, sys.exc_info()[1].args[0])
headers = self.headers
self.status = '%s %s' % (code, reason)
self.output_status = ntob(str(code), 'ascii') + \
b' ' + headers.encode(reason)
if self.stream:
# The upshot: wsgiserver will chunk the response if
# you pop Content-Length (or set it explicitly to None).
# Note that lib.static sets C-L to the file's st_size.
if dict.get(headers, 'Content-Length') is None:
dict.pop(headers, 'Content-Length', None)
elif code < 200 or code in (204, 205, 304):
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body."
dict.pop(headers, 'Content-Length', None)
self._flush_body()
self.body = b''
else:
# Responses which are not streamed should have a Content-Length,
# but allow user code to set Content-Length if desired.
if dict.get(headers, 'Content-Length') is None:
content = self.collapse_body()
dict.__setitem__(headers, 'Content-Length', len(content))
# Transform our header dict into a list of tuples.
self.header_list = h = headers.output()
cookie = self.cookie.output()
if cookie:
for line in cookie.split('\r\n'):
name, value = line.split(': ', 1)
if isinstance(name, str):
name = name.encode('ISO-8859-1')
if isinstance(value, str):
value = headers.encode(value)
h.append((name, value))
class LazyUUID4(object):
def __str__(self):
"""Return UUID4 and keep it for future calls."""
return str(self.uuid4)
@property
def uuid4(self):
"""Provide unique id on per-request basis using UUID4.
It's evaluated lazily on render.
"""
try:
self._uuid4
except AttributeError:
# evaluate on first access
self._uuid4 = uuid.uuid4()
return self._uuid4
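# Illustrative sketch: the UUID is generated on the first str() render
# and memoized, so every later render returns the same value.
#
#     uid = LazyUUID4()
#     assert str(uid) == str(uid)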
| 34,559 | Python | .py | 778 | 35.807198 | 79 | 0.632796 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,573 | _cplogging.py | rembo10_headphones/lib/cherrypy/_cplogging.py |
"""
Simple config
=============
Although CherryPy uses the :mod:`Python logging module <logging>`, it does so
behind the scenes so that simple logging is simple, but complicated logging
is still possible. "Simple" logging means that you can log to the screen
(i.e. console/stdout) or to a file, and that you can easily have separate
error and access log files.
Here are the simplified logging settings. You use these by adding lines to
your config file or dict. You should set these at either the global level or
per application (see next), but generally not both.
* ``log.screen``: Set this to True to have both "error" and "access" messages
printed to stdout.
* ``log.access_file``: Set this to an absolute filename where you want
"access" messages written.
* ``log.error_file``: Set this to an absolute filename where you want "error"
messages written.
Many events are automatically logged; to log your own application events, call
:func:`cherrypy.log`.
Architecture
============
Separate scopes
---------------
CherryPy provides log managers at both the global and application layers.
This means you can have one set of logging rules for your entire site,
and another set of rules specific to each application. The global log
manager is found at :func:`cherrypy.log`, and the log manager for each
application is found at :attr:`app.log<cherrypy._cptree.Application.log>`.
If you're inside a request, the latter is reachable from
``cherrypy.request.app.log``; if you're outside a request, you'll have to
obtain a reference to the ``app``: either the return value of
:func:`tree.mount()<cherrypy._cptree.Tree.mount>` or, if you used
:func:`quickstart()<cherrypy.quickstart>` instead, via
``cherrypy.tree.apps['/']``.
By default, the global logs are named "cherrypy.error" and "cherrypy.access",
and the application logs are named "cherrypy.error.2378745" and
"cherrypy.access.2378745" (the number is the id of the Application object).
This means that the application logs "bubble up" to the site logs, so if your
application has no log handlers, the site-level handlers will still log the
messages.
Errors vs. Access
-----------------
Each log manager handles both "access" messages (one per HTTP request) and
"error" messages (everything else). Note that the "error" log is not just for
errors! The format of access messages is highly formalized, but the error log
isn't--it receives messages from a variety of sources (including full error
tracebacks, if enabled).
If you are logging the access log and error log to the same source, then there
is a possibility that a specially crafted error message may replicate an access
log message as described in CWE-117. In this case it is the application
developer's responsibility to manually escape data before
using CherryPy's log()
functionality, or they may create an application that is vulnerable to CWE-117.
This can be achieved by using a custom handler to escape any special
characters, attached as described below.
Custom Handlers
===============
The simple settings above work by manipulating Python's standard :mod:`logging`
module. So when you need something more complex, the full power of the standard
module is yours to exploit. You can borrow or create custom handlers, formats,
filters, and much more. Here's an example that skips the standard FileHandler
and uses a RotatingFileHandler instead:
::
#python
# 'app' is your mounted cherrypy.Application instance.
from logging import DEBUG, handlers
from cherrypy import _cplogging
log = app.log
# Remove the default FileHandlers if present.
log.error_file = ""
log.access_file = ""
maxBytes = getattr(log, "rot_maxBytes", 10000000)
backupCount = getattr(log, "rot_backupCount", 1000)
# Make a new RotatingFileHandler for the error log.
fname = getattr(log, "rot_error_file", "error.log")
h = handlers.RotatingFileHandler(fname, 'a', maxBytes, backupCount)
h.setLevel(DEBUG)
h.setFormatter(_cplogging.logfmt)
log.error_log.addHandler(h)
# Make a new RotatingFileHandler for the access log.
fname = getattr(log, "rot_access_file", "access.log")
h = handlers.RotatingFileHandler(fname, 'a', maxBytes, backupCount)
h.setLevel(DEBUG)
h.setFormatter(_cplogging.logfmt)
log.access_log.addHandler(h)
The ``rot_*`` attributes are pulled straight from the application log object.
Since "log.*" config entries simply set attributes on the log object, you can
add custom attributes to your heart's content. Note that these handlers are
used ''instead'' of the default, simple handlers outlined above (so don't set
the "log.error_file" config entry, for example).
"""
import datetime
import logging
import os
import sys
import cherrypy
from cherrypy import _cperror
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
logging.Logger.manager.emittedNoHandlerWarning = 1
logfmt = logging.Formatter('%(message)s')
class NullHandler(logging.Handler):
"""A no-op logging handler to silence the logging.lastResort handler."""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
class LogManager(object):
"""An object to assist both simple and advanced logging.
``cherrypy.log`` is an instance of this class.
"""
appid = None
"""The id() of the Application object which owns this log manager.
If this is a global log manager, appid is None.
"""
error_log = None
"""The actual :class:`logging.Logger` instance for error messages."""
access_log = None
"""The actual :class:`logging.Logger` instance for access messages."""
access_log_format = '{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}"'
logger_root = None
"""The "top-level" logger name.
This string will be used as the first segment in the Logger names.
The default is "cherrypy", for example, in which case the Logger names
will be of the form::
cherrypy.error.<appid>
cherrypy.access.<appid>
"""
def __init__(self, appid=None, logger_root='cherrypy'):
self.logger_root = logger_root
self.appid = appid
if appid is None:
self.error_log = logging.getLogger('%s.error' % logger_root)
self.access_log = logging.getLogger('%s.access' % logger_root)
else:
self.error_log = logging.getLogger(
'%s.error.%s' % (logger_root, appid))
self.access_log = logging.getLogger(
'%s.access.%s' % (logger_root, appid))
self.error_log.setLevel(logging.INFO)
self.access_log.setLevel(logging.INFO)
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
self.error_log.addHandler(NullHandler())
self.access_log.addHandler(NullHandler())
cherrypy.engine.subscribe('graceful', self.reopen_files)
def reopen_files(self):
"""Close and reopen all file handlers."""
for log in (self.error_log, self.access_log):
for h in log.handlers:
if isinstance(h, logging.FileHandler):
h.acquire()
h.stream.close()
h.stream = open(h.baseFilename, h.mode)
h.release()
def error(self, msg='', context='', severity=logging.INFO,
traceback=False):
"""Write the given ``msg`` to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
If ``traceback`` is True, the traceback of the current exception
(if any) will be appended to ``msg``.
"""
exc_info = None
if traceback:
exc_info = _cperror._exc_info()
self.error_log.log(
severity,
' '.join((self.time(), context, msg)),
exc_info=exc_info,
)
def __call__(self, *args, **kwargs):
"""An alias for ``error``."""
return self.error(*args, **kwargs)
def access(self):
"""Write to the access log (in Apache/NCSA Combined Log format).
See the
`apache documentation
<http://httpd.apache.org/docs/current/logs.html#combined>`_
for format details.
CherryPy calls this automatically for you. Note there are no arguments;
it collects the data itself from
:class:`cherrypy.request<cherrypy._cprequest.Request>`.
Like Apache started doing in 2.0.46, non-printable and other special
characters in %r (and we expand that to all parts) are escaped using
\\xhh sequences, where hh stands for the hexadecimal representation
of the raw byte. Exceptions from this rule are " and \\, which are
escaped by prepending a backslash, and all whitespace characters,
which are written in their C-style notation (\\n, \\t, etc).
"""
request = cherrypy.serving.request
remote = request.remote
response = cherrypy.serving.response
outheaders = response.headers
inheaders = request.headers
if response.output_status is None:
status = '-'
else:
status = response.output_status.split(b' ', 1)[0]
status = status.decode('ISO-8859-1')
atoms = {'h': remote.name or remote.ip,
'l': '-',
'u': getattr(request, 'login', None) or '-',
't': self.time(),
'r': request.request_line,
's': status,
'b': dict.get(outheaders, 'Content-Length', '') or '-',
'f': dict.get(inheaders, 'Referer', ''),
'a': dict.get(inheaders, 'User-Agent', ''),
'o': dict.get(inheaders, 'Host', '-'),
'i': request.unique_id,
'z': LazyRfc3339UtcTime(),
}
for k, v in atoms.items():
if not isinstance(v, str):
v = str(v)
v = v.replace('"', '\\"').encode('utf8')
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[2:-1]
# in python 3.0 the repr of bytes (as returned by encode)
# uses double \'s. But then the logger escapes them yet again,
# resulting in quadruple slashes. Remove the extra one here.
v = v.replace('\\\\', '\\')
atoms[k] = v
try:
self.access_log.log(
logging.INFO, self.access_log_format.format(**atoms))
except Exception:
self(traceback=True)
def time(self):
"""Return now() in Apache Common Log Format (no timezone)."""
now = datetime.datetime.now()
monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
month = monthnames[now.month - 1].capitalize()
return ('[%02d/%s/%04d:%02d:%02d:%02d]' %
(now.day, month, now.year, now.hour, now.minute, now.second))
def _get_builtin_handler(self, log, key):
for h in log.handlers:
if getattr(h, '_cpbuiltin', None) == key:
return h
# ------------------------- Screen handlers ------------------------- #
def _set_screen_handler(self, log, enable, stream=None):
h = self._get_builtin_handler(log, 'screen')
if enable:
if not h:
if stream is None:
stream = sys.stderr
h = logging.StreamHandler(stream)
h.setFormatter(logfmt)
h._cpbuiltin = 'screen'
log.addHandler(h)
elif h:
log.handlers.remove(h)
@property
def screen(self):
"""Turn stderr/stdout logging on or off.
If you set this to True, it'll add the appropriate StreamHandler
for you. If you set it to False, it will remove the handler.
"""
h = self._get_builtin_handler
has_h = h(self.error_log, 'screen') or h(self.access_log, 'screen')
return bool(has_h)
@screen.setter
def screen(self, newvalue):
self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr)
self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout)
# -------------------------- File handlers -------------------------- #
def _add_builtin_file_handler(self, log, fname):
h = logging.FileHandler(fname)
h.setFormatter(logfmt)
h._cpbuiltin = 'file'
log.addHandler(h)
def _set_file_handler(self, log, filename):
h = self._get_builtin_handler(log, 'file')
if filename:
if h:
if h.baseFilename != os.path.abspath(filename):
h.close()
log.handlers.remove(h)
self._add_builtin_file_handler(log, filename)
else:
self._add_builtin_file_handler(log, filename)
else:
if h:
h.close()
log.handlers.remove(h)
@property
def error_file(self):
"""The filename for self.error_log.
If you set this to a string, it'll add the appropriate FileHandler for
you. If you set it to ``None`` or ``''``, it will remove the handler.
"""
h = self._get_builtin_handler(self.error_log, 'file')
if h:
return h.baseFilename
return ''
@error_file.setter
def error_file(self, newvalue):
self._set_file_handler(self.error_log, newvalue)
@property
def access_file(self):
"""The filename for self.access_log.
If you set this to a string, it'll add the appropriate FileHandler for
you. If you set it to ``None`` or ``''``, it will remove the handler.
"""
h = self._get_builtin_handler(self.access_log, 'file')
if h:
return h.baseFilename
return ''
@access_file.setter
def access_file(self, newvalue):
self._set_file_handler(self.access_log, newvalue)
# ------------------------- WSGI handlers ------------------------- #
def _set_wsgi_handler(self, log, enable):
h = self._get_builtin_handler(log, 'wsgi')
if enable:
if not h:
h = WSGIErrorHandler()
h.setFormatter(logfmt)
h._cpbuiltin = 'wsgi'
log.addHandler(h)
elif h:
log.handlers.remove(h)
@property
def wsgi(self):
"""Write errors to wsgi.errors.
If you set this to True, it'll add the appropriate
:class:`WSGIErrorHandler<cherrypy._cplogging.WSGIErrorHandler>` for you
(which writes errors to ``wsgi.errors``).
If you set it to False, it will remove the handler.
"""
return bool(self._get_builtin_handler(self.error_log, 'wsgi'))
@wsgi.setter
def wsgi(self, newvalue):
self._set_wsgi_handler(self.error_log, newvalue)
class WSGIErrorHandler(logging.Handler):
"A handler class which writes logging records to environ['wsgi.errors']."
def flush(self):
"""Flushes the stream."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
stream.flush()
def emit(self, record):
"""Emit a record."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
try:
msg = self.format(record)
fs = '%s\n'
import types
# if no unicode support...
if not hasattr(types, 'UnicodeType'):
stream.write(fs % msg)
else:
try:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode('UTF-8'))
self.flush()
except Exception:
self.handleError(record)
class LazyRfc3339UtcTime(object):
def __str__(self):
"""Return datetime in RFC3339 UTC Format."""
iso_formatted_now = datetime.datetime.now(
datetime.timezone.utc,
).isoformat('T')
return f'{iso_formatted_now!s}Z'
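# Illustrative sketch: access_log_format is a str.format template over the
# atoms built in LogManager.access(); beyond the defaults, {o} (Host),
# {i} (unique request id) and {z} (RFC3339 UTC time) are available.
#
#     cherrypy.log.access_log_format = (
#         '{t} {h} "{r}" {s} {b} id={i} host={o} utc={z}'
#     )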
| 16,482 | Python | .py | 374 | 35.529412 | 79 | 0.619633 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,574 | _cpwsgi_server.py | rembo10_headphones/lib/cherrypy/_cpwsgi_server.py |
"""WSGI server interface (see PEP 333).
This adds some CP-specific bits to the framework-agnostic cheroot
package.
"""
import sys
import cheroot.wsgi
import cheroot.server
import cherrypy
class CPWSGIHTTPRequest(cheroot.server.HTTPRequest):
"""Wrapper for cheroot.server.HTTPRequest.
This is a layer which preserves the URI parsing mode that was
used before Cheroot v5.8.0.
"""
def __init__(self, server, conn):
"""Initialize HTTP request container instance.
Args:
server (cheroot.server.HTTPServer):
web server object receiving this request
conn (cheroot.server.HTTPConnection):
HTTP connection object for this request
"""
super(CPWSGIHTTPRequest, self).__init__(
server, conn, proxy_mode=True
)
class CPWSGIServer(cheroot.wsgi.Server):
"""Wrapper for cheroot.wsgi.Server.
cheroot has been designed to not reference CherryPy in any way, so
that it can be used in other frameworks and applications. Therefore,
we wrap it here, so we can set our own mount points from
cherrypy.tree and apply some attributes from config ->
cherrypy.server -> wsgi.Server.
"""
fmt = 'CherryPy/{cherrypy.__version__} {cheroot.wsgi.Server.version}'
version = fmt.format(**globals())
def __init__(self, server_adapter=cherrypy.server):
"""Initialize CPWSGIServer instance.
Args:
server_adapter (cherrypy._cpserver.Server): ...
"""
self.server_adapter = server_adapter
self.max_request_header_size = (
self.server_adapter.max_request_header_size or 0
)
self.max_request_body_size = (
self.server_adapter.max_request_body_size or 0
)
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
self.wsgi_version = self.server_adapter.wsgi_version
super(CPWSGIServer, self).__init__(
server_adapter.bind_addr, cherrypy.tree,
self.server_adapter.thread_pool,
server_name,
max=self.server_adapter.thread_pool_max,
request_queue_size=self.server_adapter.socket_queue_size,
timeout=self.server_adapter.socket_timeout,
shutdown_timeout=self.server_adapter.shutdown_timeout,
accepted_queue_size=self.server_adapter.accepted_queue_size,
accepted_queue_timeout=self.server_adapter.accepted_queue_timeout,
peercreds_enabled=self.server_adapter.peercreds,
peercreds_resolve_enabled=self.server_adapter.peercreds_resolve,
)
self.ConnectionClass.RequestHandlerClass = CPWSGIHTTPRequest
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
if sys.version_info >= (3, 0):
ssl_module = self.server_adapter.ssl_module or 'builtin'
else:
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.stats['Enabled'] = getattr(
self.server_adapter, 'statistics', False)
def error_log(self, msg='', level=20, traceback=False):
"""Write given message to the error log."""
cherrypy.engine.log(msg, level, traceback)
| 4,190 | Python | .py | 90 | 36.588889 | 78 | 0.652611 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,575 | _cpnative_server.py | rembo10_headphones/lib/cherrypy/_cpnative_server.py |
"""Native adapter for serving CherryPy via its builtin server."""
import logging
import sys
import io
import cheroot.server
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from ._cpcompat import tonative
class NativeGateway(cheroot.server.Gateway):
"""Native gateway implementation allowing to bypass WSGI."""
recursive = False
def respond(self):
"""Obtain response from CherryPy machinery and then send it."""
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr # FIXME: handle UNIX sockets
local = tonative(local[0]), local[1]
local = httputil.Host(local[0], local[1], '')
remote = tonative(req.conn.remote_addr), req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], '')
scheme = tonative(req.scheme)
sn = cherrypy.tree.script_name(tonative(req.uri or '/'))
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = tonative(req.method)
path = tonative(req.path)
qs = tonative(req.qs or '')
headers = (
(tonative(h), tonative(v))
for h, v in req.inheaders.items()
)
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local, remote, scheme, 'HTTP/1.1')
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the
# response
try:
request.run(
method, path, qs,
tonative(req.request_protocol),
headers, rfile,
)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same '
'URL twice: %r' % ir.path)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except Exception:
tb = format_exc()
# print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
"""Send response to HTTP request."""
req = self.req
# Set response status
req.status = status or b'500 Server Error'
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
class CPHTTPServer(cheroot.server.HTTPServer):
"""Wrapper for cheroot.server.HTTPServer.
cheroot has been designed to not reference CherryPy in any way, so
that it can be used in other frameworks and applications. Therefore,
we wrap it here, so we can apply some attributes from config ->
cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
"""Initialize CPHTTPServer."""
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
cheroot.server.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = (
self.server_adapter.max_request_header_size or 0)
self.max_request_body_size = (
self.server_adapter.max_request_body_size or 0)
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
| 6,677 | Python | .py | 143 | 30.286713 | 78 | 0.52389 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,576 | _cpwsgi.py | rembo10_headphones/lib/cherrypy/_cpwsgi.py |
"""WSGI interface (see PEP 333 and 3333).
Note that WSGI environ keys and values are 'native strings'; that is,
whatever the type of "" is. For Python 2, that's a byte string; for
Python 3, it's a unicode string. But PEP 3333 says: "even if Python's
str type is actually Unicode "under the hood", the content of native
strings must still be translatable to bytes via the Latin-1 encoding!"
"""
import sys as _sys
import io
import cherrypy as _cherrypy
from cherrypy._cpcompat import ntou
from cherrypy import _cperror
from cherrypy.lib import httputil
from cherrypy.lib import is_closable_iterator
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ.
"""
env1x = {}
url_encoding = environ[ntou('wsgi.url_encoding')]
for k, v in environ.copy().items():
if k in [ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING')]:
v = v.encode(url_encoding)
elif isinstance(v, str):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
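# Editor's sketch (not in the original source): what the downgrade above
# produces for a minimal, hypothetical u.x environ. Keys and values become
# bytes; the URL-ish keys use wsgi.url_encoding, everything else Latin-1.
#
#     env_ux = {ntou('wsgi.url_encoding'): ntou('utf-8'),
#               ntou('PATH_INFO'): ntou('/caf\u00e9'),
#               ntou('SERVER_NAME'): ntou('localhost')}
#     env_1x = downgrade_wsgi_ux_to_1x(env_ux)
#     # env_1x[b'PATH_INFO'] == b'/caf\xc3\xa9'   (encoded via utf-8)
#     # env_1x[b'SERVER_NAME'] == b'localhost'    (encoded via Latin-1)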
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example::
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(
RootApp,
domains={
'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
},
)
cherrypy.tree.graft(vhost)
"""
default = None
"""Required.
The default WSGI application.
"""
use_x_forwarded_host = True
"""If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying."""
domains = {}
"""A dict of {host header value: application} pairs.
The incoming "Host" request header is looked up in this dict, and,
if a match is found, the corresponding WSGI application will be
called instead of the default. Note that you often need separate
entries for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get('HTTP_X_FORWARDED_HOST', domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
except _cherrypy.InternalRedirect:
ir = _sys.exc_info()[1]
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
# Add the *previous* path_info + qs to redirections.
old_uri = sn + path
if qs:
old_uri += '?' + qs
redirections.append(old_uri)
if not self.recursive:
# Check to see if the new URI has been redirected to
# already
new_uri = sn + ir.path
if ir.query_string:
new_uri += '?' + ir.query_string
if new_uri in redirections:
ir.request.close()
tmpl = (
'InternalRedirector visited the same URL twice: %r'
)
raise RuntimeError(tmpl % new_uri)
# Munge the environment and try again.
environ['REQUEST_METHOD'] = 'GET'
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = io.BytesIO()
environ['CONTENT_LENGTH'] = '0'
environ['cherrypy.previous_request'] = ir.request
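# Editor's sketch (not in the original source): a hypothetical page
# handler that triggers the middleware above. cherrypy.InternalRedirect
# is the real exception; the client never sees a 3xx response.
#
#     class Root:
#         @_cherrypy.expose
#         def old(self):
#             raise _cherrypy.InternalRedirect('/new', query_string='a=1')
#
#         @_cherrypy.expose
#         def new(self, a=None):
#             return 'served internally, a=%r' % a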
class ExceptionTrapper(object):
"""WSGI middleware that traps exceptions."""
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(
self.nextapp,
environ,
start_response,
self.throws
)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(
self.nextapp, self.environ, self.start_response,
)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
def __next__(self):
return self.trap(next, self.iter_response)
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except Exception:
tb = _cperror.format_exc()
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ''
s, h, b = _cperror.bare_error(tb)
if True:
# What fun.
s = s.decode('ISO-8859-1')
h = [
(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in h
]
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except Exception:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return b''.join(b)
else:
return b
# WSGI-to-CP Adapter #
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
self.cpapp = cpapp
try:
self.environ = environ
self.run()
r = _cherrypy.serving.response
outstatus = r.output_status
if not isinstance(outstatus, bytes):
raise TypeError('response.output_status is not a byte string.')
outheaders = []
for k, v in r.header_list:
if not isinstance(k, bytes):
tmpl = 'response.header_list key %r is not a byte string.'
raise TypeError(tmpl % k)
if not isinstance(v, bytes):
tmpl = (
'response.header_list value %r is not a byte string.'
)
raise TypeError(tmpl % v)
outheaders.append((k, v))
if True:
# According to PEP 3333, when using Python 3, the response
# status and headers must be bytes masquerading as unicode;
# that is, they must be of type "str" but are restricted to
# code points in the "latin-1" set.
outstatus = outstatus.decode('ISO-8859-1')
outheaders = [
(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in outheaders
]
self.iter_response = iter(r.body)
self.write = start_response(outstatus, outheaders)
except BaseException:
self.close()
raise
def __iter__(self):
return self
def __next__(self):
return next(self.iter_response)
def close(self):
"""Close and de-reference the current request and response.
(Core)
"""
streaming = _cherrypy.serving.response.stream
self.cpapp.release_serving()
# We avoid the expense of examining the iterator to see if it's
# closable unless we are streaming the response, as that's the
# only situation where we are going to have an iterator which
# may not have been exhausted yet.
if streaming and is_closable_iterator(self.iter_response):
iter_close = self.iter_response.close
try:
iter_close()
except Exception:
_cherrypy.log(traceback=True, severity=40)
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host(
'',
int(env('SERVER_PORT', 80) or -1),
env('SERVER_NAME', ''),
)
remote = httputil.Host(
env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1) or -1),
env('REMOTE_HOST', ''),
)
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.1')
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(
self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''),
)
qs = self.environ.get('QUERY_STRING', '')
path, qs = self.recode_path_qs(path, qs) or (path, qs)
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {
'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr',
}
def recode_path_qs(self, path, qs):
# This isn't perfect; if the given PATH_INFO is in the
# wrong encoding, it may fail to match the appropriate config
# section URI. But meh.
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1')
new_enc = self.cpapp.find_config(
self.environ.get('PATH_INFO', ''),
'request.uri_encoding', 'utf-8',
)
if new_enc.lower() == old_enc.lower():
return
# Even though the path and qs are unicode, the WSGI server
# is required by PEP 3333 to coerce them to ISO-8859-1
# masquerading as unicode. So we have to encode back to
# bytes and then decode again using the "correct" encoding.
try:
return (
path.encode(old_enc).decode(new_enc),
qs.encode(old_enc).decode(new_enc),
)
except (UnicodeEncodeError, UnicodeDecodeError):
# Just pass them through without transcoding and hope.
pass
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
# We assume all incoming header keys are uppercase already.
if cgiName in self.headerNames:
yield self.headerNames[cgiName], environ[cgiName]
elif cgiName[:5] == 'HTTP_':
# Hackish attempt at recovering original header names.
translatedHeader = cgiName[5:].replace('_', '-')
yield translatedHeader, environ[cgiName]
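# Editor's note (not in the original source): for a hypothetical environ
# {'CONTENT_TYPE': 'text/plain', 'HTTP_ACCEPT_ENCODING': 'gzip'}, the
# generator above yields ('Content-Type', 'text/plain') and
# ('ACCEPT-ENCODING', 'gzip') -- HTTP_* names keep their casing; only
# underscores are replaced with dashes.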
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application."""
pipeline = [
('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
"""A list of (name, wsgiapp) pairs.
Each 'wsgiapp' MUST be a constructor that takes an initial,
positional 'nextapp' argument, plus optional keyword arguments, and
returns a WSGI application (that takes environ and start_response
arguments). The 'name' can be any you choose, and will correspond to
keys in self.config.
"""
head = None
"""Rather than nest all apps in the pipeline on each call, it's only
done the first time, and the result is memoized into self.head. Set
this to None again if you change self.pipeline after calling self."""
config = {}
"""A dict whose keys match names listed in the pipeline.
Each value is a further dict which will be passed to the
corresponding named WSGI callable (from the pipeline) as keyword
arguments.
"""
response_class = AppResponse
"""The class to instantiate and return as the next app in the WSGI chain.
"""
def __init__(self, cpapp, pipeline=None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead, so
that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
# Create and nest the WSGI apps in our pipeline (in reverse order).
# Then memoize the result in self.head.
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == 'pipeline':
# Note this allows multiple 'wsgi.pipeline' config entries
# (but each entry will be processed in a 'random' order).
# It should also allow developers to set default middleware
# in code (passed to self.__init__) that deployers can add to
# (but not remove) via config.
self.pipeline.extend(v)
elif k == 'response_class':
self.response_class = v
else:
name, arg = k.split('.', 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
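# Editor's sketch (not in the original source): configuring the pipeline
# through the 'wsgi' config namespace handled above. 'MyMiddleware' is
# hypothetical; like the built-in pipeline entries, it must accept the
# next app as its first positional argument.
#
#     config = {'/': {
#         'wsgi.pipeline': [('my_mw', MyMiddleware)],
#         'wsgi.my_mw.some_option': 42,
#         # -> constructed as MyMiddleware(head, some_option=42)
#     }}
#     cherrypy.tree.mount(Root(), '/', config)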
| 16,438 | Python | .py | 379 | 32.598945 | 79 | 0.584455 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,577 | _cperror.py | rembo10_headphones/lib/cherrypy/_cperror.py |
"""Exception classes for CherryPy.
CherryPy provides (and uses) exceptions for declaring that the HTTP response
should be a status other than the default "200 OK". You can ``raise`` them like
normal Python exceptions. You can also call them and they will raise
themselves; this means you can set an
:class:`HTTPError<cherrypy._cperror.HTTPError>`
or :class:`HTTPRedirect<cherrypy._cperror.HTTPRedirect>` as the
:attr:`request.handler<cherrypy._cprequest.Request.handler>`.
.. _redirectingpost:
Redirecting POST
================
When you GET a resource and are redirected by the server to another Location,
there's generally no problem since GET is both a "safe method" (there should
be no side-effects) and an "idempotent method" (multiple calls are no different
than a single call).
POST, however, is neither safe nor idempotent--if you
charge a credit card, you don't want to be charged twice by a redirect!
For this reason, *none* of the 3xx responses permit a user-agent (browser) to
resubmit a POST on redirection without first confirming the action with the
user:
===== ================================= ================================
300   Multiple Choices                  Confirm with the user
301   Moved Permanently                 Confirm with the user
302   Found (Object moved temporarily)  Confirm with the user
303   See Other                         GET the new URI; no confirmation
304   Not modified                      for conditional GET only;
                                        POST should not raise this error
305   Use Proxy                         Confirm with the user
307   Temporary Redirect                Confirm with the user
308   Permanent Redirect                No confirmation
===== ================================= ================================
However, browsers have historically implemented these restrictions poorly;
in particular, many browsers do not force the user to confirm 301, 302
or 307 when redirecting POST. For this reason, CherryPy defaults to 303,
which most user-agents appear to have implemented correctly. Therefore, if
you raise HTTPRedirect for a POST request, the user-agent will most likely
attempt to GET the new URI (without asking for confirmation from the user).
We realize this is confusing for developers, but it's the safest thing we
could do. You are of course free to raise ``HTTPRedirect(uri, status=302)``
or any other 3xx status if you know what you're doing, but given the
environment, we couldn't let any of those be the default.
Custom Error Handling
=====================
.. image:: /refman/cperrors.gif
Anticipated HTTP responses
--------------------------
The 'error_page' config namespace can be used to provide custom HTML output for
expected responses (like 404 Not Found). Supply a filename from which the
output will be read. The contents will be interpolated with the values
%(status)s, %(message)s, %(traceback)s, and %(version)s using plain old Python
`string formatting
<http://docs.python.org/2/library/stdtypes.html#string-formatting-operations>`_.
::
_cp_config = {
'error_page.404': os.path.join(localDir, "static/index.html")
}
Beginning in version 3.1, you may also provide a function or other callable as
an error_page entry. It will be passed the same status, message, traceback and
version arguments that are interpolated into templates::
def error_page_402(status, message, traceback, version):
return "Error %s - Well, I'm very sorry but you haven't paid!" % status
cherrypy.config.update({'error_page.402': error_page_402})
Also in 3.1, in addition to the numbered error codes, you may also supply
"error_page.default" to handle all codes which do not have their own error_page
entry.
Unanticipated errors
--------------------
CherryPy also has a generic error handling mechanism: whenever an unanticipated
error occurs in your code, it will call
:func:`Request.error_response<cherrypy._cprequest.Request.error_response>` to
set the response status, headers, and body. By default, this is the same
output as
:class:`HTTPError(500) <cherrypy._cperror.HTTPError>`. If you want to provide
some other behavior, you generally replace "request.error_response".
Here is some sample code that shows how to display a custom error message and
send an e-mail containing the error::
from cherrypy import _cperror
def handle_error():
cherrypy.response.status = 500
cherrypy.response.body = [
"<html><body>Sorry, an error occurred</body></html>"
]
sendMail('error@domain.com',
'Error in your web app',
_cperror.format_exc())
@cherrypy.config(**{'request.error_response': handle_error})
class Root:
pass
Note that you have to explicitly set
:attr:`response.body <cherrypy._cprequest.Response.body>`
and not simply return an error message as a result.
"""
import io
import contextlib
import urllib.parse
from sys import exc_info as _exc_info
from traceback import format_exception as _format_exception
from xml.sax import saxutils
import html
from more_itertools import always_iterable
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy._cpcompat import tonative
from cherrypy._helper import classproperty
from cherrypy.lib import httputil as _httputil
class CherryPyException(Exception):
"""A base class for CherryPy exceptions."""
pass
class InternalRedirect(CherryPyException):
"""Exception raised to switch to the handler for a different URL.
This exception will redirect processing to another path within the
site (without informing the client). Provide the new path as an
argument when raising the exception. Provide any params in the
querystring for the new URL.
"""
def __init__(self, path, query_string=''):
self.request = cherrypy.serving.request
self.query_string = query_string
if '?' in path:
# Separate any params included in the path
path, self.query_string = path.split('?', 1)
# Note that urljoin will "do the right thing" whether url is:
# 1. a URL relative to root (e.g. "/dummy")
# 2. a URL relative to the current path
# Note that any query string will be discarded.
path = urllib.parse.urljoin(self.request.path_info, path)
# Set a 'path' member attribute so that code which traps this
# error can have access to it.
self.path = path
CherryPyException.__init__(self, path, self.query_string)
class HTTPRedirect(CherryPyException):
"""Exception raised when the request should be redirected.
This exception will force a HTTP redirect to the URL or URL's you give it.
The new URL must be passed as the first argument to the Exception,
e.g., HTTPRedirect(newUrl). Multiple URLs are allowed in a list.
If a URL is absolute, it will be used as-is. If it is relative, it is
assumed to be relative to the current cherrypy.request.path_info.
If one of the provided URLs is a unicode object, it will be encoded
using the default encoding or the one passed in as a parameter.
There are multiple types of redirect, from which you can select via the
``status`` argument. If you do not provide a ``status`` arg, it defaults to
303 (or 302 if responding with HTTP/1.0).
Examples::
raise cherrypy.HTTPRedirect("")
raise cherrypy.HTTPRedirect("/abs/path", 307)
raise cherrypy.HTTPRedirect(["path1", "path2?a=1&b=2"], 301)
See :ref:`redirectingpost` for additional caveats.
"""
urls = None
"""The list of URL's to emit."""
encoding = 'utf-8'
"""The encoding when passed urls are not native strings."""
def __init__(self, urls, status=None, encoding=None):
self.urls = abs_urls = [
# Note that urljoin will "do the right thing" whether url is:
# 1. a complete URL with host (e.g. "http://www.example.com/test")
# 2. a URL relative to root (e.g. "/dummy")
# 3. a URL relative to the current path
# Note that any query string in cherrypy.request is discarded.
urllib.parse.urljoin(
cherrypy.url(),
tonative(url, encoding or self.encoding),
)
for url in always_iterable(urls)
]
status = (
int(status)
if status is not None
else self.default_status
)
if not 300 <= status <= 399:
raise ValueError('status must be between 300 and 399.')
CherryPyException.__init__(self, abs_urls, status)
@classproperty
def default_status(cls):
"""The default redirect status for the request.
RFC 2616 indicates a 301 response code fits our goal; however,
browser support for 301 is quite messy. Use 302/303 instead. See
http://www.alanflavell.org.uk/www/post-redirect.html
"""
return 303 if cherrypy.serving.request.protocol >= (1, 1) else 302
@property
def status(self):
"""The integer HTTP status code to emit."""
_, status = self.args[:2]
return status
def set_response(self):
"""Modify cherrypy.response status, headers, and body to represent
self.
CherryPy uses this internally, but you can also use it to create
an HTTPRedirect object and set its output without *raising* the
exception.
"""
response = cherrypy.serving.response
response.status = status = self.status
if status in (300, 301, 302, 303, 307, 308):
response.headers['Content-Type'] = 'text/html;charset=utf-8'
# "The ... URI SHOULD be given by the Location field
# in the response."
response.headers['Location'] = self.urls[0]
# "Unless the request method was HEAD, the entity of the response
# SHOULD contain a short hypertext note with a hyperlink to the
# new URI(s)."
msg = {
300: 'This resource can be found at ',
301: 'This resource has permanently moved to ',
302: 'This resource resides temporarily at ',
303: 'This resource can be found at ',
307: 'This resource has moved temporarily to ',
308: 'This resource has been moved to ',
}[status]
msg += '<a href=%s>%s</a>.'
msgs = [
msg % (saxutils.quoteattr(u), html.escape(u, quote=False))
for u in self.urls
]
response.body = ntob('<br />\n'.join(msgs), 'utf-8')
# Previous code may have set C-L, so we have to reset it
# (allow finalize to set it).
response.headers.pop('Content-Length', None)
elif status == 304:
# Not Modified.
# "The response MUST include the following header fields:
# Date, unless its omission is required by section 14.18.1"
# The "Date" header should have been set in Response.__init__
# "...the response SHOULD NOT include other entity-headers."
for key in ('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Location', 'Content-MD5',
'Content-Range', 'Content-Type', 'Expires',
'Last-Modified'):
if key in response.headers:
del response.headers[key]
# "The 304 response MUST NOT contain a message-body."
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
elif status == 305:
# Use Proxy.
# self.urls[0] should be the URI of the proxy.
response.headers['Location'] = ntob(self.urls[0], 'utf-8')
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
else:
raise ValueError('The %s status code is unknown.' % status)
def __call__(self):
"""Use this exception as a request.handler (raise self)."""
raise self
def clean_headers(status):
"""Remove any headers which should not apply to an error response."""
response = cherrypy.serving.response
# Remove headers which applied to the original content,
# but do not apply to the error page.
respheaders = response.headers
for key in ['Accept-Ranges', 'Age', 'ETag', 'Location', 'Retry-After',
'Vary', 'Content-Encoding', 'Content-Length', 'Expires',
'Content-Location', 'Content-MD5', 'Last-Modified']:
if key in respheaders:
del respheaders[key]
if status != 416:
# A server sending a response with status code 416 (Requested
# range not satisfiable) SHOULD include a Content-Range field
# with a byte-range-resp-spec of "*". The instance-length
# specifies the current length of the selected resource.
# A response with status code 206 (Partial Content) MUST NOT
# include a Content-Range field with a byte-range- resp-spec of "*".
if 'Content-Range' in respheaders:
del respheaders['Content-Range']
class HTTPError(CherryPyException):
"""Exception used to return an HTTP error code (4xx-5xx) to the client.
This exception can be used to automatically send a response using a
http status code, with an appropriate error page. It takes an optional
``status`` argument (which must be between 400 and 599); it defaults to 500
("Internal Server Error"). It also takes an optional ``message`` argument,
which will be returned in the response body. See
`RFC2616 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4>`_
for a complete list of available error codes and when to use them.
Examples::
raise cherrypy.HTTPError(403)
raise cherrypy.HTTPError(
"403 Forbidden", "You are not allowed to access this resource.")
"""
status = None
"""The HTTP status code.
May be of type int or str (with a Reason-Phrase).
"""
code = None
"""The integer HTTP status code."""
reason = None
"""The HTTP Reason-Phrase string."""
def __init__(self, status=500, message=None):
self.status = status
try:
self.code, self.reason, defaultmsg = _httputil.valid_status(status)
except ValueError:
raise self.__class__(500, _exc_info()[1].args[0])
if self.code < 400 or self.code > 599:
raise ValueError('status must be between 400 and 599.')
# See http://www.python.org/dev/peps/pep-0352/
# self.message = message
self._message = message or defaultmsg
CherryPyException.__init__(self, status, message)
def set_response(self):
"""Modify cherrypy.response status, headers, and body to represent
self.
CherryPy uses this internally, but you can also use it to create
an HTTPError object and set its output without *raising* the
exception.
"""
response = cherrypy.serving.response
clean_headers(self.code)
# In all cases, finalize will be called after this method,
# so don't bother cleaning up response values here.
response.status = self.status
tb = None
if cherrypy.serving.request.show_tracebacks:
tb = format_exc()
response.headers.pop('Content-Length', None)
content = self.get_error_page(self.status, traceback=tb,
message=self._message)
response.body = content
_be_ie_unfriendly(self.code)
def get_error_page(self, *args, **kwargs):
return get_error_page(*args, **kwargs)
def __call__(self):
"""Use this exception as a request.handler (raise self)."""
raise self
@classmethod
@contextlib.contextmanager
def handle(cls, exception, status=500, message=''):
"""Translate exception into an HTTPError."""
try:
yield
except exception as exc:
raise cls(status, message or str(exc))
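# Editor's sketch (not in the original source): using the handle()
# context manager above inside a hypothetical page handler to convert
# a ValueError into a 400 response.
#
#     @cherrypy.expose
#     def resource(self, num):
#         with cherrypy.HTTPError.handle(ValueError, 400, 'invalid num'):
#             num = int(num)
#         return 'resource %d' % num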
class NotFound(HTTPError):
"""Exception raised when a URL could not be mapped to any handler (404).
This is equivalent to raising :class:`HTTPError("404 Not Found")
<cherrypy._cperror.HTTPError>`.
"""
def __init__(self, path=None):
if path is None:
request = cherrypy.serving.request
path = request.script_name + request.path_info
self.args = (path,)
HTTPError.__init__(self, 404, "The path '%s' was not found." % path)
_HTTPErrorTemplate = '''<!DOCTYPE html PUBLIC
"-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta>
<title>%(status)s</title>
<style type="text/css">
#powered_by {
margin-top: 20px;
border-top: 2px solid black;
font-style: italic;
}
#traceback {
color: red;
}
</style>
</head>
<body>
<h2>%(status)s</h2>
<p>%(message)s</p>
<pre id="traceback">%(traceback)s</pre>
<div id="powered_by">
<span>
Powered by <a href="http://www.cherrypy.dev">CherryPy %(version)s</a>
</span>
</div>
</body>
</html>
'''
def get_error_page(status, **kwargs):
"""Return an HTML page, containing a pretty error response.
status should be an int or a str. kwargs will be interpolated into
the page template.
"""
try:
code, reason, message = _httputil.valid_status(status)
except ValueError:
raise cherrypy.HTTPError(500, _exc_info()[1].args[0])
# We can't use setdefault here, because some
# callers send None for kwarg values.
if kwargs.get('status') is None:
kwargs['status'] = '%s %s' % (code, reason)
if kwargs.get('message') is None:
kwargs['message'] = message
if kwargs.get('traceback') is None:
kwargs['traceback'] = ''
if kwargs.get('version') is None:
kwargs['version'] = cherrypy.__version__
for k, v in kwargs.items():
if v is None:
kwargs[k] = ''
else:
kwargs[k] = html.escape(kwargs[k], quote=False)
# Use a custom template or callable for the error page?
pages = cherrypy.serving.request.error_page
error_page = pages.get(code) or pages.get('default')
# Default template, can be overridden below.
template = _HTTPErrorTemplate
if error_page:
try:
if hasattr(error_page, '__call__'):
# The caller function may be setting headers manually,
# so we delegate to it completely. We may be returning
# an iterator as well as a string here.
#
# We *must* make sure any content is not unicode.
result = error_page(**kwargs)
if cherrypy.lib.is_iterator(result):
from cherrypy.lib.encoding import UTF8StreamEncoder
return UTF8StreamEncoder(result)
elif isinstance(result, str):
return result.encode('utf-8')
else:
if not isinstance(result, bytes):
raise ValueError(
'error page function did not '
'return a bytestring, str or an '
'iterator - returned object of type %s.'
% (type(result).__name__))
return result
else:
# Load the template from this path.
with io.open(error_page, newline='') as f:
template = f.read()
except Exception:
e = _format_exception(*_exc_info())[-1]
m = kwargs['message']
if m:
m += '<br />'
m += 'In addition, the custom error page failed:\n<br />%s' % e
kwargs['message'] = m
response = cherrypy.serving.response
response.headers['Content-Type'] = 'text/html;charset=utf-8'
result = template % kwargs
return result.encode('utf-8')
_ie_friendly_error_sizes = {
400: 512, 403: 256, 404: 512, 405: 256,
406: 512, 408: 512, 409: 512, 410: 256,
500: 512, 501: 512, 505: 512,
}
def _be_ie_unfriendly(status):
response = cherrypy.serving.response
# For some statuses, Internet Explorer 5+ shows "friendly error
# messages" instead of our response.body if the body is smaller
# than a given size. Fix this by returning a body over that size
# (by adding whitespace).
# See http://support.microsoft.com/kb/q218155/
s = _ie_friendly_error_sizes.get(status, 0)
if s:
s += 1
# Since we are issuing an HTTP error status, we assume that
# the entity is short, and we should just collapse it.
content = response.collapse_body()
content_length = len(content)
if content_length and content_length < s:
# IN ADDITION: the response must be written to IE
# in one chunk or it will still get replaced! Bah.
content = content + (b' ' * (s - content_length))
response.body = content
response.headers['Content-Length'] = str(len(content))
def format_exc(exc=None):
"""Return exc (or sys.exc_info if None), formatted."""
try:
if exc is None:
exc = _exc_info()
if exc == (None, None, None):
return ''
import traceback
return ''.join(traceback.format_exception(*exc))
finally:
del exc
def bare_error(extrabody=None):
"""Produce status, headers, body for a critical error.
Returns a triple without calling any other questionable functions,
so it should be as error-free as possible. Call it from an HTTP
server if you get errors outside of the request.
If extrabody is None, a friendly but rather unhelpful error message
is set in the body. If extrabody is a string, it will be appended
as-is to the body.
"""
# The whole point of this function is to be a last line-of-defense
# in handling errors. That is, it must not raise any errors itself;
# it cannot be allowed to fail. Therefore, don't add to it!
# In particular, don't call any other CP functions.
body = b'Unrecoverable error in the server.'
if extrabody is not None:
if not isinstance(extrabody, bytes):
extrabody = extrabody.encode('utf-8')
body += b'\n' + extrabody
return (b'500 Internal Server Error',
[(b'Content-Type', b'text/plain'),
(b'Content-Length', ntob(str(len(body)), 'ISO-8859-1'))],
[body])
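# Editor's sketch (not in the original source): the intended use of
# bare_error() from HTTP-server glue code, mirroring
# NativeGateway.respond in _cpnative_server.py. dispatch() and send()
# are hypothetical.
#
#     try:
#         dispatch()
#     except Exception:
#         status, headers, body = bare_error(format_exc())
#         send(status, headers, body)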
| 23,111 | Python | .py | 500 | 38.188 | 80 | 0.636509 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,578 | _json.py | rembo10_headphones/lib/cherrypy/_json.py |
"""JSON support.
Expose preferred json module as json and provide encode/decode
convenience functions.
"""
try:
# Prefer simplejson
import simplejson as json
except ImportError:
import json
__all__ = ['json', 'encode', 'decode']
decode = json.JSONDecoder().decode
_encode = json.JSONEncoder().iterencode
def encode(value):
"""Encode to bytes."""
for chunk in _encode(value):
yield chunk.encode('utf-8')
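# Editor's sketch (not in the original source): encode() is a generator
# of UTF-8 byte chunks, so join it to obtain a single payload; decode()
# takes the corresponding text.
#
#     payload = b''.join(encode({'ok': True, 'n': 1}))
#     assert decode(payload.decode('utf-8')) == {'ok': True, 'n': 1}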
| 439 | Python | .py | 16 | 24.1875 | 62 | 0.715663 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,579 | daemon.py | rembo10_headphones/lib/cherrypy/daemon.py |
"""The CherryPy daemon."""
import sys
import cherrypy
from cherrypy.process import plugins, servers
from cherrypy import Application
def start(configfiles=None, daemonize=False, environment=None,
fastcgi=False, scgi=False, pidfile=None, imports=None,
cgi=False):
"""Subscribe all engine plugins and start the engine."""
sys.path = [''] + sys.path
for i in imports or []:
exec('import %s' % i)
for c in configfiles or []:
cherrypy.config.update(c)
# If there's only one app mounted, merge config into it.
if len(cherrypy.tree.apps) == 1:
for app in cherrypy.tree.apps.values():
if isinstance(app, Application):
app.merge(c)
engine = cherrypy.engine
if environment is not None:
cherrypy.config.update({'environment': environment})
# Only daemonize if asked to.
if daemonize:
        # Don't print anything to stdout/stderr.
cherrypy.config.update({'log.screen': False})
plugins.Daemonizer(engine).subscribe()
if pidfile:
plugins.PIDFile(engine, pidfile).subscribe()
if hasattr(engine, 'signal_handler'):
engine.signal_handler.subscribe()
if hasattr(engine, 'console_control_handler'):
engine.console_control_handler.subscribe()
if (fastcgi and (scgi or cgi)) or (scgi and cgi):
cherrypy.log.error('You may only specify one of the cgi, fastcgi, and '
'scgi options.', 'ENGINE')
sys.exit(1)
elif fastcgi or scgi or cgi:
# Turn off autoreload when using *cgi.
cherrypy.config.update({'engine.autoreload.on': False})
# Turn off the default HTTP server (which is subscribed by default).
cherrypy.server.unsubscribe()
addr = cherrypy.server.bind_addr
cls = (
servers.FlupFCGIServer if fastcgi else
servers.FlupSCGIServer if scgi else
servers.FlupCGIServer
)
f = cls(application=cherrypy.tree, bindAddress=addr)
s = servers.ServerAdapter(engine, httpserver=f, bind_addr=addr)
s.subscribe()
# Always start the engine; this will start all other services
try:
engine.start()
except Exception:
# Assume the error has been logged already via bus.log.
sys.exit(1)
else:
engine.block()
def run():
"""Run cherryd CLI."""
from optparse import OptionParser
p = OptionParser()
p.add_option('-c', '--config', action='append', dest='config',
help='specify config file(s)')
p.add_option('-d', action='store_true', dest='daemonize',
help='run the server as a daemon')
p.add_option('-e', '--environment', dest='environment', default=None,
help='apply the given config environment')
p.add_option('-f', action='store_true', dest='fastcgi',
help='start a fastcgi server instead of the default HTTP '
'server')
p.add_option('-s', action='store_true', dest='scgi',
help='start a scgi server instead of the default HTTP server')
p.add_option('-x', action='store_true', dest='cgi',
help='start a cgi server instead of the default HTTP server')
p.add_option('-i', '--import', action='append', dest='imports',
help='specify modules to import')
p.add_option('-p', '--pidfile', dest='pidfile', default=None,
help='store the process id in the given file')
p.add_option('-P', '--Path', action='append', dest='Path',
help='add the given paths to sys.path')
options, args = p.parse_args()
if options.Path:
for p in options.Path:
sys.path.insert(0, p)
start(options.config, options.daemonize,
options.environment, options.fastcgi, options.scgi,
options.pidfile, options.imports, options.cgi)
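# Editor's sketch (not in the original source): typical cherryd
# invocations wired to the options defined above (file paths are
# hypothetical):
#
#     cherryd -c prod.conf -d -p /var/run/cherryd.pid
#     cherryd -c app.conf -f                 # FastCGI instead of HTTP
#     cherryd -c app.conf -i myapp.web       # import myapp.web first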
| 3,950 | Python | .py | 89 | 35.426966 | 79 | 0.624772 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,580 | __init__.py | rembo10_headphones/lib/cherrypy/__init__.py |
"""CherryPy is a pythonic, object-oriented HTTP framework.
CherryPy consists of not one, but four separate API layers.
The APPLICATION LAYER is the simplest. CherryPy applications are written as
a tree of classes and methods, where each branch in the tree corresponds to
a branch in the URL path. Each method is a 'page handler', which receives
GET and POST params as keyword arguments, and returns or yields the (HTML)
body of the response. The special method name 'index' is used for paths
that end in a slash, and the special method name 'default' is used to
handle multiple paths via a single handler. This layer also includes:
* the 'exposed' attribute (and cherrypy.expose)
* cherrypy.quickstart()
* _cp_config attributes
* cherrypy.tools (including cherrypy.session)
* cherrypy.url()
The ENVIRONMENT LAYER is used by developers at all levels. It provides
information about the current request and response, plus the application
and server environment, via a (default) set of top-level objects:
* cherrypy.request
* cherrypy.response
* cherrypy.engine
* cherrypy.server
* cherrypy.tree
* cherrypy.config
* cherrypy.thread_data
* cherrypy.log
* cherrypy.HTTPError, NotFound, and HTTPRedirect
* cherrypy.lib
The EXTENSION LAYER allows advanced users to construct and share their own
plugins. It consists of:
* Hook API
* Tool API
* Toolbox API
* Dispatch API
* Config Namespace API
Finally, there is the CORE LAYER, which uses the core API's to construct
the default components which are available at higher layers. You can think
of the default components as the 'reference implementation' for CherryPy.
Megaframeworks (and advanced users) may replace the default components
with customized or extended components. The core API's are:
* Application API
* Engine API
* Request API
* Server API
* WSGI API
These API's are described in the `CherryPy specification
<https://github.com/cherrypy/cherrypy/wiki/CherryPySpec>`_.
"""
try:
import importlib.metadata as importlib_metadata
except ImportError:
    # Fall back for Python <= 3.7.
    # This try/except can be removed once py <= 3.7 support is dropped.
import importlib_metadata
from threading import local as _local
from ._cperror import (
HTTPError, HTTPRedirect, InternalRedirect,
NotFound, CherryPyException,
)
from . import _cpdispatch as dispatch
from ._cptools import default_toolbox as tools, Tool
from ._helper import expose, popargs, url
from . import _cprequest, _cpserver, _cptree, _cplogging, _cpconfig
import cherrypy.lib.httputil as _httputil
from ._cptree import Application
from . import _cpwsgi as wsgi
from . import process
try:
from .process import win32
engine = win32.Win32Bus()
engine.console_control_handler = win32.ConsoleCtrlHandler(engine)
del win32
except ImportError:
engine = process.bus
from . import _cpchecker
__all__ = (
'HTTPError', 'HTTPRedirect', 'InternalRedirect',
'NotFound', 'CherryPyException',
'dispatch', 'tools', 'Tool', 'Application',
'wsgi', 'process', 'tree', 'engine',
'quickstart', 'serving', 'request', 'response', 'thread_data',
'log', 'expose', 'popargs', 'url', 'config',
)
__import__('cherrypy._cptools')
__import__('cherrypy._cprequest')
tree = _cptree.Tree()
try:
__version__ = importlib_metadata.version('cherrypy')
except Exception:
__version__ = 'unknown'
engine.listeners['before_request'] = set()
engine.listeners['after_request'] = set()
engine.autoreload = process.plugins.Autoreloader(engine)
engine.autoreload.subscribe()
engine.thread_manager = process.plugins.ThreadManager(engine)
engine.thread_manager.subscribe()
engine.signal_handler = process.plugins.SignalHandler(engine)
class _HandleSignalsPlugin(object):
"""Handle signals from other processes.
Based on the configured platform handlers above.
"""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Add the handlers based on the platform."""
if hasattr(self.bus, 'signal_handler'):
self.bus.signal_handler.subscribe()
if hasattr(self.bus, 'console_control_handler'):
self.bus.console_control_handler.subscribe()
engine.signals = _HandleSignalsPlugin(engine)
server = _cpserver.Server()
server.subscribe()
def quickstart(root=None, script_name='', config=None):
"""Mount the given root, start the builtin server (and engine), then block.
root: an instance of a "controller class" (a collection of page handler
methods) which represents the root of the application.
script_name: a string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the URL
at which to mount the given root. For example, if root.index() will
handle requests to "http://www.example.com:8080/dept/app1/", then
the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the root
of the URI, it MUST be an empty string (not "/").
config: a file or dict containing application config. If this contains
a [global] section, those entries will be used in the global
(site-wide) config.
"""
if config:
_global_conf_alias.update(config)
tree.mount(root, script_name, config)
engine.signals.subscribe()
engine.start()
engine.block()
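# Editor's sketch (not in the original source): the smallest complete
# application served through quickstart() above; names are arbitrary.
#
#     class HelloWorld:
#         @expose
#         def index(self):
#             return 'Hello world!'
#
#     quickstart(HelloWorld())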
class _Serving(_local):
"""An interface for registering request and response objects.
Rather than have a separate "thread local" object for the request
and the response, this class works as a single threadlocal container
for both objects (and any others which developers wish to define).
In this way, we can easily dump those objects when we stop/start a
new HTTP conversation, yet still refer to them as module-level
globals in a thread-safe way.
"""
request = _cprequest.Request(_httputil.Host('127.0.0.1', 80),
_httputil.Host('127.0.0.1', 1111))
"""The request object for the current thread.
In the main thread, and any threads which are not receiving HTTP
requests, this is None.
"""
response = _cprequest.Response()
"""The response object for the current thread.
In the main thread, and any threads which are not receiving HTTP
requests, this is None.
"""
def load(self, request, response):
self.request = request
self.response = response
def clear(self):
"""Remove all attributes of self."""
self.__dict__.clear()
serving = _Serving()
class _ThreadLocalProxy(object):
__slots__ = ['__attrname__', '__dict__']
def __init__(self, attrname):
self.__attrname__ = attrname
def __getattr__(self, name):
child = getattr(serving, self.__attrname__)
return getattr(child, name)
def __setattr__(self, name, value):
if name in ('__attrname__', ):
object.__setattr__(self, name, value)
else:
child = getattr(serving, self.__attrname__)
setattr(child, name, value)
def __delattr__(self, name):
child = getattr(serving, self.__attrname__)
delattr(child, name)
@property
def __dict__(self):
child = getattr(serving, self.__attrname__)
d = child.__class__.__dict__.copy()
d.update(child.__dict__)
return d
def __getitem__(self, key):
child = getattr(serving, self.__attrname__)
return child[key]
def __setitem__(self, key, value):
child = getattr(serving, self.__attrname__)
child[key] = value
def __delitem__(self, key):
child = getattr(serving, self.__attrname__)
del child[key]
def __contains__(self, key):
child = getattr(serving, self.__attrname__)
return key in child
def __len__(self):
child = getattr(serving, self.__attrname__)
return len(child)
def __nonzero__(self):
child = getattr(serving, self.__attrname__)
return bool(child)
# Python 3
__bool__ = __nonzero__
# Create request and response object (the same objects will be used
# throughout the entire life of the webserver, but will redirect
# to the "serving" object)
request = _ThreadLocalProxy('request')
response = _ThreadLocalProxy('response')
# Create thread_data object as a thread-specific all-purpose storage
class _ThreadData(_local):
"""A container for thread-specific data."""
thread_data = _ThreadData()
# Monkeypatch pydoc to allow help() to go through the threadlocal proxy.
# Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve.
# The only other way would be to change what is returned from type(request)
# and that's not possible in pure Python (you'd have to fake ob_type).
def _cherrypy_pydoc_resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, _ThreadLocalProxy):
thing = getattr(serving, thing.__attrname__)
return _pydoc._builtin_resolve(thing, forceload)
try:
import pydoc as _pydoc
_pydoc._builtin_resolve = _pydoc.resolve
_pydoc.resolve = _cherrypy_pydoc_resolve
except ImportError:
pass
class _GlobalLogManager(_cplogging.LogManager):
"""A site-wide LogManager; routes to app.log or global log as appropriate.
This :class:`LogManager<cherrypy._cplogging.LogManager>` implements
cherrypy.log() and cherrypy.log.access(). If either
function is called during a request, the message will be sent to the
logger for the current Application. If they are called outside of a
request, the message will be sent to the site-wide logger.
"""
def __call__(self, *args, **kwargs):
"""Log the given message to the app.log or global log.
Log the given message to the app.log or global log as
appropriate.
"""
# Do NOT use try/except here. See
# https://github.com/cherrypy/cherrypy/issues/945
if hasattr(request, 'app') and hasattr(request.app, 'log'):
log = request.app.log
else:
log = self
return log.error(*args, **kwargs)
def access(self):
"""Log an access message to the app.log or global log.
Log the given message to the app.log or global log as
appropriate.
"""
try:
return request.app.log.access()
except AttributeError:
return _cplogging.LogManager.access(self)
log = _GlobalLogManager()
# Set a default screen handler on the global log.
log.screen = True
log.error_file = ''
# Using an access file makes CP about 10% slower. Leave off by default.
log.access_file = ''
@engine.subscribe('log')
def _buslog(msg, level):
log.error(msg, 'ENGINE', severity=level)
# Use _global_conf_alias so quickstart can use 'config' as an arg
# without shadowing cherrypy.config.
config = _global_conf_alias = _cpconfig.Config()
config.defaults = {
'tools.log_tracebacks.on': True,
'tools.log_headers.on': True,
'tools.trailing_slash.on': True,
'tools.encode.on': True
}
config.namespaces['log'] = lambda k, v: setattr(log, k, v)
config.namespaces['checker'] = lambda k, v: setattr(checker, k, v)
# Must reset to get our defaults applied.
config.reset()
checker = _cpchecker.Checker()
engine.subscribe('start', checker)
| 11,431 | Python | .py | 278 | 36.219424 | 79 | 0.701131 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,581 | _cpserver.py | rembo10_headphones/lib/cherrypy/_cpserver.py |
"""Manage HTTP servers with CherryPy."""
import cherrypy
from cherrypy.lib.reprconf import attributes
from cherrypy._cpcompat import text_or_bytes
from cherrypy.process.servers import ServerAdapter
__all__ = ('Server', )
class Server(ServerAdapter):
"""An adapter for an HTTP server.
You can set attributes (like socket_host and socket_port)
on *this* object (which is probably cherrypy.server), and call
quickstart. For example::
cherrypy.server.socket_port = 80
cherrypy.quickstart()
"""
socket_port = 8080
"""The TCP port on which to listen for connections."""
_socket_host = '127.0.0.1'
@property
def socket_host(self): # noqa: D401; irrelevant for properties
"""The hostname or IP address on which to listen for connections.
Host values may be any IPv4 or IPv6 address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if
your hosts file prefers IPv6). The string '0.0.0.0' is a special
IPv4 entry meaning "any active interface" (INADDR_ANY), and '::'
is the similar IN6ADDR_ANY for IPv6. The empty string or None are
not allowed.
"""
return self._socket_host
@socket_host.setter
def socket_host(self, value):
if value == '':
raise ValueError("The empty string ('') is not an allowed value. "
"Use '0.0.0.0' instead to listen on all active "
'interfaces (INADDR_ANY).')
self._socket_host = value
socket_file = None
"""If given, the name of the UNIX socket to use instead of TCP/IP.
When this option is not None, the `socket_host` and `socket_port` options
are ignored.
"""
socket_queue_size = 5
"""The 'backlog' argument to socket.listen(); specifies the maximum number
of queued connections (default 5)."""
socket_timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
accepted_queue_size = -1
"""The maximum number of requests which will be queued up before
the server refuses to accept it (default -1, meaning no limit)."""
accepted_queue_timeout = 10
"""The timeout in seconds for attempting to add a request to the
queue when the queue is full (default 10)."""
shutdown_timeout = 5
"""The time to wait for HTTP worker threads to clean up."""
protocol_version = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses,
for example, "HTTP/1.1" (the default). Depending on the HTTP server used,
this should also limit the supported features used in the response."""
thread_pool = 10
"""The number of worker threads to start up in the pool."""
thread_pool_max = -1
"""The maximum size of the worker-thread pool.
Use -1 to indicate no limit.
"""
max_request_header_size = 500 * 1024
"""The maximum number of bytes allowable in the request headers.
If exceeded, the HTTP server should return "413 Request Entity Too
Large".
"""
max_request_body_size = 100 * 1024 * 1024
"""The maximum number of bytes allowable in the request body.
If exceeded, the HTTP server should return "413 Request Entity Too
Large".
"""
instance = None
"""If not None, this should be an HTTP server instance (such as
cheroot.wsgi.Server) which cherrypy.server will control.
Use this when you need
more control over object instantiation than is available in the various
configuration options."""
ssl_context = None
"""When using PyOpenSSL, an instance of SSL.Context."""
ssl_certificate = None
"""The filename of the SSL certificate to use."""
ssl_certificate_chain = None
"""When using PyOpenSSL, the certificate chain to pass to
Context.load_verify_locations."""
ssl_private_key = None
"""The filename of the private key to use with SSL."""
ssl_ciphers = None
"""The ciphers list of SSL."""
ssl_module = 'builtin'
"""The name of a registered SSL adaptation module to use with
the builtin WSGI server. Builtin options are: 'builtin' (to
use the SSL library built into recent versions of Python).
You may also register your own classes in the
cheroot.server.ssl_adapters dict.
"""
statistics = False
"""Turns statistics-gathering on or off for aware HTTP servers."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
wsgi_version = (1, 0)
"""The WSGI version tuple to use with the builtin WSGI server.
The provided options are (1, 0) [which includes support for PEP
3333, which declares it covers WSGI version 1.0.1 but still mandates
the wsgi.version (1, 0)] and ('u', 0), an experimental unicode
version. You may create and register your own experimental versions
of the WSGI protocol by adding custom classes to the
cheroot.server.wsgi_gateways dict.
"""
peercreds = False
"""If True, peer cred lookup for UNIX domain socket will put to WSGI env.
This information will then be available through WSGI env vars:
* X_REMOTE_PID
* X_REMOTE_UID
* X_REMOTE_GID
"""
peercreds_resolve = False
"""If True, username/group will be looked up in the OS from peercreds.
This information will then be available through WSGI env vars:
* REMOTE_USER
* X_REMOTE_USER
* X_REMOTE_GROUP
"""
def __init__(self):
"""Initialize Server instance."""
self.bus = cherrypy.engine
self.httpserver = None
self.interrupt = None
self.running = False
def httpserver_from_self(self, httpserver=None):
"""Return a (httpserver, bind_addr) pair based on self attributes."""
if httpserver is None:
httpserver = self.instance
if httpserver is None:
from cherrypy import _cpwsgi_server
httpserver = _cpwsgi_server.CPWSGIServer(self)
if isinstance(httpserver, text_or_bytes):
# Is anyone using this? Can I add an arg?
httpserver = attributes(httpserver)(self)
return httpserver, self.bind_addr
def start(self):
"""Start the HTTP server."""
if not self.httpserver:
self.httpserver, self.bind_addr = self.httpserver_from_self()
super(Server, self).start()
start.priority = 75
@property
def bind_addr(self):
"""Return bind address.
A (host, port) tuple for TCP sockets or a str for Unix domain
sockets.
"""
if self.socket_file:
return self.socket_file
if self.socket_host is None and self.socket_port is None:
return None
return (self.socket_host, self.socket_port)
@bind_addr.setter
def bind_addr(self, value):
if value is None:
self.socket_file = None
self.socket_host = None
self.socket_port = None
elif isinstance(value, text_or_bytes):
self.socket_file = value
self.socket_host = None
self.socket_port = None
else:
try:
self.socket_host, self.socket_port = value
self.socket_file = None
except ValueError:
raise ValueError('bind_addr must be a (host, port) tuple '
'(for TCP sockets) or a string (for Unix '
'domain sockets), not %r' % value)
def base(self):
"""Return the base for this server.
i.e. scheme://host[:port] or the socket file
"""
if self.socket_file:
return self.socket_file
host = self.socket_host
if host in ('0.0.0.0', '::'):
# 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY.
# Look up the host name, which should be the
# safest thing to spit out in a URL.
import socket
host = socket.gethostname()
port = self.socket_port
if self.ssl_certificate:
scheme = 'https'
if port != 443:
host += ':%s' % port
else:
scheme = 'http'
if port != 80:
host += ':%s' % port
return '%s://%s' % (scheme, host)
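# Editor's sketch (not in the original source): what base() above
# returns for a sample configuration (the certificate path is
# hypothetical):
#
#     server = Server()
#     server.socket_host = 'example.org'
#     server.socket_port = 8443
#     server.ssl_certificate = '/etc/ssl/cert.pem'
#     server.base()  # -> 'https://example.org:8443'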
| 8,364 | Python | .py | 199 | 34.045226 | 78 | 0.635187 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,582 | _cptree.py | rembo10_headphones/lib/cherrypy/_cptree.py |
"""CherryPy Application and Tree objects."""
import os
import cherrypy
from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools
from cherrypy.lib import httputil, reprconf
class Application(object):
"""A CherryPy Application.
Servers and gateways should not instantiate Request objects
directly. Instead, they should ask an Application object for a
request object.
An instance of this class may also be used as a WSGI callable (WSGI
application object) for itself.
"""
root = None
"""The top-most container of page handlers for this app.
Handlers should be arranged in a hierarchy of attributes, matching
the expected URI hierarchy; the default dispatcher then searches
this hierarchy for a matching handler. When using a dispatcher other
than the default, this value may be None.
"""
config = {}
"""A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
of {key: value} pairs."""
namespaces = reprconf.NamespaceSet()
toolboxes = {'tools': cherrypy.tools}
log = None
"""A LogManager instance.
See _cplogging.
"""
wsgiapp = None
"""A CPWSGIApp instance.
See _cpwsgi.
"""
request_class = _cprequest.Request
response_class = _cprequest.Response
relative_urls = False
def __init__(self, root, script_name='', config=None):
"""Initialize Application with given root."""
self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)
self.root = root
self.script_name = script_name
self.wsgiapp = _cpwsgi.CPWSGIApp(self)
self.namespaces = self.namespaces.copy()
self.namespaces['log'] = lambda k, v: setattr(self.log, k, v)
self.namespaces['wsgi'] = self.wsgiapp.namespace_handler
self.config = self.__class__.config.copy()
if config:
self.merge(config)
def __repr__(self):
"""Generate a representation of the Application instance."""
return '%s.%s(%r, %r)' % (self.__module__, self.__class__.__name__,
self.root, self.script_name)
script_name_doc = """The URI "mount point" for this app. A mount point
is that portion of the URI which is constant for all URIs that are
serviced by this application; it does not include scheme, host, or proxy
("virtual host") portions of the URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
@property
def script_name(self): # noqa: D401; irrelevant for properties
"""The URI "mount point" for this app.
A mount point is that portion of the URI which is constant for
all URIs that are serviced by this application; it does not
include scheme, host, or proxy ("virtual host") portions of the
URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
if self._script_name is not None:
return self._script_name
# A `_script_name` with a value of None signals that the script name
# should be pulled from WSGI environ.
return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip('/')
@script_name.setter
def script_name(self, value):
if value:
value = value.rstrip('/')
self._script_name = value
def merge(self, config):
"""Merge the given config into self.config."""
_cpconfig.merge(self.config, config)
# Handle namespaces specified in config.
self.namespaces(self.config.get('/', {}))
def find_config(self, path, key, default=None):
"""Return the most-specific value for key along path, or default."""
trail = path or '/'
while trail:
nodeconf = self.config.get(trail, {})
if key in nodeconf:
return nodeconf[key]
lastslash = trail.rfind('/')
if lastslash == -1:
break
elif lastslash == 0 and trail != '/':
trail = '/'
else:
trail = trail[:lastslash]
return default
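# Hedged sketch (comment only, not in the original source): how find_config
# walks a path. Given self.config == {'/': {'k': 1}, '/a/b': {'k': 2}}:
#     find_config('/a/b/c', 'k')  # trail '/a/b/c' -> '/a/b'; returns 2
#     find_config('/x', 'k')      # trail '/x' -> '/'; returns 1
#     find_config('/x', 'other')  # no match anywhere; returns default (None)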
def get_serving(self, local, remote, scheme, sproto):
"""Create and return a Request and Response object."""
req = self.request_class(local, remote, scheme, sproto)
req.app = self
for name, toolbox in self.toolboxes.items():
req.namespaces[name] = toolbox
resp = self.response_class()
cherrypy.serving.load(req, resp)
cherrypy.engine.publish('acquire_thread')
cherrypy.engine.publish('before_request')
return req, resp
def release_serving(self):
"""Release the current serving (request and response)."""
req = cherrypy.serving.request
cherrypy.engine.publish('after_request')
try:
req.close()
except Exception:
cherrypy.log(traceback=True, severity=40)
cherrypy.serving.clear()
def __call__(self, environ, start_response):
"""Call a WSGI-callable."""
return self.wsgiapp(environ, start_response)
class Tree(object):
"""A registry of CherryPy applications, mounted at diverse points.
An instance of this class may also be used as a WSGI callable (WSGI
application object), in which case it dispatches to all mounted
apps.
"""
apps = {}
"""
A dict of the form {script name: application}, where "script name"
is a string declaring the URI mount point (no trailing slash), and
"application" is an instance of cherrypy.Application (or an arbitrary
WSGI callable if you happen to be using a WSGI server)."""
def __init__(self):
"""Initialize registry Tree."""
self.apps = {}
def mount(self, root, script_name='', config=None):
"""Mount a new app from a root object, script_name, and config.
root
An instance of a "controller class" (a collection of page
handler methods) which represents the root of the application.
This may also be an Application instance, or None if using
a dispatcher other than the default.
script_name
A string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the
URL at which to mount the given root. For example, if root.index()
will handle requests to "http://www.example.com:8080/dept/app1/",
then the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the
root of the URI, it MUST be an empty string (not "/").
config
A file or dict containing application config.
"""
if script_name is None:
raise TypeError(
"The 'script_name' argument may not be None. Application "
'objects may, however, possess a script_name of None (in '
'order to inspect the WSGI environ for SCRIPT_NAME upon each '
'request). You cannot mount such Applications on this Tree; '
'you must pass them to a WSGI server interface directly.')
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip('/')
if isinstance(root, Application):
app = root
if script_name != '' and script_name != app.script_name:
raise ValueError(
'Cannot specify a different script name and pass an '
'Application instance to cherrypy.mount')
script_name = app.script_name
else:
app = Application(root, script_name)
# If mounted at "", add favicon.ico
needs_favicon = (
script_name == ''
and root is not None
and not hasattr(root, 'favicon_ico')
)
if needs_favicon:
favicon = os.path.join(
os.getcwd(),
os.path.dirname(__file__),
'favicon.ico',
)
root.favicon_ico = tools.staticfile.handler(favicon)
if config:
app.merge(config)
self.apps[script_name] = app
return app
def graft(self, wsgi_callable, script_name=''):
"""Mount a wsgi callable at the given script_name."""
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip('/')
self.apps[script_name] = wsgi_callable
def script_name(self, path=None):
"""Return the script_name of the app at the given path, or None.
If path is None, cherrypy.request is used.
"""
if path is None:
try:
request = cherrypy.serving.request
path = httputil.urljoin(request.script_name,
request.path_info)
except AttributeError:
return None
while True:
if path in self.apps:
return path
if path == '':
return None
# Move one node up the tree and try again.
path = path[:path.rfind('/')]
def __call__(self, environ, start_response):
"""Pre-initialize WSGI env and call WSGI-callable."""
# If you're calling this, then you're probably setting SCRIPT_NAME
# to '' (some WSGI servers always set SCRIPT_NAME to '').
# Try to look up the app using the full path.
env1x = environ
path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''),
env1x.get('PATH_INFO', ''))
sn = self.script_name(path or '/')
if sn is None:
start_response('404 Not Found', [])
return []
app = self.apps[sn]
# Correct the SCRIPT_NAME and PATH_INFO environ entries.
environ = environ.copy()
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip('/')):]
return app(environ, start_response)
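# ---------------------------------------------------------------------------
# Hedged usage sketch (comment only, not part of the original module):
# mounting an app on the Tree and dispatching through it as a WSGI callable.
# 'Root' is a hypothetical controller class.
#
#     import cherrypy
#
#     class Root:
#         @cherrypy.expose
#         def index(self):
#             return 'hello'
#
#     cherrypy.tree.mount(Root(), '/app')
#     # A WSGI server may now call cherrypy.tree(environ, start_response);
#     # Tree.__call__ looks up '/app' via script_name() and corrects
#     # SCRIPT_NAME/PATH_INFO before delegating to the mounted Application.
# ---------------------------------------------------------------------------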
| 11,025 | Python | .py | 242 | 35.657025 | 79 | 0.610307 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,583 | _helper.py | rembo10_headphones/lib/cherrypy/_helper.py |
"""Helper functions for CP apps."""
import urllib.parse
from cherrypy._cpcompat import text_or_bytes
import cherrypy
def expose(func=None, alias=None):
"""Expose the function or class.
Optionally provide an alias or set of aliases.
"""
def expose_(func):
func.exposed = True
if alias is not None:
if isinstance(alias, text_or_bytes):
parents[alias.replace('.', '_')] = func
else:
for a in alias:
parents[a.replace('.', '_')] = func
return func
import sys
import types
decoratable_types = types.FunctionType, types.MethodType, type,
if isinstance(func, decoratable_types):
if alias is None:
# @expose
func.exposed = True
return func
else:
# func = expose(func, alias)
parents = sys._getframe(1).f_locals
return expose_(func)
elif func is None:
if alias is None:
# @expose()
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose(alias="alias") or
# @expose(alias=["alias1", "alias2"])
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose("alias") or
# @expose(["alias1", "alias2"])
parents = sys._getframe(1).f_locals
alias = func
return expose_
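# Hedged sketch (comment only, not in the original source): the decorator
# spellings supported by expose() above, per its branch comments.
#
#     @cherrypy.expose                    # bare decorator
#     def index(self): ...
#
#     @cherrypy.expose(['a1', 'a2'])      # positional alias list
#     def page(self): ...
#
#     @cherrypy.expose(alias='alt')       # keyword alias
#     def other(self): ...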
def popargs(*args, **kwargs):
"""Decorate _cp_dispatch.
(cherrypy.dispatch.Dispatcher.dispatch_method_name)
Optional keyword argument: handler=(Object or Function)
Provides a _cp_dispatch function that pops off path segments into
cherrypy.request.params under the names specified. The dispatch
is then forwarded on to the next vpath element.
Note that any existing (and exposed) member function of the class that
popargs is applied to will override that value of the argument. For
instance, if you have a method named "list" on the class decorated with
popargs, then accessing "/list" will call that function instead of popping
it off as the requested parameter. This restriction applies to all
_cp_dispatch functions. The only way around this restriction is to create
a "blank class" whose only function is to provide _cp_dispatch.
If there are path elements after the arguments, or more arguments
are requested than are available in the vpath, then the 'handler'
keyword argument specifies the next object to handle the parameterized
request. If handler is not specified or is None, then self is used.
If handler is a function rather than an instance, then that function
will be called with the args specified and the return value from that
function used as the next object INSTEAD of adding the parameters to
cherrypy.request.args.
This decorator may be used in one of two ways:
As a class decorator:
.. code-block:: python
@cherrypy.popargs('year', 'month', 'day')
class Blog:
def index(self, year=None, month=None, day=None):
#Process the parameters here; any url like
#/, /2009, /2009/12, or /2009/12/31
#will fill in the appropriate parameters.
def create(self):
#This link will still be available at /create.
#Defined functions take precedence over arguments.
Or as a member of a class:
.. code-block:: python
class Blog:
_cp_dispatch = cherrypy.popargs('year', 'month', 'day')
#...
The handler argument may be used to mix arguments with built in functions.
For instance, the following setup allows different activities at the
day, month, and year level:
.. code-block:: python
class DayHandler:
def index(self, year, month, day):
#Do something with this day; probably list entries
def delete(self, year, month, day):
#Delete all entries for this day
@cherrypy.popargs('day', handler=DayHandler())
class MonthHandler:
def index(self, year, month):
#Do something with this month; probably list entries
def delete(self, year, month):
#Delete all entries for this month
@cherrypy.popargs('month', handler=MonthHandler())
class YearHandler:
def index(self, year):
#Do something with this year
#...
@cherrypy.popargs('year', handler=YearHandler())
class Root:
def index(self):
#...
"""
# Since keyword arg comes after *args, we have to process it ourselves
# for lower versions of python.
handler = None
handler_call = False
for k, v in kwargs.items():
if k == 'handler':
handler = v
else:
tm = "cherrypy.popargs() got an unexpected keyword argument '{0}'"
raise TypeError(tm.format(k))
import inspect
if handler is not None \
and (hasattr(handler, '__call__') or inspect.isclass(handler)):
handler_call = True
def decorated(cls_or_self=None, vpath=None):
if inspect.isclass(cls_or_self):
# cherrypy.popargs is a class decorator
cls = cls_or_self
name = cherrypy.dispatch.Dispatcher.dispatch_method_name
setattr(cls, name, decorated)
return cls
# We're in the actual function
self = cls_or_self
parms = {}
for arg in args:
if not vpath:
break
parms[arg] = vpath.pop(0)
if handler is not None:
if handler_call:
return handler(**parms)
else:
cherrypy.request.params.update(parms)
return handler
cherrypy.request.params.update(parms)
# If we are the ultimate handler, then to prevent our _cp_dispatch
# from being called again, we will resolve remaining elements through
# getattr() directly.
if vpath:
return getattr(self, vpath.pop(0), None)
else:
return self
return decorated
def url(path='', qs='', script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used to find a
script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute
URL for the current request path (minus the querystring) by passing
no args. If you call url(qs=cherrypy.request.query_string), you
should get the original browser URL (assuming no internal
redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = urllib.parse.urlencode(qs)
if qs:
qs = '?' + qs
if cherrypy.request.app:
if not path.startswith('/'):
# Append/remove trailing slash from path_info as needed
# (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = cherrypy.request.path_info
if cherrypy.request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif cherrypy.request.is_index is False:
if pi.endswith('/') and pi != '/':
pi = pi[:-1]
if path == '':
path = pi
else:
path = urllib.parse.urljoin(pi, path)
if script_name is None:
script_name = cherrypy.request.script_name
if base is None:
base = cherrypy.request.base
newurl = base + script_name + normalize_path(path) + qs
else:
# No request.app (we're being called outside a request).
# We'll have to guess the base from server.* attributes.
# This will produce very different results from the above
# if you're using vhosts or tools.proxy.
if base is None:
base = cherrypy.server.base()
path = (script_name or '') + path
newurl = base + normalize_path(path) + qs
# At this point, we should have a fully-qualified absolute URL.
if relative is None:
relative = getattr(cherrypy.request.app, 'relative_urls', False)
# See http://www.ietf.org/rfc/rfc2396.txt
if relative == 'server':
# "A relative reference beginning with a single slash character is
# termed an absolute-path reference, as defined by <abs_path>..."
# This is also sometimes called "server-relative".
newurl = '/' + '/'.join(newurl.split('/', 3)[3:])
elif relative:
# "A relative reference that does not begin with a scheme name
# or a slash character is termed a relative-path reference."
old = url(relative=False).split('/')[:-1]
new = newurl.split('/')
while old and new:
a, b = old[0], new[0]
if a != b:
break
old.pop(0)
new.pop(0)
new = (['..'] * len(old)) + new
newurl = '/'.join(new)
return newurl
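# Hedged sketch (comment only, not in the original source): expected url()
# results while serving 'http://example.com/app/page' with script_name
# '/app' and base 'http://example.com'.
#
#     cherrypy.url('/other')                     # 'http://example.com/app/other'
#     cherrypy.url('/other', relative='server')  # '/app/other'
#     cherrypy.url(qs=cherrypy.request.query_string)  # original browser URL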
def normalize_path(path):
"""Resolve given path from relative into absolute form."""
if './' not in path:
return path
# Normalize the URL by removing ./ and ../
atoms = []
for atom in path.split('/'):
if atom == '.':
pass
elif atom == '..':
# Don't pop from empty list
# (i.e. ignore redundant '..')
if atoms:
atoms.pop()
elif atom:
atoms.append(atom)
newpath = '/'.join(atoms)
# Preserve leading '/'
if path.startswith('/'):
newpath = '/' + newpath
return newpath
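# Hedged sketch (comment only, not in the original source): normalize_path()
# resolves '.' and '..' atoms while preserving a leading slash.
#     normalize_path('/a/./b')     # -> '/a/b'
#     normalize_path('/a/b/../c')  # -> '/a/c'
#     normalize_path('/../x')      # -> '/x' (a redundant '..' is ignored)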
####
# Inlined from jaraco.classes 1.4.3
# Ref #1673
class _ClassPropertyDescriptor(object):
"""Descript for read-only class-based property.
Turns a classmethod-decorated func into a read-only property of that
class type (means the value cannot be set).
"""
def __init__(self, fget, fset=None):
"""Initialize a class property descriptor.
Instantiated by ``_helper.classproperty``.
"""
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
"""Return property value."""
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def classproperty(func): # noqa: D401; irrelevant for properties
"""Decorator like classmethod to implement a static class property."""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return _ClassPropertyDescriptor(func)
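# Hedged usage sketch (comment only, not in the original source):
# classproperty exposes a value on the class itself, read-only. 'Config'
# is illustrative.
#
#     class Config:
#         _name = 'default'
#
#         @classproperty
#         def name(cls):
#             return cls._name
#
#     Config.name  # -> 'default'; no instance needed, and no setter is wired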
####
| 11,656 | Python | .py | 278 | 32.805755 | 78 | 0.611956 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,584 | _cpreqbody.py | rembo10_headphones/lib/cherrypy/_cpreqbody.py |
"""Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short,
:attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of
:class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the
'image' types altogether. If neither the full type nor the major type has a
matching processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
'''Read application/json data into request.json.'''
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the
``processors`` dictionary. All processor functions take a single argument,
the ``Entity`` instance they are to process. It will be called whenever a
request is received (for those URI's where the tool is turned on) which
has a ``Content-Type`` of "application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid),
then reads the remaining bytes on the socket. The ``fp`` object knows its
own length, so it won't hang waiting for data that never arrives. It will
return when all data has been read. Then, we decode those bytes using
Python's built-in ``json`` module, and stick the decoded result onto
``request.json`` . If it cannot be decoded, we raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the
``processors`` dict so that request entities of other ``Content-Types``
aren't parsed at all. Since there's no entry for those invalid MIME
types, the ``default_proc`` method of ``cherrypy.request.body`` is
called. But this does nothing by default (usually to provide the page
handler an opportunity to handle it).
But in our case, we want to raise 415, so we replace
``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we could do so without making a ``Tool``.
Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way,
not update the existing one.
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
from urllib.parse import unquote
import cheroot.server
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.lib import httputil
def unquote_plus(bs):
"""Bytes version of urllib.parse.unquote_plus."""
bs = bs.replace(b'+', b' ')
atoms = bs.split(b'%')
for i in range(1, len(atoms)):
item = atoms[i]
try:
pct = int(item[:2], 16)
atoms[i] = bytes([pct]) + item[2:]
except ValueError:
pass
return b''.join(atoms)
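# Hedged sketch (comment only, not in the original source): unquote_plus()
# is a bytes-oriented analogue of urllib.parse.unquote_plus.
#     unquote_plus(b'a+b%20c')  # -> b'a b c'
#     unquote_plus(b'100%25')   # -> b'100%'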
# ------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
"""Read application/x-www-form-urlencoded data into entity.params."""
qs = entity.fp.read()
for charset in entity.attempt_charsets:
try:
params = {}
for aparam in qs.split(b'&'):
for pair in aparam.split(b';'):
if not pair:
continue
atoms = pair.split(b'=', 1)
if len(atoms) == 1:
atoms.append(b'')
key = unquote_plus(atoms[0]).decode(charset)
value = unquote_plus(atoms[1]).decode(charset)
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
except UnicodeDecodeError:
pass
else:
entity.charset = charset
break
else:
raise cherrypy.HTTPError(
400, 'The request entity could not be decoded. The following '
'charsets were attempted: %s' % repr(entity.attempt_charsets))
# Now that all values have been successfully parsed and decoded,
# apply them to the entity.params dict.
for key, value in params.items():
if key in entity.params:
if not isinstance(entity.params[key], list):
entity.params[key] = [entity.params[key]]
entity.params[key].append(value)
else:
entity.params[key] = value
def process_multipart(entity):
"""Read all multipart parts into entity.parts."""
ib = ''
if 'boundary' in entity.content_type.params:
# http://tools.ietf.org/html/rfc2046#section-5.1.1
# "The grammar for parameters on the Content-type field is such that it
# is often necessary to enclose the boundary parameter values in quotes
# on the Content-type line"
ib = entity.content_type.params['boundary'].strip('"')
if not re.match('^[ -~]{0,200}[!-~]$', ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
ib = ('--' + ib).encode('ascii')
# Find the first marker
while True:
b = entity.readline()
if not b:
return
b = b.strip()
if b == ib:
break
# Read all parts
while True:
part = entity.part_class.from_fp(entity.fp, ib)
entity.parts.append(part)
part.process()
if part.fp.done:
break
def process_multipart_form_data(entity):
"""Read all multipart/form-data parts into entity.parts or entity.params.
"""
process_multipart(entity)
kept_parts = []
for part in entity.parts:
if part.name is None:
kept_parts.append(part)
else:
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if part.name in entity.params:
if not isinstance(entity.params[part.name], list):
entity.params[part.name] = [entity.params[part.name]]
entity.params[part.name].append(value)
else:
entity.params[part.name] = value
entity.parts = kept_parts
def _old_process_multipart(entity):
"""The behavior of 3.2 and lower.
Deprecated and will be changed in 3.3.
"""
process_multipart(entity)
params = entity.params
for part in entity.parts:
if part.name is None:
key = ntou('parts')
else:
key = part.name
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
# -------------------------------- Entities --------------------------------- #
class Entity(object):
"""An HTTP request body, or MIME multipart body.
This class collects information about the HTTP request entity. When a
given entity is of MIME type "multipart", each part is parsed into its own
Entity instance, and the set of parts stored in
:attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.
Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
tries to process the request body (if any) by calling
:func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
This uses the ``content_type`` of the Entity to look up a suitable
processor in
:attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`,
a dict.
If a matching processor cannot be found for the complete Content-Type,
it tries again using the major type. For example, if a request with an
entity of type "image/jpeg" arrives, but no processor can be found for
that complete type, then one is sought for the major type "image". If a
processor is still not found, then the
:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method
of the Entity is called (which does nothing by default; you can
override this too).
CherryPy includes processors for the "application/x-www-form-urlencoded"
type, the "multipart/form-data" type, and the "multipart" major type.
CherryPy 3.2 processes these types almost exactly as older versions.
Parts are passed as arguments to the page handler using their
``Content-Disposition.name`` if given, otherwise in a generic "parts"
argument. Each such part is either a string, or the
:class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
case it will have ``file`` and ``filename`` attributes, or possibly a
``value`` attribute). Each Part is itself a subclass of
Entity, and has its own ``process`` method and ``processors`` dict.
There is a separate processor for the "multipart" major type which is more
flexible, and simply stores all multipart parts in
:attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
enable it with::
cherrypy.request.body.processors['multipart'] = \
_cpreqbody.process_multipart
in an ``on_start_resource`` tool.
"""
# http://tools.ietf.org/html/rfc2046#section-4.1.2:
# "The default character set, which must be assumed in the
# absence of a charset parameter, is US-ASCII."
# However, many browsers send data in utf-8 with no charset.
attempt_charsets = ['utf-8']
r"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1
<http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
charset = None
"""The successful decoding; see "attempt_charsets" above."""
content_type = None
"""The value of the Content-Type request header.
If the Entity is part of a multipart payload, this will be the
Content-Type given in the MIME headers for this part.
"""
default_content_type = 'application/x-www-form-urlencoded'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given.
The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
filename = None
"""The ``Content-Disposition.filename`` header, if available."""
fp = None
"""The readable socket file object."""
headers = None
"""A dict of request/multipart header names and values.
This is a copy of the ``request.headers`` for the ``request.body``;
for multipart parts, it is the set of headers for that part.
"""
length = None
"""The value of the ``Content-Length`` header, if provided."""
name = None
"""The "name" parameter of the ``Content-Disposition`` header, if any."""
params = None
"""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
processors = {'application/x-www-form-urlencoded': process_urlencoded,
'multipart/form-data': process_multipart_form_data,
'multipart': process_multipart,
}
"""A dict of Content-Type names to processor methods."""
parts = None
"""A list of Part instances if ``Content-Type`` is of major type
"multipart"."""
part_class = None
"""The class used for multipart parts.
You can replace this with custom subclasses to alter the processing
of multipart parts.
"""
def __init__(self, fp, headers, params=None, parts=None):
# Make an instance-specific copy of the class processors
# so Tools, etc. can replace them per-request.
self.processors = self.processors.copy()
self.fp = fp
self.headers = headers
if params is None:
params = {}
self.params = params
if parts is None:
parts = []
self.parts = parts
# Content-Type
self.content_type = headers.elements('Content-Type')
if self.content_type:
self.content_type = self.content_type[0]
else:
self.content_type = httputil.HeaderElement.from_str(
self.default_content_type)
# Copy the class 'attempt_charsets', prepending any Content-Type
# charset
dec = self.content_type.params.get('charset', None)
if dec:
self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
if c != dec]
else:
self.attempt_charsets = self.attempt_charsets[:]
# Length
self.length = None
clen = headers.get('Content-Length', None)
# If Transfer-Encoding is 'chunked', ignore any Content-Length.
if (
clen is not None and
'chunked' not in headers.get('Transfer-Encoding', '')
):
try:
self.length = int(clen)
except ValueError:
pass
# Content-Disposition
self.name = None
self.filename = None
disp = headers.elements('Content-Disposition')
if disp:
disp = disp[0]
if 'name' in disp.params:
self.name = disp.params['name']
if self.name.startswith('"') and self.name.endswith('"'):
self.name = self.name[1:-1]
if 'filename' in disp.params:
self.filename = disp.params['filename']
if (
self.filename.startswith('"') and
self.filename.endswith('"')
):
self.filename = self.filename[1:-1]
if 'filename*' in disp.params:
# @see https://tools.ietf.org/html/rfc5987
encoding, lang, filename = disp.params['filename*'].split("'")
self.filename = unquote(str(filename), encoding)
def read(self, size=None, fp_out=None):
return self.fp.read(size, fp_out)
def readline(self, size=None):
return self.fp.readline(size)
def readlines(self, sizehint=None):
return self.fp.readlines(sizehint)
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def next(self):
return self.__next__()
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read(fp_out=fp_out)
return fp_out
def make_file(self):
"""Return a file-like object into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed.
See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`.
"""
return tempfile.TemporaryFile()
def fullvalue(self):
"""Return this entity as a string, whether stored in a file or not."""
if self.file:
# It was stored in a tempfile. Read it.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
else:
value = self.value
value = self.decode_entity(value)
return value
def decode_entity(self, value):
"""Return a given byte encoded value as a string."""
for charset in self.attempt_charsets:
try:
value = value.decode(charset)
except UnicodeDecodeError:
pass
else:
self.charset = charset
return value
else:
raise cherrypy.HTTPError(
400,
'The request entity could not be decoded. The following '
'charsets were attempted: %s' % repr(self.attempt_charsets)
)
def process(self):
"""Execute the best-match processor for the given media type."""
proc = None
ct = self.content_type.value
try:
proc = self.processors[ct]
except KeyError:
toptype = ct.split('/', 1)[0]
try:
proc = self.processors[toptype]
except KeyError:
pass
if proc is None:
self.default_proc()
else:
proc(self)
def default_proc(self):
"""Called if a more-specific processor is not found for the
``Content-Type``.
"""
# Leave the fp alone for someone else to read. This works fine
# for request.body, but the Part subclasses need to override this
# so they can move on to the next part.
pass
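# Hedged sketch (comment only, not in the original source): overriding a
# processor per-request from an on_start_resource hook, as the module
# docstring describes. 'image_processor' is illustrative.
#
#     def image_processor(entity):
#         # Keep the raw bytes in a tempfile instead of parsing them.
#         entity.file = entity.read_into_file()
#
#     cherrypy.request.body.processors['image'] = image_processor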
class Part(Entity):
"""A MIME part entity, part of a multipart entity."""
# "The default character set, which must be assumed in the absence of a
# charset parameter, is US-ASCII."
attempt_charsets = ['us-ascii', 'utf-8']
r"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1
<http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
boundary = None
"""The MIME multipart boundary."""
default_content_type = 'text/plain'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however (this class),
the MIME spec declares that a part with no Content-Type defaults to
"text/plain".
"""
# This is the default in stdlib cgi. We may want to increase it.
maxrambytes = 1000
"""The threshold of bytes after which point the ``Part`` will store
its data in a file (generated by
:func:`make_file<cherrypy._cprequest.Entity.make_file>`)
instead of a string. Defaults to 1000, just like the :mod:`cgi`
module in Python's standard library.
"""
def __init__(self, fp, headers, boundary):
Entity.__init__(self, fp, headers)
self.boundary = boundary
self.file = None
self.value = None
@classmethod
def from_fp(cls, fp, boundary):
headers = cls.read_headers(fp)
return cls(fp, headers, boundary)
@classmethod
def read_headers(cls, fp):
headers = httputil.HeaderMap()
while True:
line = fp.readline()
if not line:
# No more data--illegal end of headers
raise EOFError('Illegal end of headers.')
if line == b'\r\n':
# Normal end of headers
break
if not line.endswith(b'\r\n'):
raise ValueError('MIME requires CRLF terminators: %r' % line)
if line[0] in b' \t':
# It's a continuation line.
v = line.strip().decode('ISO-8859-1')
else:
k, v = line.split(b':', 1)
k = k.strip().decode('ISO-8859-1')
v = v.strip().decode('ISO-8859-1')
existing = headers.get(k)
if existing:
v = ', '.join((existing, v))
headers[k] = v
return headers
def read_lines_to_boundary(self, fp_out=None):
"""Read bytes from self.fp and return or write them to a file.
If the 'fp_out' argument is None (the default), all bytes read
are returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
written to the fp, and that fp is returned.
"""
endmarker = self.boundary + b'--'
delim = b''
prev_lf = True
lines = []
seen = 0
while True:
line = self.fp.readline(1 << 16)
if not line:
raise EOFError('Illegal end of multipart body.')
if line.startswith(b'--') and prev_lf:
strippedline = line.strip()
if strippedline == self.boundary:
break
if strippedline == endmarker:
self.fp.finish()
break
line = delim + line
if line.endswith(b'\r\n'):
delim = b'\r\n'
line = line[:-2]
prev_lf = True
elif line.endswith(b'\n'):
delim = b'\n'
line = line[:-1]
prev_lf = True
else:
delim = b''
prev_lf = False
if fp_out is None:
lines.append(line)
seen += len(line)
if seen > self.maxrambytes:
fp_out = self.make_file()
for line in lines:
fp_out.write(line)
else:
fp_out.write(line)
if fp_out is None:
result = b''.join(lines)
return result
else:
fp_out.seek(0)
return fp_out
def default_proc(self):
"""Called if a more-specific processor is not found for the
``Content-Type``.
"""
if self.filename:
# Always read into a file if a .filename was given.
self.file = self.read_into_file()
else:
result = self.read_lines_to_boundary()
if isinstance(result, bytes):
self.value = result
else:
self.file = result
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read_lines_to_boundary(fp_out=fp_out)
return fp_out
Entity.part_class = Part
inf = float('inf')
class SizedReader:
def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE,
has_trailers=False):
# Wrap our fp in a buffer so peek() works
self.fp = fp
self.length = length
self.maxbytes = maxbytes
self.buffer = b''
self.bufsize = bufsize
self.bytes_read = 0
self.done = False
self.has_trailers = has_trailers
def read(self, size=None, fp_out=None):
"""Read bytes from the request body and return or write them to a file.
A number of bytes less than or equal to the 'size' argument are
read off the socket. The actual number of bytes read is tracked
in self.bytes_read. The number may be smaller than 'size' when
1) the client sends fewer bytes, 2) the 'Content-Length' request
header specifies fewer bytes than requested, or 3) the number of
bytes read exceeds self.maxbytes (in which case, 413 is raised).
If the 'fp_out' argument is None (the default), all bytes read
are returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
written to the fp, and None is returned.
"""
if self.length is None:
if size is None:
remaining = inf
else:
remaining = size
else:
remaining = self.length - self.bytes_read
if size and size < remaining:
remaining = size
if remaining == 0:
self.finish()
if fp_out is None:
return b''
else:
return None
chunks = []
# Read bytes from the buffer.
if self.buffer:
if remaining is inf:
data = self.buffer
self.buffer = b''
else:
data = self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
# Read bytes from the socket.
while remaining > 0:
chunksize = min(remaining, self.bufsize)
try:
data = self.fp.read(chunksize)
except Exception:
e = sys.exc_info()[1]
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, 'Maximum request length: %r' % e.args[1])
else:
raise
if not data:
self.finish()
break
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
if fp_out is None:
return b''.join(chunks)
def readline(self, size=None):
"""Read a line from the request body and return it."""
chunks = []
while size is None or size > 0:
chunksize = self.bufsize
if size is not None and size < self.bufsize:
chunksize = size
data = self.read(chunksize)
if not data:
break
pos = data.find(b'\n') + 1
if pos:
chunks.append(data[:pos])
remainder = data[pos:]
self.buffer += remainder
self.bytes_read -= len(remainder)
break
else:
chunks.append(data)
return b''.join(chunks)
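# Hedged note (comment only, not in the original source): readline() above
# may over-read by up to bufsize bytes; anything past the first newline is
# pushed back into self.buffer and self.bytes_read is decremented, so the
# next read()/readline() re-consumes it.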
def readlines(self, sizehint=None):
"""Read lines from the request body and return them."""
if self.length is not None:
if sizehint is None:
sizehint = self.length - self.bytes_read
else:
sizehint = min(sizehint, self.length - self.bytes_read)
lines = []
seen = 0
while True:
line = self.readline()
if not line:
break
lines.append(line)
seen += len(line)
if seen >= sizehint:
break
return lines
def finish(self):
self.done = True
if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
self.trailers = {}
try:
for line in self.fp.read_trailer_lines():
if line[0] in b' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(b':', 1)
except ValueError:
raise ValueError('Illegal header line.')
k = k.strip().title()
v = v.strip()
if k in cheroot.server.comma_separated_headers:
existing = self.trailers.get(k)
if existing:
v = b', '.join((existing, v))
self.trailers[k] = v
except Exception:
e = sys.exc_info()[1]
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, 'Maximum request length: %r' % e.args[1])
else:
raise
class RequestBody(Entity):
"""The entity of the HTTP request."""
bufsize = 8 * 1024
"""The buffer size used when reading the socket."""
# Don't parse the request body at all if the client didn't provide
# a Content-Type header. See
# https://github.com/cherrypy/cherrypy/issues/790
default_content_type = ''
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
maxbytes = None
"""Raise ``MaxSizeExceeded`` if more bytes than this are read from
the socket.
"""
def __init__(self, fp, headers, params=None, request_params=None):
Entity.__init__(self, fp, headers, params)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
# When no explicit charset parameter is provided by the
# sender, media subtypes of the "text" type are defined
# to have a default charset value of "ISO-8859-1" when
# received via HTTP.
if self.content_type.value.startswith('text/'):
for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
if c in self.attempt_charsets:
break
else:
self.attempt_charsets.append('ISO-8859-1')
# Temporary fix while deprecating passing .parts as .params.
self.processors['multipart'] = _old_process_multipart
if request_params is None:
request_params = {}
self.request_params = request_params
def process(self):
"""Process the request entity based on its Content-Type."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
# It is possible to send a POST request with no body, for example;
# however, app developers are responsible in that case for setting
# cherrypy.request.process_body to False so this method isn't called.
h = cherrypy.serving.request.headers
if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
raise cherrypy.HTTPError(411)
self.fp = SizedReader(self.fp, self.length,
self.maxbytes, bufsize=self.bufsize,
has_trailers='Trailer' in h)
super(RequestBody, self).process()
# Body params should also be a part of the request_params
# add them in here.
request_params = self.request_params
for key, value in self.params.items():
if key in request_params:
if not isinstance(request_params[key], list):
request_params[key] = [request_params[key]]
request_params[key].append(value)
else:
request_params[key] = value
| 36,404 | Python | .py | 837 | 33.550777 | 79 | 0.602152 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,585 | _cptools.py | rembo10_headphones/lib/cherrypy/_cptools.py |
"""CherryPy tools. A "tool" is any helper, adapted to CP.
Tools are usually designed to be used in a variety of ways (although
some may only offer one if they choose):
Library calls
All tools are callables that can be used wherever needed.
The arguments are straightforward and should be detailed within the
docstring.
Function decorators
All tools, when called, may be used as decorators which configure
individual CherryPy page handlers (methods on the CherryPy tree).
That is, "@tools.anytool()" should "turn on" the tool via the
decorated function's _cp_config attribute.
CherryPy config
If a tool exposes a "_setup" callable, it will be called
once per Request (if the feature is "turned on" via config).
Tools may be implemented as any object with a namespace. The builtins
are generally either modules or instances of the tools.Tool class.
"""
import cherrypy
from cherrypy._helper import expose
from cherrypy.lib import cptools, encoding, static, jsontools
from cherrypy.lib import sessions as _sessions, xmlrpcutil as _xmlrpc
from cherrypy.lib import caching as _caching
from cherrypy.lib import auth_basic, auth_digest
def _getargs(func):
"""Return the names of all static arguments to the given function."""
# Use this instead of importing inspect for less mem overhead.
import types
if isinstance(func, types.MethodType):
func = func.__func__
co = func.__code__
return co.co_varnames[:co.co_argcount]
_attr_error = (
'CherryPy Tools cannot be turned on directly. Instead, turn them '
'on via config, or use them as decorators on your page handlers.'
)
class Tool(object):
"""A registered function for use with CherryPy request-processing hooks.
help(tool.callable) should give you more information about this
Tool.
"""
namespace = 'tools'
def __init__(self, point, callable, name=None, priority=50):
self._point = point
self.callable = callable
self._name = name
self._priority = priority
self.__doc__ = self.callable.__doc__
self._setargs()
@property
def on(self):
raise AttributeError(_attr_error)
@on.setter
def on(self, value):
raise AttributeError(_attr_error)
def _setargs(self):
"""Copy func parameter names to obj attributes."""
try:
for arg in _getargs(self.callable):
setattr(self, arg, None)
except (TypeError, AttributeError):
if hasattr(self.callable, '__call__'):
for arg in _getargs(self.callable.__call__):
setattr(self, arg, None)
# IronPython 1.0 raises NotImplementedError because
# inspect.getargspec tries to access Python bytecode
# in co_code attribute.
except NotImplementedError:
pass
# IronPython 1B1 may raise IndexError in some cases,
# but if we trap it here it doesn't prevent CP from working.
except IndexError:
pass
def _merged_args(self, d=None):
"""Return a dict of configuration entries for this Tool."""
if d:
conf = d.copy()
else:
conf = {}
tm = cherrypy.serving.request.toolmaps[self.namespace]
if self._name in tm:
conf.update(tm[self._name])
if 'on' in conf:
del conf['on']
return conf
def __call__(self, *args, **kwargs):
"""Compile-time decorator (turn on the tool in config).
For example::
@expose
@tools.proxy()
def whats_my_base(self):
return cherrypy.request.base
"""
if args:
raise TypeError('The %r Tool does not accept positional '
'arguments; you must use keyword arguments.'
% self._name)
def tool_decorator(f):
if not hasattr(f, '_cp_config'):
f._cp_config = {}
subspace = self.namespace + '.' + self._name + '.'
f._cp_config[subspace + 'on'] = True
for k, v in kwargs.items():
f._cp_config[subspace + k] = v
return f
return tool_decorator
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop('priority', None)
if p is None:
p = getattr(self.callable, 'priority', self._priority)
cherrypy.serving.request.hooks.attach(self._point, self.callable,
priority=p, **conf)
class HandlerTool(Tool):
"""Tool which is called 'before main', that may skip normal handlers.
If the tool successfully handles the request (by setting
response.body), it should return True. This will cause CherryPy to
skip any 'normal' page handler. If the tool did not handle the
request, it should return False to tell CherryPy to continue on and
call the normal page handler. If the tool is declared AS a page
handler (see the 'handler' method), returning False will raise
NotFound.
"""
def __init__(self, callable, name=None):
Tool.__init__(self, 'before_handler', callable, name)
def handler(self, *args, **kwargs):
"""Use this tool as a CherryPy page handler.
For example::
class Root:
nav = tools.staticdir.handler(section="/nav", dir="nav",
root=absDir)
"""
@expose
def handle_func(*a, **kw):
handled = self.callable(*args, **self._merged_args(kwargs))
if not handled:
raise cherrypy.NotFound()
return cherrypy.serving.response.body
return handle_func
def _wrapper(self, **kwargs):
if self.callable(**kwargs):
cherrypy.serving.request.handler = None
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop('priority', None)
if p is None:
p = getattr(self.callable, 'priority', self._priority)
cherrypy.serving.request.hooks.attach(self._point, self._wrapper,
priority=p, **conf)
class HandlerWrapperTool(Tool):
"""Tool which wraps request.handler in a provided wrapper function.
The 'newhandler' arg must be a handler wrapper function that takes a
'next_handler' argument, plus ``*args`` and ``**kwargs``. Like all page
handler functions, it must return an iterable for use as
cherrypy.response.body.
For example, to allow your 'inner' page handlers to return dicts
which then get interpolated into a template::
def interpolator(next_handler, *args, **kwargs):
filename = cherrypy.request.config.get('template')
cherrypy.response.template = env.get_template(filename)
response_dict = next_handler(*args, **kwargs)
return cherrypy.response.template.render(**response_dict)
cherrypy.tools.jinja = HandlerWrapperTool(interpolator)
"""
def __init__(self, newhandler, point='before_handler', name=None,
priority=50):
self.newhandler = newhandler
self._point = point
self._name = name
self._priority = priority
def callable(self, *args, **kwargs):
innerfunc = cherrypy.serving.request.handler
def wrap(*args, **kwargs):
return self.newhandler(innerfunc, *args, **kwargs)
cherrypy.serving.request.handler = wrap
class ErrorTool(Tool):
"""Tool which is used to replace the default request.error_response."""
def __init__(self, callable, name=None):
Tool.__init__(self, None, callable, name)
def _wrapper(self):
self.callable(**self._merged_args())
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
cherrypy.serving.request.error_response = self._wrapper
# Builtin tools #
class SessionTool(Tool):
"""Session Tool for CherryPy.
sessions.locking
When 'implicit' (the default), the session will be locked for you,
just before running the page handler.
When 'early', the session will be locked before reading the request
body. This is off by default for safety reasons; for example,
a large upload would block the session, denying an AJAX
progress meter
(`issue <https://github.com/cherrypy/cherrypy/issues/630>`_).
When 'explicit' (or any other value), you need to call
cherrypy.session.acquire_lock() yourself before using
session data.
"""
def __init__(self):
# _sessions.init must be bound after headers are read
Tool.__init__(self, 'before_request_body', _sessions.init)
def _lock_session(self):
cherrypy.serving.session.acquire_lock()
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
hooks = cherrypy.serving.request.hooks
conf = self._merged_args()
p = conf.pop('priority', None)
if p is None:
p = getattr(self.callable, 'priority', self._priority)
hooks.attach(self._point, self.callable, priority=p, **conf)
locking = conf.pop('locking', 'implicit')
if locking == 'implicit':
hooks.attach('before_handler', self._lock_session)
elif locking == 'early':
# Lock before the request body (but after _sessions.init runs!)
hooks.attach('before_request_body', self._lock_session,
priority=60)
else:
# Don't lock
pass
hooks.attach('before_finalize', _sessions.save)
hooks.attach('on_end_request', _sessions.close)
def regenerate(self):
"""Drop the current session and make a new one (with a new id)."""
sess = cherrypy.serving.session
sess.regenerate()
# Grab cookie-relevant tool args
relevant = 'path', 'path_header', 'name', 'timeout', 'domain', 'secure'
conf = dict(
(k, v)
for k, v in self._merged_args().items()
if k in relevant
)
_sessions.set_response_cookie(**conf)
class XMLRPCController(object):
"""A Controller (page handler collection) for XML-RPC.
To use it, have your controllers subclass this base class (it will
turn on the tool for you).
You can also supply the following optional config entries::
tools.xmlrpc.encoding: 'utf-8'
tools.xmlrpc.allow_none: 0
XML-RPC is a rather discontinuous layer over HTTP; dispatching to the
appropriate handler must first be performed according to the URL, and
then a second dispatch step must take place according to the RPC method
specified in the request body. It also allows a superfluous "/RPC2"
prefix in the URL, supplies its own handler args in the body, and
requires a 200 OK "Fault" response instead of 404 when the desired
method is not found.
Therefore, XML-RPC cannot be implemented for CherryPy via a Tool alone.
This Controller acts as the dispatch target for the first half (based
on the URL); it then reads the RPC method from the request body and
does its own second dispatch step based on that method. It also reads
body params, and returns a Fault on error.
    The XMLRPCDispatcher strips any /RPC2 prefix; if you aren't using /RPC2
    in your URLs, you can safely skip turning on the XMLRPCDispatcher.
    Otherwise, you need to declare it in config::
request.dispatch: cherrypy.dispatch.XMLRPCDispatcher()
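    As an illustrative sketch (the method name here is made up), a minimal
    subclass might look like::

        class Root(XMLRPCController):
            @cherrypy.expose
            def multiply(self, a, b):
                return a * b

    An XML-RPC request naming method "multiply" would then be dispatched to
    ``multiply`` above, and its return value serialized into the response.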
"""
# Note we're hard-coding this into the 'tools' namespace. We could do
# a huge amount of work to make it relocatable, but the only reason why
# would be if someone actually disabled the default_toolbox. Meh.
_cp_config = {'tools.xmlrpc.on': True}
@expose
def default(self, *vpath, **params):
rpcparams, rpcmethod = _xmlrpc.process_body()
subhandler = self
for attr in str(rpcmethod).split('.'):
subhandler = getattr(subhandler, attr, None)
if subhandler and getattr(subhandler, 'exposed', False):
body = subhandler(*(vpath + rpcparams), **params)
else:
# https://github.com/cherrypy/cherrypy/issues/533
# if a method is not found, an xmlrpclib.Fault should be returned
# raising an exception here will do that; see
# cherrypy.lib.xmlrpcutil.on_error
raise Exception('method "%s" is not supported' % attr)
conf = cherrypy.serving.request.toolmaps['tools'].get('xmlrpc', {})
_xmlrpc.respond(body,
conf.get('encoding', 'utf-8'),
conf.get('allow_none', 0))
return cherrypy.serving.response.body
class SessionAuthTool(HandlerTool):
pass
class CachingTool(Tool):
"""Caching Tool for CherryPy."""
def _wrapper(self, **kwargs):
request = cherrypy.serving.request
if _caching.get(**kwargs):
request.handler = None
else:
if request.cacheable:
# Note the devious technique here of adding hooks on the fly
request.hooks.attach('before_finalize', _caching.tee_output,
priority=100)
_wrapper.priority = 90
def _setup(self):
"""Hook caching into cherrypy.request."""
conf = self._merged_args()
p = conf.pop('priority', None)
cherrypy.serving.request.hooks.attach('before_handler', self._wrapper,
priority=p, **conf)
class Toolbox(object):
"""A collection of Tools.
This object also functions as a config namespace handler for itself.
Custom toolboxes should be added to each Application's toolboxes
dict.
"""
def __init__(self, namespace):
self.namespace = namespace
def __setattr__(self, name, value):
# If the Tool._name is None, supply it from the attribute name.
if isinstance(value, Tool):
if value._name is None:
value._name = name
value.namespace = self.namespace
object.__setattr__(self, name, value)
def __enter__(self):
"""Populate request.toolmaps from tools specified in config."""
cherrypy.serving.request.toolmaps[self.namespace] = map = {}
def populate(k, v):
toolname, arg = k.split('.', 1)
bucket = map.setdefault(toolname, {})
bucket[arg] = v
return populate
def __exit__(self, exc_type, exc_val, exc_tb):
"""Run tool._setup() for each tool in our toolmap."""
map = cherrypy.serving.request.toolmaps.get(self.namespace)
if map:
for name, settings in map.items():
if settings.get('on', False):
tool = getattr(self, name)
tool._setup()
def register(self, point, **kwargs):
"""
Return a decorator which registers the function
at the given hook point.
"""
def decorator(func):
attr_name = kwargs.get('name', func.__name__)
tool = Tool(point, func, **kwargs)
setattr(self, attr_name, tool)
return func
return decorator
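# A sketch of Toolbox.register usage on a Toolbox instance such as the
# default_toolbox created below; the hook point, function name, and header
# are illustrative only:
#
#     @default_toolbox.register('before_finalize', priority=70)
#     def stamp():
#         cherrypy.serving.response.headers['X-Stamped'] = 'yes'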
default_toolbox = _d = Toolbox('tools')
_d.session_auth = SessionAuthTool(cptools.session_auth)
_d.allow = Tool('on_start_resource', cptools.allow)
_d.proxy = Tool('before_request_body', cptools.proxy, priority=30)
_d.response_headers = Tool('on_start_resource', cptools.response_headers)
_d.log_tracebacks = Tool('before_error_response', cptools.log_traceback)
_d.log_headers = Tool('before_error_response', cptools.log_request_headers)
_d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100)
_d.err_redirect = ErrorTool(cptools.redirect)
_d.etags = Tool('before_finalize', cptools.validate_etags, priority=75)
_d.decode = Tool('before_request_body', encoding.decode)
# the order of encoding, gzip, caching is important
_d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70)
_d.gzip = Tool('before_finalize', encoding.gzip, priority=80)
_d.staticdir = HandlerTool(static.staticdir)
_d.staticfile = HandlerTool(static.staticfile)
_d.sessions = SessionTool()
_d.xmlrpc = ErrorTool(_xmlrpc.on_error)
_d.caching = CachingTool('before_handler', _caching.get, 'caching')
_d.expires = Tool('before_finalize', _caching.expires)
_d.ignore_headers = Tool('before_request_body', cptools.ignore_headers)
_d.referer = Tool('before_request_body', cptools.referer)
_d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60)
_d.flatten = Tool('before_finalize', cptools.flatten)
_d.accept = Tool('on_start_resource', cptools.accept)
_d.redirect = Tool('on_start_resource', cptools.redirect)
_d.autovary = Tool('on_start_resource', cptools.autovary, priority=0)
_d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
_d.params = Tool('before_handler', cptools.convert_params, priority=15)
del _d, cptools, encoding, static
| 18,167 | Python | .py | 392 | 37.670918 | 79 | 0.640011 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,586 | _cpconfig.py | rembo10_headphones/lib/cherrypy/_cpconfig.py |
"""Configuration system for CherryPy.
Configuration in CherryPy is implemented via dictionaries. Keys are strings
which name the mapped value, which may be of any type.
Architecture
------------
CherryPy Requests are part of an Application, which runs in a global context,
and configuration data may apply to any of those three scopes:
Global
Configuration entries which apply everywhere are stored in
cherrypy.config.
Application
Entries which apply to each mounted application are stored
on the Application object itself, as 'app.config'. This is a two-level
dict where each key is a path, or "relative URL" (for example, "/" or
"/path/to/my/page"), and each value is a config dict. Usually, this
data is provided in the call to tree.mount(root(), config=conf),
although you may also use app.merge(conf).
Request
Each Request object possesses a single 'Request.config' dict.
Early in the request process, this dict is populated by merging global
config entries, Application entries (whose path equals or is a parent
of Request.path_info), and any config acquired while looking up the
page handler (see next).
Declaration
-----------
Configuration data may be supplied as a Python dictionary, as a filename,
or as an open file object. When you supply a filename or file, CherryPy
uses Python's builtin ConfigParser; you declare Application config by
writing each path as a section header::
[/path/to/my/page]
request.stream = True
To declare global configuration entries, place them in a [global] section.
You may also declare config entries directly on the classes and methods
(page handlers) that make up your CherryPy application via the ``_cp_config``
attribute, set with the ``cherrypy.config`` decorator. For example::
@cherrypy.config(**{'tools.gzip.on': True})
class Demo:
@cherrypy.expose
@cherrypy.config(**{'request.show_tracebacks': False})
def index(self):
return "Hello world"
.. note::
This behavior is only guaranteed for the default dispatcher.
Other dispatchers may have different restrictions on where
you can attach config attributes.
Namespaces
----------
Configuration keys are separated into namespaces by the first "." in the key.
Current namespaces:
engine
Controls the 'application engine', including autoreload.
These can only be declared in the global config.
tree
Grafts cherrypy.Application objects onto cherrypy.tree.
These can only be declared in the global config.
hooks
Declares additional request-processing functions.
log
Configures the logging for each application.
These can only be declared in the global or / config.
request
Adds attributes to each Request.
response
Adds attributes to each Response.
server
Controls the default HTTP server via cherrypy.server.
These can only be declared in the global config.
tools
Runs and configures additional request-processing packages.
wsgi
Adds WSGI middleware to an Application's "pipeline".
These can only be declared in the app's root config ("/").
checker
Controls the 'checker', which looks for common errors in
app state (including config) when the engine starts.
Global config only.
The only key that does not exist in a namespace is the "environment" entry.
This special entry 'imports' other config entries from a template stored in
cherrypy._cpconfig.environments[environment]. It only applies to the global
config, and only when you use cherrypy.config.update.
You can define your own namespaces to be called at the Global, Application,
or Request level, by adding a named handler to cherrypy.config.namespaces,
app.namespaces, or app.request_class.namespaces. The name can
be any string, and the handler must be either a callable or a (Python 2.5
style) context manager.
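For example, a hypothetical handler (the namespace name and target dict are
made up) could be registered as::

    settings = {}
    def myapp_namespace(k, v):
        settings[k] = v
    cherrypy.config.namespaces['myapp'] = myapp_namespace

after which a config entry such as ``myapp.debug = True`` results in the
call ``myapp_namespace('debug', True)``.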
"""
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import reprconf
def _if_filename_register_autoreload(ob):
"""Register for autoreload if ob is a string (presumed filename)."""
is_filename = isinstance(ob, text_or_bytes)
is_filename and cherrypy.engine.autoreload.files.add(ob)
def merge(base, other):
"""Merge one app config (from a dict, file, or filename) into another.
If the given config is a filename, it will be appended to the list
of files to monitor for "autoreload" changes.
"""
_if_filename_register_autoreload(other)
# Load other into base
for section, value_map in reprconf.Parser.load(other).items():
if not isinstance(value_map, dict):
raise ValueError(
'Application config must include section headers, but the '
"config you tried to merge doesn't have any sections. "
'Wrap your config in another dict with paths as section '
"headers, for example: {'/': config}.")
base.setdefault(section, {}).update(value_map)
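# An illustrative use of merge(); the dict contents and filename are made up:
#     merge(app.config, {'/': {'tools.gzip.on': True}})
#     merge(app.config, 'site.conf')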
class Config(reprconf.Config):
"""The 'global' configuration data for the entire CherryPy process."""
def update(self, config):
"""Update self from a dict, file or filename."""
_if_filename_register_autoreload(config)
super(Config, self).update(config)
def _apply(self, config):
"""Update self from a dict."""
if isinstance(config.get('global'), dict):
if len(config) > 1:
cherrypy.checker.global_config_contained_paths = True
config = config['global']
if 'tools.staticdir.dir' in config:
config['tools.staticdir.section'] = 'global'
super(Config, self)._apply(config)
@staticmethod
def __call__(**kwargs):
"""Decorate for page handlers to set _cp_config."""
def tool_decorator(f):
_Vars(f).setdefault('_cp_config', {}).update(kwargs)
return f
return tool_decorator
class _Vars(object):
"""Adapter allowing setting a default attribute on a function or class."""
def __init__(self, target):
self.target = target
def setdefault(self, key, default):
if not hasattr(self.target, key):
setattr(self.target, key, default)
return getattr(self.target, key)
# Sphinx begin config.environments
Config.environments = environments = {
'staging': {
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
},
'production': {
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
'log.screen': False,
},
'embedded': {
# For use with CherryPy embedded in another deployment stack.
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
'log.screen': False,
'engine.SIGHUP': None,
'engine.SIGTERM': None,
},
'test_suite': {
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': True,
'request.show_mismatched_params': True,
'log.screen': False,
},
}
# Sphinx end config.environments
def _server_namespace_handler(k, v):
"""Config handler for the "server" namespace."""
atoms = k.split('.', 1)
if len(atoms) > 1:
# Special-case config keys of the form 'server.servername.socket_port'
# to configure additional HTTP servers.
if not hasattr(cherrypy, 'servers'):
cherrypy.servers = {}
servername, k = atoms
if servername not in cherrypy.servers:
from cherrypy import _cpserver
cherrypy.servers[servername] = _cpserver.Server()
# On by default, but 'on = False' can unsubscribe it (see below).
cherrypy.servers[servername].subscribe()
if k == 'on':
if v:
cherrypy.servers[servername].subscribe()
else:
cherrypy.servers[servername].unsubscribe()
else:
setattr(cherrypy.servers[servername], k, v)
else:
setattr(cherrypy.server, k, v)
Config.namespaces['server'] = _server_namespace_handler
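# For example, an extra HTTP server could be configured with entries like
# the following (the server name and port are illustrative):
#     [global]
#     server.2.socket_port = 8081
# which creates cherrypy.servers['2'] (a subscribed _cpserver.Server) and
# sets its socket_port attribute.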
def _engine_namespace_handler(k, v):
"""Config handler for the "engine" namespace."""
engine = cherrypy.engine
if k in {'SIGHUP', 'SIGTERM'}:
engine.subscribe(k, v)
return
if '.' in k:
plugin, attrname = k.split('.', 1)
plugin = getattr(engine, plugin)
op = 'subscribe' if v else 'unsubscribe'
sub_unsub = getattr(plugin, op, None)
if attrname == 'on' and callable(sub_unsub):
sub_unsub()
return
setattr(plugin, attrname, v)
else:
setattr(engine, k, v)
Config.namespaces['engine'] = _engine_namespace_handler
def _tree_namespace_handler(k, v):
"""Namespace handler for the 'tree' config namespace."""
if isinstance(v, dict):
for script_name, app in v.items():
cherrypy.tree.graft(app, script_name)
msg = 'Mounted: %s on %s' % (app, script_name or '/')
cherrypy.engine.log(msg)
else:
cherrypy.tree.graft(v, v.script_name)
cherrypy.engine.log('Mounted: %s on %s' % (v, v.script_name or '/'))
Config.namespaces['tree'] = _tree_namespace_handler
| 9,646 | Python | .py | 229 | 35.69869 | 78 | 0.680141 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,587 | _cpdispatch.py | rembo10_headphones/lib/cherrypy/_cpdispatch.py |
"""CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info,
other request attributes, and the application architecture. The core
calls the dispatcher as early as possible, passing it a 'path_info'
argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import string
import sys
import types
try:
classtype = (type, types.ClassType)
except AttributeError:
classtype = type
import cherrypy
class PageHandler(object):
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
@property
def args(self):
"""The ordered args should be accessible from post dispatch hooks."""
return cherrypy.serving.request.args
@args.setter
def args(self, args):
cherrypy.serving.request.args = args
return cherrypy.serving.request.args
@property
def kwargs(self):
"""The named kwargs should be accessible from post dispatch hooks."""
return cherrypy.serving.request.kwargs
@kwargs.setter
def kwargs(self, kwargs):
cherrypy.serving.request.kwargs = kwargs
return cherrypy.serving.request.kwargs
def __call__(self):
try:
return self.callable(*self.args, **self.kwargs)
except TypeError:
x = sys.exc_info()[1]
try:
test_callable_spec(self.callable, self.args, self.kwargs)
except cherrypy.HTTPError:
raise sys.exc_info()[1]
except Exception:
raise x
raise
def test_callable_spec(callable, callable_args, callable_kwargs):
"""Inspect callable and test to see if the given args are suitable for it.
    When an error occurs while invoking the handler, there are two
    erroneous cases:
    1. Too many parameters are passed to a function which doesn't define
       one of *args or **kwargs.
    2. Too few parameters are passed to the function.
There are 3 sources of parameters to a cherrypy handler.
1. query string parameters are passed as keyword parameters to the
handler.
2. body parameters are also passed as keyword parameters.
3. when partial matching occurs, the final path atoms are passed as
positional args.
    Both the query string and path atoms are part of the URI. If they are
    incorrect, then a 404 Not Found should be raised. Conversely, the body
    parameters are part of the request; if they are invalid, a 400 Bad
    Request should be raised.
"""
show_mismatched_params = getattr(
cherrypy.serving.request, 'show_mismatched_params', False)
try:
(args, varargs, varkw, defaults) = getargspec(callable)
except TypeError:
if isinstance(callable, object) and hasattr(callable, '__call__'):
(args, varargs, varkw,
defaults) = getargspec(callable.__call__)
else:
# If it wasn't one of our own types, re-raise
# the original error
raise
if args and (
# For callable objects, which have a __call__(self) method
hasattr(callable, '__call__') or
# For normal methods
inspect.ismethod(callable)
):
# Strip 'self'
args = args[1:]
arg_usage = dict([(arg, 0,) for arg in args])
vararg_usage = 0
varkw_usage = 0
extra_kwargs = set()
for i, value in enumerate(callable_args):
try:
arg_usage[args[i]] += 1
except IndexError:
vararg_usage += 1
for key in callable_kwargs.keys():
try:
arg_usage[key] += 1
except KeyError:
varkw_usage += 1
extra_kwargs.add(key)
# figure out which args have defaults.
args_with_defaults = args[-len(defaults or []):]
for i, val in enumerate(defaults or []):
# Defaults take effect only when the arg hasn't been used yet.
if arg_usage[args_with_defaults[i]] == 0:
arg_usage[args_with_defaults[i]] += 1
missing_args = []
multiple_args = []
for key, usage in arg_usage.items():
if usage == 0:
missing_args.append(key)
elif usage > 1:
multiple_args.append(key)
if missing_args:
# In the case where the method allows body arguments
# there are 3 potential errors:
# 1. not enough query string parameters -> 404
# 2. not enough body parameters -> 400
# 3. not enough path parts (partial matches) -> 404
#
# We can't actually tell which case it is,
# so I'm raising a 404 because that covers 2/3 of the
# possibilities
#
# In the case where the method does not allow body
# arguments it's definitely a 404.
message = None
if show_mismatched_params:
message = 'Missing parameters: %s' % ','.join(missing_args)
raise cherrypy.HTTPError(404, message=message)
# the extra positional arguments come from the path - 404 Not Found
if not varargs and vararg_usage > 0:
raise cherrypy.HTTPError(404)
body_params = cherrypy.serving.request.body.params or {}
body_params = set(body_params.keys())
qs_params = set(callable_kwargs.keys()) - body_params
if multiple_args:
if qs_params.intersection(set(multiple_args)):
# If any of the multiple parameters came from the query string then
# it's a 404 Not Found
error = 404
else:
# Otherwise it's a 400 Bad Request
error = 400
message = None
if show_mismatched_params:
message = 'Multiple values for parameters: '\
'%s' % ','.join(multiple_args)
raise cherrypy.HTTPError(error, message=message)
if not varkw and varkw_usage > 0:
# If there were extra query string parameters, it's a 404 Not Found
extra_qs_params = set(qs_params).intersection(extra_kwargs)
if extra_qs_params:
message = None
if show_mismatched_params:
message = 'Unexpected query string '\
'parameters: %s' % ', '.join(extra_qs_params)
raise cherrypy.HTTPError(404, message=message)
        # If there were any extra body parameters, it's a 400 Bad Request
extra_body_params = set(body_params).intersection(extra_kwargs)
if extra_body_params:
message = None
if show_mismatched_params:
message = 'Unexpected body parameters: '\
'%s' % ', '.join(extra_body_params)
raise cherrypy.HTTPError(400, message=message)
try:
import inspect
except ImportError:
def test_callable_spec(callable, args, kwargs): # noqa: F811
return None
else:
def getargspec(callable):
return inspect.getfullargspec(callable)[:4]
class LateParamPageHandler(PageHandler):
"""When passing cherrypy.request.params to the page handler, we do not
want to capture that dict too early; we want to give tools like the
decoding tool a chance to modify the params dict in-between the lookup
of the handler and the actual calling of the handler. This subclass
takes that into account, and allows request.params to be 'bound late'
(it's more complicated than that, but that's the effect).
"""
@property
def kwargs(self):
"""Page handler kwargs (with cherrypy.request.params copied in)."""
kwargs = cherrypy.serving.request.params.copy()
if self._kwargs:
kwargs.update(self._kwargs)
return kwargs
@kwargs.setter
def kwargs(self, kwargs):
cherrypy.serving.request.kwargs = kwargs
self._kwargs = kwargs
if sys.version_info < (3, 0):
punctuation_to_underscores = string.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, str) or len(t) != 256:
raise ValueError(
'The translate argument must be a str of len 256.')
else:
punctuation_to_underscores = str.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, dict):
raise ValueError('The translate argument must be a dict.')
class Dispatcher(object):
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
The tree is rooted at cherrypy.request.app.root, and each
hierarchical component in the path_info argument is matched to a
corresponding nested attribute of the root object. Matching handlers
must have an 'exposed' attribute which evaluates to True. The
special method name "index" matches a URI which ends in a slash
("/"). The special method name "default" may match a portion of the
path_info (but only when no longer substring of the path_info
matches some other object).
This is the default, built-in dispatcher for CherryPy.
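    As an illustrative sketch of the mapping (handler names are made up)::

        class Root:
            @cherrypy.expose
            def index(self):
                ...                      # matches "/"
            @cherrypy.expose
            def page(self, name):
                ...                      # "/page/foo" calls root.page('foo')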
"""
dispatch_method_name = '_cp_dispatch'
"""
The name of the dispatch method that nodes may optionally implement
to provide their own dynamic dispatch algorithm.
"""
def __init__(self, dispatch_method_name=None,
translate=punctuation_to_underscores):
validate_translator(translate)
self.translate = translate
if dispatch_method_name:
self.dispatch_method_name = dispatch_method_name
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
func, vpath = self.find_handler(path_info)
if func:
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace('%2F', '/') for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.NotFound()
def find_handler(self, path):
"""Return the appropriate page handler, plus any virtual path.
This will return two objects. The first will be a callable,
which can be used to generate page output. Any parameters from
the query string or request body will be sent to that callable
as keyword arguments.
The callable is found by traversing the application's tree,
starting from cherrypy.request.app.root, and matching path
components to successive objects in the tree. For example, the
URL "/path/to/handler" might return root.path.to.handler.
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
and were not used when looking up the handler. These virtual
path components are passed to the handler as positional
arguments.
"""
request = cherrypy.serving.request
app = request.app
root = app.root
dispatch_name = self.dispatch_method_name
# Get config for the root object/path.
fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
fullpath_len = len(fullpath)
segleft = fullpath_len
nodeconf = {}
if hasattr(root, '_cp_config'):
nodeconf.update(root._cp_config)
if '/' in app.config:
nodeconf.update(app.config['/'])
object_trail = [['root', root, nodeconf, segleft]]
node = root
iternames = fullpath[:]
while iternames:
name = iternames[0]
# map to legal Python identifiers (e.g. replace '.' with '_')
objname = name.translate(self.translate)
nodeconf = {}
subnode = getattr(node, objname, None)
pre_len = len(iternames)
if subnode is None:
dispatch = getattr(node, dispatch_name, None)
if dispatch and hasattr(dispatch, '__call__') and not \
getattr(dispatch, 'exposed', False) and \
pre_len > 1:
# Don't expose the hidden 'index' token to _cp_dispatch
# We skip this if pre_len == 1 since it makes no sense
# to call a dispatcher when we have no tokens left.
index_name = iternames.pop()
subnode = dispatch(vpath=iternames)
iternames.append(index_name)
else:
# We didn't find a path, but keep processing in case there
# is a default() handler.
iternames.pop(0)
else:
# We found the path, remove the vpath entry
iternames.pop(0)
segleft = len(iternames)
if segleft > pre_len:
# No path segment was removed. Raise an error.
raise cherrypy.CherryPyException(
'A vpath segment was added. Custom dispatchers may only '
'remove elements. While trying to process '
'{0} in {1}'.format(name, fullpath)
)
elif segleft == pre_len:
# Assume that the handler used the current path segment, but
# did not pop it. This allows things like
# return getattr(self, vpath[0], None)
iternames.pop(0)
segleft -= 1
node = subnode
if node is not None:
# Get _cp_config attached to this node.
if hasattr(node, '_cp_config'):
nodeconf.update(node._cp_config)
# Mix in values from app.config for this path.
existing_len = fullpath_len - pre_len
if existing_len != 0:
curpath = '/' + '/'.join(fullpath[0:existing_len])
else:
curpath = ''
new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
for seg in new_segs:
curpath += '/' + seg
if curpath in app.config:
nodeconf.update(app.config[curpath])
object_trail.append([name, node, nodeconf, segleft])
def set_conf():
"""Collapse all object_trail config into cherrypy.request.config.
"""
base = cherrypy.config.copy()
# Note that we merge the config from each node
# even if that node was None.
for name, obj, conf, segleft in object_trail:
base.update(conf)
if 'tools.staticdir.dir' in conf:
base['tools.staticdir.section'] = '/' + \
'/'.join(fullpath[0:fullpath_len - segleft])
return base
# Try successive objects (reverse order)
num_candidates = len(object_trail) - 1
for i in range(num_candidates, -1, -1):
name, candidate, nodeconf, segleft = object_trail[i]
if candidate is None:
continue
# Try a "default" method on the current leaf.
if hasattr(candidate, 'default'):
defhandler = candidate.default
if getattr(defhandler, 'exposed', False):
# Insert any extra _cp_config from the default handler.
conf = getattr(defhandler, '_cp_config', {})
object_trail.insert(
i + 1, ['default', defhandler, conf, segleft])
request.config = set_conf()
# See https://github.com/cherrypy/cherrypy/issues/613
request.is_index = path.endswith('/')
return defhandler, fullpath[fullpath_len - segleft:-1]
# Uncomment the next line to restrict positional params to
# "default".
# if i < num_candidates - 2: continue
# Try the current leaf.
if getattr(candidate, 'exposed', False):
request.config = set_conf()
if i == num_candidates:
# We found the extra ".index". Mark request so tools
# can redirect if path_info has no trailing slash.
request.is_index = True
else:
# We're not at an 'index' handler. Mark request so tools
# can redirect if path_info has NO trailing slash.
# Note that this also includes handlers which take
# positional parameters (virtual paths).
request.is_index = False
return candidate, fullpath[fullpath_len - segleft:-1]
# We didn't find anything
request.config = set_conf()
return None, []
class MethodDispatcher(Dispatcher):
"""Additional dispatch based on cherrypy.request.method.upper().
Methods named GET, POST, etc will be called on an exposed class. The
method names must be all caps; the appropriate Allow header will be
output showing all capitalized method names as allowable HTTP verbs.
Note that the containing class must be exposed, not the methods.
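    An illustrative resource (the class and return values are made up)::

        @cherrypy.expose
        class Item(object):
            def GET(self):
                return 'read'
            def PUT(self):
                return 'updated'

    A GET request would call ``Item.GET``, and the response would carry
    ``Allow: GET, HEAD, PUT``.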
"""
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
resource, vpath = self.find_handler(path_info)
if resource:
# Set Allow header
avail = [m for m in dir(resource) if m.isupper()]
if 'GET' in avail and 'HEAD' not in avail:
avail.append('HEAD')
avail.sort()
cherrypy.serving.response.headers['Allow'] = ', '.join(avail)
# Find the subhandler
meth = request.method.upper()
func = getattr(resource, meth, None)
if func is None and meth == 'HEAD':
func = getattr(resource, 'GET', None)
if func:
# Grab any _cp_config on the subhandler.
if hasattr(func, '_cp_config'):
request.config.update(func._cp_config)
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace('%2F', '/') for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.HTTPError(405)
else:
request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False, **mapper_options):
"""Routes dispatcher.
Set full_result to True if you wish the controller and the
action to be passed on to the page handler parameters. By
default they won't be.
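        An illustrative setup (the route and controller are made up)::

            d = RoutesDispatcher()
            d.connect('blog', '/blog/{action}', controller=BlogController())
            # then, in config: request.dispatch = d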
"""
import routes
self.full_result = full_result
self.controllers = {}
self.mapper = routes.Mapper(**mapper_options)
self.mapper.controller_scan = self.controllers.keys
def connect(self, name, route, controller, **kwargs):
self.controllers[name] = controller
self.mapper.connect(name, route, controller=name, **kwargs)
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def __call__(self, path_info):
"""Set handler and config for the current request."""
func = self.find_handler(path_info)
if func:
cherrypy.serving.request.handler = LateParamPageHandler(func)
else:
cherrypy.serving.request.handler = cherrypy.NotFound()
def find_handler(self, path_info):
"""Find the right page handler, and set request.config."""
import routes
request = cherrypy.serving.request
config = routes.request_config()
config.mapper = self.mapper
if hasattr(request, 'wsgi_environ'):
config.environ = request.wsgi_environ
config.host = request.headers.get('Host', None)
config.protocol = request.scheme
config.redirect = self.redirect
result = self.mapper.match(path_info)
config.mapper_dict = result
params = {}
if result:
params = result.copy()
if not self.full_result:
params.pop('controller', None)
params.pop('action', None)
request.params.update(params)
# Get config for the root object/path.
request.config = base = cherrypy.config.copy()
curpath = ''
def merge(nodeconf):
if 'tools.staticdir.dir' in nodeconf:
nodeconf['tools.staticdir.section'] = curpath or '/'
base.update(nodeconf)
app = request.app
root = app.root
if hasattr(root, '_cp_config'):
merge(root._cp_config)
if '/' in app.config:
merge(app.config['/'])
# Mix in values from app.config.
atoms = [x for x in path_info.split('/') if x]
if atoms:
last = atoms.pop()
else:
last = None
for atom in atoms:
curpath = '/'.join((curpath, atom))
if curpath in app.config:
merge(app.config[curpath])
handler = None
if result:
controller = result.get('controller')
controller = self.controllers.get(controller, controller)
if controller:
if isinstance(controller, classtype):
controller = controller()
# Get config from the controller.
if hasattr(controller, '_cp_config'):
merge(controller._cp_config)
action = result.get('action')
if action is not None:
handler = getattr(controller, action, None)
# Get config from the handler
if hasattr(handler, '_cp_config'):
merge(handler._cp_config)
else:
handler = controller
# Do the last path atom here so it can
# override the controller's _cp_config.
if last:
curpath = '/'.join((curpath, last))
if curpath in app.config:
merge(app.config[curpath])
return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
from cherrypy.lib import xmlrpcutil
def xmlrpc_dispatch(path_info):
path_info = xmlrpcutil.patched_path(path_info)
return next_dispatcher(path_info)
return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True,
**domains):
"""Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
website structure. For example::
http://www.domain.example -> root
http://www.domain2.example -> root/domain2/
http://www.domain2.example:443 -> root/secure
can be accomplished via the following config::
[/]
request.dispatch = cherrypy.dispatch.VirtualHost(
**{'www.domain2.example': '/domain2',
'www.domain2.example:443': '/secure',
})
next_dispatcher
The next dispatcher object in the dispatch chain.
The VirtualHost dispatcher adds a prefix to the URL and calls
another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
use_x_forwarded_host
If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
``**domains``
A dict of {host header value: virtual prefix} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding "virtual prefix"
value will be prepended to the URL path before calling the
next dispatcher. Note that you often need separate entries
for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
from cherrypy.lib import httputil
def vhost_dispatch(path_info):
request = cherrypy.serving.request
header = request.headers.get
domain = header('Host', '')
if use_x_forwarded_host:
domain = header('X-Forwarded-Host', domain)
prefix = domains.get(domain, '')
if prefix:
path_info = httputil.urljoin(prefix, path_info)
result = next_dispatcher(path_info)
# Touch up staticdir config. See
# https://github.com/cherrypy/cherrypy/issues/614.
section = request.config.get('tools.staticdir.section')
if section:
section = section[len(prefix):]
request.config['tools.staticdir.section'] = section
return result
return vhost_dispatch
| 25,194 | Python | .py | 564 | 34.147163 | 79 | 0.608002 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,588 | __init__.py | rembo10_headphones/lib/cherrypy/scaffold/__init__.py |
"""<MyProject>, a CherryPy application.
Use this as a base for creating new CherryPy applications. When you want
to make a new app, copy and paste this folder to some other location
(maybe site-packages) and rename it to the name of your project,
then tweak as desired.
Even before any tweaking, this should serve a few demonstration pages.
Change to this directory and run:
cherryd -c site.conf
"""
import cherrypy
from cherrypy import tools, url
import os
local_dir = os.path.join(os.getcwd(), os.path.dirname(__file__))
@cherrypy.config(**{'tools.log_tracebacks.on': True})
class Root:
"""Declaration of the CherryPy app URI structure."""
@cherrypy.expose
def index(self):
"""Render HTML-template at the root path of the web-app."""
return """<html>
<body>Try some <a href='%s?a=7'>other</a> path,
or a <a href='%s?n=14'>default</a> path.<br />
Or, just look at the pretty picture:<br />
<img src='%s' />
</body></html>""" % (url('other'), url('else'),
url('files/made_with_cherrypy_small.png'))
@cherrypy.expose
def default(self, *args, **kwargs):
"""Render catch-all args and kwargs."""
return 'args: %s kwargs: %s' % (args, kwargs)
@cherrypy.expose
def other(self, a=2, b='bananas', c=None):
"""Render number of fruits based on third argument."""
cherrypy.response.headers['Content-Type'] = 'text/plain'
if c is None:
return 'Have %d %s.' % (int(a), b)
else:
return 'Have %d %s, %s.' % (int(a), b, c)
files = tools.staticdir.handler(
section='/files',
dir=os.path.join(local_dir, 'static'),
# Ignore .php files, etc.
match=r'\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$',
)
root = Root()
# Uncomment the following to use your own favicon instead of CP's default.
# favicon_path = os.path.join(local_dir, "favicon.ico")
# root.favicon_ico = tools.staticfile.handler(filename=favicon_path)
| 1,996 | Python | .py | 48 | 36.354167 | 74 | 0.649431 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,589 | reprconf.py | rembo10_headphones/lib/cherrypy/lib/reprconf.py |
"""Generic configuration system using unrepr.
Configuration data may be supplied as a Python dictionary, as a filename,
or as an open file object. When you supply a filename or file, Python's
builtin ConfigParser is used (with some extensions).
Namespaces
----------
Configuration keys are separated into namespaces by the first "." in the key.
The only key that cannot exist in a namespace is the "environment" entry.
This special entry 'imports' other config entries from a template stored in
the Config.environments dict.
You can define your own namespaces to be called when new config is merged
by adding a named handler to Config.namespaces. The name can be any string,
and the handler must be either a callable or a context manager.
"""
import builtins
import configparser
import operator
import sys
from cherrypy._cpcompat import text_or_bytes
class NamespaceSet(dict):
"""A dict of config namespace names and handlers.
Each config entry should begin with a namespace name; the
corresponding namespace handler will be called once for each config
entry in that namespace, and will be passed two arguments: the
config key (with the namespace removed) and the config value.
Namespace handlers may be any Python callable; they may also be
context managers, in which case their __enter__ method should return
a callable to be used as the handler. See cherrypy.tools (the
Toolbox class) for an example.
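    An illustrative context-manager handler (entirely hypothetical)::

        class ReportingHandler(object):
            def __enter__(self):
                def handler(k, v):
                    print('%s = %r' % (k, v))
                return handler
            def __exit__(self, exc_type, exc_val, exc_tb):
                pass

        Config.namespaces['report'] = ReportingHandler()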
"""
def __call__(self, config):
"""Iterate through config and pass it to each namespace handler.
config
A flat dict, where keys use dots to separate
namespaces, and values are arbitrary.
The first name in each config key is used to look up the
corresponding namespace handler. For example, a config entry of
{'tools.gzip.on': v} will call the 'tools' namespace handler
with the args: ('gzip.on', v)
"""
# Separate the given config into namespaces
ns_confs = {}
for k in config:
if '.' in k:
ns, name = k.split('.', 1)
bucket = ns_confs.setdefault(ns, {})
bucket[name] = config[k]
# I chose __enter__ and __exit__ so someday this could be
# rewritten using 'with' statement:
# for ns, handler in self.items():
# with handler as callable:
# for k, v in ns_confs.get(ns, {}).items():
# callable(k, v)
for ns, handler in self.items():
exit = getattr(handler, '__exit__', None)
if exit:
callable = handler.__enter__()
no_exc = True
try:
try:
for k, v in ns_confs.get(ns, {}).items():
callable(k, v)
except Exception:
# The exceptional case is handled here
no_exc = False
if exit is None:
raise
if not exit(*sys.exc_info()):
raise
# The exception is swallowed if exit() returns true
finally:
# The normal and non-local-goto cases are handled here
if no_exc and exit:
exit(None, None, None)
else:
for k, v in ns_confs.get(ns, {}).items():
handler(k, v)
def __repr__(self):
return '%s.%s(%s)' % (self.__module__, self.__class__.__name__,
dict.__repr__(self))
def __copy__(self):
newobj = self.__class__()
newobj.update(self)
return newobj
copy = __copy__
class Config(dict):
"""A dict-like set of configuration data, with defaults and namespaces.
May take a file, filename, or dict.
"""
defaults = {}
environments = {}
namespaces = NamespaceSet()
def __init__(self, file=None, **kwargs):
self.reset()
if file is not None:
self.update(file)
if kwargs:
self.update(kwargs)
def reset(self):
"""Reset self to default values."""
self.clear()
dict.update(self, self.defaults)
def update(self, config):
"""Update self from a dict, file, or filename."""
self._apply(Parser.load(config))
def _apply(self, config):
"""Update self from a dict."""
which_env = config.get('environment')
if which_env:
env = self.environments[which_env]
for k in env:
if k not in config:
config[k] = env[k]
dict.update(self, config)
self.namespaces(config)
def __setitem__(self, k, v):
dict.__setitem__(self, k, v)
self.namespaces({k: v})
class Parser(configparser.ConfigParser):
"""Sub-class of ConfigParser that keeps the case of options and that
raises an exception if the file cannot be read.
"""
def optionxform(self, optionstr):
return optionstr
def read(self, filenames):
if isinstance(filenames, text_or_bytes):
filenames = [filenames]
for filename in filenames:
# try:
# fp = open(filename)
# except IOError:
# continue
with open(filename) as fp:
self._read(fp, filename)
def as_dict(self, raw=False, vars=None):
"""Convert an INI file to a dictionary."""
# Load INI file into a dict
result = {}
for section in self.sections():
if section not in result:
result[section] = {}
for option in self.options(section):
value = self.get(section, option, raw=raw, vars=vars)
try:
value = unrepr(value)
except Exception:
x = sys.exc_info()[1]
msg = ('Config error in section: %r, option: %r, '
'value: %r. Config values must be valid Python.' %
(section, option, value))
raise ValueError(msg, x.__class__.__name__, x.args)
result[section][option] = value
return result
def dict_from_file(self, file):
if hasattr(file, 'read'):
self.read_file(file)
else:
self.read(file)
return self.as_dict()
@classmethod
    def load(cls, input):
"""Resolve 'input' to dict from a dict, file, or filename."""
is_file = (
# Filename
isinstance(input, text_or_bytes)
# Open file object
or hasattr(input, 'read')
)
return Parser().dict_from_file(input) if is_file else input.copy()
# public domain "unrepr" implementation, found on the web and then improved.
class _Builder:
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise TypeError('unrepr does not recognize %s' %
repr(o.__class__.__name__))
return m(o)
def astnode(self, s):
"""Return a Python3 ast Node compiled from a string."""
try:
import ast
except ImportError:
# Fallback to eval when ast package is not available,
# e.g. IronPython 1.0.
return eval(s)
p = ast.parse('__tempvalue__ = ' + s)
return p.body[0].value
def build_Subscript(self, o):
return self.build(o.value)[self.build(o.slice)]
def build_Index(self, o):
return self.build(o.value)
def _build_call35(self, o):
"""
Workaround for python 3.5 _ast.Call signature, docs found here
https://greentreesnakes.readthedocs.org/en/latest/nodes.html
"""
import ast
callee = self.build(o.func)
args = []
if o.args is not None:
for a in o.args:
if isinstance(a, ast.Starred):
args.append(self.build(a.value))
else:
args.append(self.build(a))
kwargs = {}
for kw in o.keywords:
            if kw.arg is None:  # double asterisk `**`
rst = self.build(kw.value)
if not isinstance(rst, dict):
                    raise TypeError('Invalid argument for call. '
                                    'Must be a mapping object.')
# give preference to the keys set directly from arg=value
for k, v in rst.items():
if k not in kwargs:
kwargs[k] = v
else: # defined on the call as: arg=value
kwargs[kw.arg] = self.build(kw.value)
return callee(*args, **kwargs)
def build_Call(self, o):
if sys.version_info >= (3, 5):
return self._build_call35(o)
callee = self.build(o.func)
if o.args is None:
args = ()
else:
args = tuple([self.build(a) for a in o.args])
if o.starargs is None:
starargs = ()
else:
starargs = tuple(self.build(o.starargs))
if o.kwargs is None:
kwargs = {}
else:
kwargs = self.build(o.kwargs)
if o.keywords is not None: # direct a=b keywords
for kw in o.keywords:
                # a direct arg=value keyword takes precedence over **kwargs
kwargs[kw.arg] = self.build(kw.value)
return callee(*(args + starargs), **kwargs)
def build_List(self, o):
return list(map(self.build, o.elts))
def build_Str(self, o):
return o.s
def build_Num(self, o):
return o.n
def build_Dict(self, o):
return dict([(self.build(k), self.build(v))
for k, v in zip(o.keys, o.values)])
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
name = o.id
if name == 'None':
return None
if name == 'True':
return True
if name == 'False':
return False
# See if the Name is a package or module. If it is, import it.
try:
return modules(name)
except ImportError:
pass
# See if the Name is in builtins.
try:
return getattr(builtins, name)
except AttributeError:
pass
raise TypeError('unrepr could not resolve the name %s' % repr(name))
def build_NameConstant(self, o):
return o.value
build_Constant = build_NameConstant # Python 3.8 change
def build_UnaryOp(self, o):
op, operand = map(self.build, [o.op, o.operand])
return op(operand)
def build_BinOp(self, o):
left, op, right = map(self.build, [o.left, o.op, o.right])
return op(left, right)
def build_Add(self, o):
return operator.add
def build_Mult(self, o):
return operator.mul
def build_USub(self, o):
return operator.neg
def build_Attribute(self, o):
parent = self.build(o.value)
return getattr(parent, o.attr)
def build_NoneType(self, o):
return None
def unrepr(s):
"""Return a Python object compiled from a string."""
if not s:
return s
b = _Builder()
obj = b.astnode(s)
return b.build(obj)
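# Illustrative results of unrepr(), evaluated via the _Builder above:
#     unrepr("{'a': 1}") -> {'a': 1}
#     unrepr('1024 * 1024') -> 1048576
#     unrepr('os.path.join') -> the os.path.join function (module lookup)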
def modules(modulePath):
"""Load a module and retrieve a reference to that module."""
__import__(modulePath)
return sys.modules[modulePath]
def attributes(full_attribute_name):
"""Load a module and retrieve an attribute of that module."""
# Parse out the path, module, and attribute
last_dot = full_attribute_name.rfind('.')
attr_name = full_attribute_name[last_dot + 1:]
mod_path = full_attribute_name[:last_dot]
mod = modules(mod_path)
# Let an AttributeError propagate outward.
try:
attr = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
# Return a reference to the attribute.
return attr
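# Illustrative: attributes('os.path.join') imports os.path and returns its
# 'join' attribute.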
| 12,336 | Python | .py | 316 | 28.636076 | 77 | 0.562924 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,590 | sessions.py | rembo10_headphones/lib/cherrypy/lib/sessions.py |
"""Session implementation for CherryPy.
You need to edit your config file to use sessions. Here's an example::
[/]
tools.sessions.on = True
tools.sessions.storage_class = cherrypy.lib.sessions.FileSession
tools.sessions.storage_path = "/home/site/sessions"
tools.sessions.timeout = 60
This sets the session to be stored in files in the directory
/home/site/sessions, and the session timeout to 60 minutes. If you omit
``storage_class``, the sessions will be saved in RAM.
``tools.sessions.on`` is the only required line for working sessions,
the rest are optional.
By default, the session ID is passed in a cookie, so the client's browser must
have cookies enabled for your site.
To set data for the current session, use
``cherrypy.session['fieldname'] = 'fieldvalue'``;
to get data use ``cherrypy.session.get('fieldname')``.
================
Locking sessions
================
By default, the ``'locking'`` mode of sessions is ``'implicit'``, which means
the session is locked early and unlocked late. Be mindful of this default mode
for any requests that take a long time to process (streaming responses,
expensive calculations, database lookups, API calls, etc), as other concurrent
requests that also utilize sessions will hang until the session is unlocked.
If you want to control when the session data is locked and unlocked,
set ``tools.sessions.locking = 'explicit'``. Then call
``cherrypy.session.acquire_lock()`` and ``cherrypy.session.release_lock()``.
Regardless of which mode you use, the session is guaranteed to be unlocked when
the request is complete.
=================
Expiring Sessions
=================
You can force a session to expire with :func:`cherrypy.lib.sessions.expire`.
Simply call that function at the point you want the session to expire, and it
will cause the session cookie to expire client-side.
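For example, an illustrative logout handler (not part of this module)::

    @cherrypy.expose
    def logout(self):
        cherrypy.lib.sessions.expire()
        return 'You have been logged out.'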
===========================
Session Fixation Protection
===========================
If CherryPy receives, via a request cookie, a session id that it does not
recognize, it will reject that id and create a new one to return in the
response cookie. This `helps prevent session fixation attacks
<http://en.wikipedia.org/wiki/Session_fixation#Regenerate_SID_on_each_request>`_.
However, CherryPy "recognizes" a session id by looking up the saved session
data for that id. Therefore, if you never save any session data,
**you will get a new session id for every request**.
A side effect of CherryPy overwriting unrecognised session ids is that if you
have multiple, separate CherryPy applications running on a single domain (e.g.
on different ports), each app will overwrite the other's session id because by
default they use the same cookie name (``"session_id"``) but do not recognise
each other's sessions. It is therefore a good idea to use a different name for
each, for example::
[/]
...
tools.sessions.name = "my_app_session_id"
================
Sharing Sessions
================
If you run multiple instances of CherryPy (for example via mod_python behind
Apache prefork), you most likely cannot use the RAM session backend, since each
instance of CherryPy will have its own memory space. Use a different backend
instead, and verify that all instances are pointing at the same file or db
location. Alternately, you might try a load balancer which makes sessions
"sticky". Google is your friend, there.
================
Expiration Dates
================
The response cookie will possess an expiration date to inform the client at
which point to stop sending the cookie back in requests. If the server time
and client time differ, expect sessions to be unreliable. **Make sure the
system time of your server is accurate**.
CherryPy defaults to a 60-minute session timeout, which also applies to the
cookie which is sent to the client. Unfortunately, some versions of Safari
("4 public beta" on Windows XP at least) appear to have a bug in their parsing
of the GMT expiration date--they appear to interpret the date as one hour in
the past. Sixty minutes minus one hour is pretty close to zero, so you may
experience this bug as a new session id for every request, unless the requests
are less than one second apart. To fix, try increasing the session.timeout.
On the other extreme, some users report Firefox sending cookies after their
expiration date, although this was on a system with an inaccurate system time.
Maybe FF doesn't trust system time.
"""
import sys
import datetime
import os
import time
import threading
import binascii
import pickle
import zc.lockfile
import cherrypy
from cherrypy.lib import httputil
from cherrypy.lib import locking
from cherrypy.lib import is_iterator
missing = object()
class Session(object):
"""A CherryPy dict-like Session object (one per request)."""
_id = None
id_observers = None
"A list of callbacks to which to pass new id's."
@property
def id(self):
"""Return the current session id."""
return self._id
@id.setter
def id(self, value):
self._id = value
for o in self.id_observers:
o(value)
timeout = 60
'Number of minutes after which to delete session data.'
locked = False
"""
If True, this session instance has exclusive read/write access
to session data."""
loaded = False
"""If True, data has been retrieved from storage.
This should happen automatically on the first attempt to access
session data.
"""
clean_thread = None
'Class-level Monitor which calls self.clean_up.'
clean_freq = 5
'The poll rate for expired session cleanup in minutes.'
originalid = None
'The session id passed by the client. May be missing or unsafe.'
missing = False
'True if the session requested by the client did not exist.'
regenerated = False
"""True if the application called session.regenerate().
This is not set by internal calls to regenerate the session id.
"""
debug = False
'If True, log debug information.'
# --------------------- Session management methods --------------------- #
def __init__(self, id=None, **kwargs):
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if self._exists():
if self.debug:
cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')
else:
if self.debug:
cherrypy.log('Expired or malicious session %r; '
'making a new one' % id, 'TOOLS.SESSIONS')
# Expired or malicious session. Make a new one.
# See https://github.com/cherrypy/cherrypy/issues/709.
self.id = None
self.missing = True
self._regenerate()
def now(self):
"""Generate the session specific concept of 'now'.
Other session providers can override this to use alternative,
possibly timezone aware, versions of 'now'.
"""
return datetime.datetime.now()
def regenerate(self):
"""Replace the current session (with a new id)."""
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
if self.debug:
cherrypy.log(
'Deleting the existing session %r before '
'regeneration.' % self.id,
'TOOLS.SESSIONS')
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
if self.debug:
cherrypy.log('Old lock released.', 'TOOLS.SESSIONS')
self.id = None
while self.id is None:
self.id = self.generate_id()
# Assert that the generated id is not already stored.
if self._exists():
self.id = None
if self.debug:
cherrypy.log('Set id to generated %s.' % self.id,
'TOOLS.SESSIONS')
if old_session_was_locked:
self.acquire_lock()
if self.debug:
cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS')
def clean_up(self):
"""Clean up expired sessions."""
pass
def generate_id(self):
"""Return a new session id."""
return binascii.hexlify(os.urandom(20)).decode('ascii')
def save(self):
"""Save session data."""
try:
# If session data has never been loaded then it's never been
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds=self.timeout * 60)
expiration_time = self.now() + t
if self.debug:
cherrypy.log('Saving session %r with expiry %s' %
(self.id, expiration_time),
'TOOLS.SESSIONS')
self._save(expiration_time)
else:
if self.debug:
cherrypy.log(
'Skipping save of session %r (no session loaded).' %
self.id, 'TOOLS.SESSIONS')
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
if self.debug:
cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS')
def load(self):
"""Copy stored session data into this session instance."""
data = self._load()
# data is either None or a tuple (session_data, expiration_time)
if data is None or data[1] < self.now():
if self.debug:
cherrypy.log('Expired session %r, flushing data.' % self.id,
'TOOLS.SESSIONS')
self._data = {}
else:
if self.debug:
cherrypy.log('Data loaded for session %r.' % self.id,
'TOOLS.SESSIONS')
self._data = data[0]
self.loaded = True
# Stick the clean_thread in the class, not the instance.
# The instances are created and destroyed per-request.
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
# clean_up is an instancemethod and not a classmethod,
# so that tool config can be accessed inside the method.
t = cherrypy.process.plugins.Monitor(
cherrypy.engine, self.clean_up, self.clean_freq * 60,
name='Session cleanup')
t.subscribe()
cls.clean_thread = t
t.start()
if self.debug:
cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')
def delete(self):
"""Delete stored session data."""
self._delete()
if self.debug:
cherrypy.log('Deleted session %s.' % self.id,
'TOOLS.SESSIONS')
# -------------------- Application accessor methods -------------------- #
def __getitem__(self, key):
if not self.loaded:
self.load()
return self._data[key]
def __setitem__(self, key, value):
if not self.loaded:
self.load()
self._data[key] = value
def __delitem__(self, key):
if not self.loaded:
self.load()
del self._data[key]
def pop(self, key, default=missing):
"""Remove the specified key and return the corresponding value.
If key is not found, default is returned if given, otherwise
KeyError is raised.
"""
if not self.loaded:
self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
if not self.loaded:
self.load()
return key in self._data
def get(self, key, default=None):
"""D.get(k[,d]) -> D[k] if k in D, else d.
d defaults to None.
"""
if not self.loaded:
self.load()
return self._data.get(key, default)
def update(self, d):
"""D.update(E) -> None.
Update D from E: for k in E: D[k] = E[k].
"""
if not self.loaded:
self.load()
self._data.update(d)
def setdefault(self, key, default=None):
"""D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
if not self.loaded:
self.load()
return self._data.setdefault(key, default)
def clear(self):
"""D.clear() -> None.
Remove all items from D.
"""
if not self.loaded:
self.load()
self._data.clear()
def keys(self):
"""D.keys() -> list of D's keys."""
if not self.loaded:
self.load()
return self._data.keys()
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples."""
if not self.loaded:
self.load()
return self._data.items()
def values(self):
"""D.values() -> list of D's values."""
if not self.loaded:
self.load()
return self._data.values()
class RamSession(Session):
# Class-level objects. Don't rebind these!
cache = {}
locks = {}
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
for _id, (data, expiration_time) in self.cache.copy().items():
if expiration_time <= now:
try:
del self.cache[_id]
except KeyError:
pass
try:
if self.locks[_id].acquire(blocking=False):
lock = self.locks.pop(_id)
lock.release()
except KeyError:
pass
        # Also remove obsolete lock objects for sessions no longer cached.
for _id in list(self.locks):
locked = (
_id not in self.cache
and self.locks[_id].acquire(blocking=False)
)
if locked:
lock = self.locks.pop(_id)
lock.release()
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
return len(self.cache)
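# --- Editor's illustrative sketch (not part of the original module) ---
# Every backend above implements the same storage contract: _save() persists
# (data, expiration_time) under the session id, _load() returns that tuple or
# None, and _delete() removes it. The demo bypasses the Session constructor
# (defined earlier in this module) so the sketch stays self-contained; real
# code always goes through sessions.init() and the tool machinery.
def _demo_ram_backend():
    sess = RamSession.__new__(RamSession)  # skip base __init__ for the demo
    sess.id, sess._data = 'demo-id', {'user': 'alice'}
    sess._save(datetime.datetime.now() + datetime.timedelta(minutes=60))
    assert sess._exists() and sess._load()[0] == {'user': 'alice'}
    sess._delete()
    assert not sess._exists()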
class FileSession(Session):
"""Implementation of the File backend for sessions
storage_path
        The folder where session data will be saved. Each session
        will be saved via pickle.dump((data, expiration_time), f) in
        its own file; the filename will be self.SESSION_PREFIX + self.id.
lock_timeout
A timedelta or numeric seconds indicating how long
to block acquiring a lock. If None (default), acquiring a lock
will block indefinitely.
"""
SESSION_PREFIX = 'session-'
LOCK_SUFFIX = '.lock'
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
kwargs.setdefault('lock_timeout', None)
Session.__init__(self, id=id, **kwargs)
# validate self.lock_timeout
if isinstance(self.lock_timeout, (int, float)):
self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout)
if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))):
raise ValueError(
'Lock timeout must be numeric seconds or a timedelta instance.'
)
@classmethod
def setup(cls, **kwargs):
"""Set up the storage system for file-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool
does).
"""
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
for k, v in kwargs.items():
setattr(cls, k, v)
def _get_file_path(self):
f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
if not os.path.abspath(f).startswith(self.storage_path):
raise cherrypy.HTTPError(400, 'Invalid session id in cookie.')
return f
def _exists(self):
path = self._get_file_path()
return os.path.exists(path)
def _load(self, path=None):
        assert self.locked, ('The session was loaded without being locked. '
                             "Check your tools' priority levels.")
if path is None:
path = self._get_file_path()
try:
with open(path, 'rb') as f:
return pickle.load(f)
except (IOError, EOFError):
e = sys.exc_info()[1]
if self.debug:
cherrypy.log('Error loading the session pickle: %s' %
e, 'TOOLS.SESSIONS')
return None
def _save(self, expiration_time):
assert self.locked, ('The session was saved without being locked. '
"Check your tools' priority levels.")
with open(self._get_file_path(), 'wb') as f:
pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
def _delete(self):
        assert self.locked, ('The session was deleted without being locked. '
                             "Check your tools' priority levels.")
try:
os.unlink(self._get_file_path())
except OSError:
pass
def acquire_lock(self, path=None):
"""Acquire an exclusive lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
path += self.LOCK_SUFFIX
checker = locking.LockChecker(self.id, self.lock_timeout)
while not checker.expired():
try:
self.lock = zc.lockfile.LockFile(path)
except zc.lockfile.LockError:
time.sleep(0.1)
else:
break
self.locked = True
if self.debug:
cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def release_lock(self, path=None):
"""Release the lock on the currently-loaded session data."""
self.lock.close()
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
# Iterate over all session files in self.storage_path
for fname in os.listdir(self.storage_path):
have_session = (
fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX)
)
if have_session:
                # We have a session file: lock it, load it, and check
                # whether it has expired. If anything fails, never mind.
path = os.path.join(self.storage_path, fname)
self.acquire_lock(path)
if self.debug:
# This is a bit of a hack, since we're calling clean_up
# on the first instance rather than the entire class,
# so depending on whether you have "debug" set on the
# path of the first session called, this may not run.
cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS')
try:
contents = self._load(path)
# _load returns None on IOError
if contents is not None:
data, expiration_time = contents
if expiration_time < now:
# Session expired: deleting it
os.unlink(path)
finally:
self.release_lock(path)
def __len__(self):
"""Return the number of active sessions."""
return len([fname for fname in os.listdir(self.storage_path)
if (fname.startswith(self.SESSION_PREFIX) and
not fname.endswith(self.LOCK_SUFFIX))])
class MemcachedSession(Session):
# The most popular memcached client for Python isn't thread-safe.
# Wrap all .get and .set operations in a single lock.
mc_lock = threading.RLock()
# This is a separate set of locks per session id.
locks = {}
servers = ['localhost:11211']
@classmethod
def setup(cls, **kwargs):
"""Set up the storage system for memcached-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool
does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
import memcache
cls.cache = memcache.Client(cls.servers)
def _exists(self):
self.mc_lock.acquire()
try:
return bool(self.cache.get(self.id))
finally:
self.mc_lock.release()
def _load(self):
self.mc_lock.acquire()
try:
return self.cache.get(self.id)
finally:
self.mc_lock.release()
def _save(self, expiration_time):
# Send the expiration time as "Unix time" (seconds since 1/1/1970)
td = int(time.mktime(expiration_time.timetuple()))
self.mc_lock.acquire()
try:
if not self.cache.set(self.id, (self._data, expiration_time), td):
raise AssertionError(
'Session data for id %r not set.' % self.id)
finally:
self.mc_lock.release()
def _delete(self):
self.cache.delete(self.id)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
if self.debug:
cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
raise NotImplementedError
# Hook functions (for CherryPy tools)
def save():
"""Save any changed session data."""
if not hasattr(cherrypy.serving, 'session'):
return
request = cherrypy.serving.request
response = cherrypy.serving.response
# Guard against running twice
if hasattr(request, '_sessionsaved'):
return
request._sessionsaved = True
if response.stream:
# If the body is being streamed, we have to save the data
# *after* the response has been written out
request.hooks.attach('on_end_request', cherrypy.session.save)
else:
# If the body is not being streamed, we save the data now
# (so we can release the lock).
if is_iterator(response.body):
response.collapse_body()
cherrypy.session.save()
save.failsafe = True
def close():
"""Close the session object for this request."""
sess = getattr(cherrypy.serving, 'session', None)
if getattr(sess, 'locked', False):
# If the session is still locked we release the lock
sess.release_lock()
if sess.debug:
cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')
close.failsafe = True
close.priority = 90
def init(storage_type=None, path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, clean_freq=5,
persistent=True, httponly=False, debug=False,
# Py27 compat
# *, storage_class=RamSession,
**kwargs):
"""Initialize session object (using cookies).
storage_class
The Session subclass to use. Defaults to RamSession.
storage_type
(deprecated)
        One of 'ram', 'file', 'memcached'. This will be
used to look up the corresponding class in cherrypy.lib.sessions
globals. For example, 'file' will use the FileSession class.
path
The 'path' value to stick in the response cookie metadata.
path_header
If 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name
The name of the cookie.
timeout
The expiration timeout (in minutes) for the stored session data.
If 'persistent' is True (the default), this is also the timeout
for the cookie.
domain
The cookie domain.
secure
If False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
clean_freq (minutes)
The poll rate for expired session cleanup.
persistent
If True (the default), the 'timeout' argument will be used
to expire the cookie. If False, the cookie will not have an expiry,
and the cookie will be a "session cookie" which expires when the
browser is closed.
httponly
If False (the default) the cookie 'httponly' value will not be set.
If True, the cookie 'httponly' value will be set (to 1).
Any additional kwargs will be bound to the new Session instance,
and may be specific to the storage type. See the subclass of Session
you're using for more information.
"""
# Py27 compat
storage_class = kwargs.pop('storage_class', RamSession)
request = cherrypy.serving.request
# Guard against running twice
if hasattr(request, '_session_init_flag'):
return
request._session_init_flag = True
# Check if request came with a session ID
id = None
if name in request.cookie:
id = request.cookie[name].value
if debug:
cherrypy.log('ID obtained from request.cookie: %r' % id,
'TOOLS.SESSIONS')
first_time = not hasattr(cherrypy, 'session')
if storage_type:
if first_time:
msg = 'storage_type is deprecated. Supply storage_class instead'
cherrypy.log(msg)
storage_class = storage_type.title() + 'Session'
storage_class = globals()[storage_class]
# call setup first time only
if first_time:
if hasattr(storage_class, 'setup'):
storage_class.setup(**kwargs)
# Create and attach a new Session instance to cherrypy.serving.
# It will possess a reference to (and lock, and lazily load)
# the requested session data.
kwargs['timeout'] = timeout
kwargs['clean_freq'] = clean_freq
cherrypy.serving.session = sess = storage_class(id, **kwargs)
sess.debug = debug
def update_cookie(id):
"""Update the cookie every time the session id changes."""
cherrypy.serving.response.cookie[name] = id
sess.id_observers.append(update_cookie)
# Create cherrypy.session which will proxy to cherrypy.serving.session
if not hasattr(cherrypy, 'session'):
cherrypy.session = cherrypy._ThreadLocalProxy('session')
if persistent:
cookie_timeout = timeout
else:
# See http://support.microsoft.com/kb/223799/EN-US/
# and http://support.mozilla.com/en-US/kb/Cookies
cookie_timeout = None
set_response_cookie(path=path, path_header=path_header, name=name,
timeout=cookie_timeout, domain=domain, secure=secure,
httponly=httponly)
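# --- Editor's illustrative sketch (not part of the original module) ---
# A config dict wiring the sessions tool to the FileSession backend defined
# above. The keys mirror init()'s arguments; the storage path is hypothetical.
def _example_session_config():
    return {
        'tools.sessions.on': True,
        'tools.sessions.storage_class': FileSession,
        'tools.sessions.storage_path': '/tmp/cp_sessions',  # hypothetical
        'tools.sessions.timeout': 60,    # minutes
        'tools.sessions.clean_freq': 5,  # minutes
    }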
def set_response_cookie(path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, httponly=False):
"""Set a response cookie for the client.
path
the 'path' value to stick in the response cookie metadata.
path_header
if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name
the name of the cookie.
timeout
        the expiration timeout for the cookie. If 0 or another false
        value, no 'expires' param will be set, and the cookie will be a
"session cookie" which expires when the browser is closed.
domain
the cookie domain.
secure
if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
httponly
If False (the default) the cookie 'httponly' value will not be set.
If True, the cookie 'httponly' value will be set (to 1).
"""
# Set response cookie
cookie = cherrypy.serving.response.cookie
cookie[name] = cherrypy.serving.session.id
cookie[name]['path'] = (
path or
cherrypy.serving.request.headers.get(path_header) or
'/'
)
if timeout:
cookie[name]['max-age'] = timeout * 60
_add_MSIE_max_age_workaround(cookie[name], timeout)
if domain is not None:
cookie[name]['domain'] = domain
if secure:
cookie[name]['secure'] = 1
if httponly:
if not cookie[name].isReservedKey('httponly'):
raise ValueError('The httponly cookie token is not supported.')
cookie[name]['httponly'] = 1
def _add_MSIE_max_age_workaround(cookie, timeout):
"""
We'd like to use the "max-age" param as indicated in
http://www.faqs.org/rfcs/rfc2109.html but IE doesn't
save it to disk and the session is lost if people close
the browser. So we have to use the old "expires" ... sigh ...
"""
expires = time.time() + timeout * 60
cookie['expires'] = httputil.HTTPDate(expires)
def expire():
"""Expire the current session cookie."""
name = cherrypy.serving.request.config.get(
'tools.sessions.name', 'session_id')
one_year = 60 * 60 * 24 * 365
e = time.time() - one_year
cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
cherrypy.serving.response.cookie[name].pop('max-age', None)
| 30,969 | Python | .py | 741 | 32.709852 | 81 | 0.612792 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,591 | locking.py | rembo10_headphones/lib/cherrypy/lib/locking.py |
import datetime
class NeverExpires(object):
def expired(self):
return False
class Timer(object):
"""A simple timer that will indicate when an expiration time has passed."""
def __init__(self, expiration):
'Create a timer that expires at `expiration` (UTC datetime)'
self.expiration = expiration
@classmethod
def after(cls, elapsed):
"""Return a timer that will expire after `elapsed` passes."""
return cls(
datetime.datetime.now(datetime.timezone.utc) + elapsed,
)
def expired(self):
return datetime.datetime.now(
datetime.timezone.utc,
) >= self.expiration
class LockTimeout(Exception):
'An exception when a lock could not be acquired before a timeout period'
class LockChecker(object):
"""Keep track of the time and detect if a timeout has expired."""
def __init__(self, session_id, timeout):
self.session_id = session_id
if timeout:
self.timer = Timer.after(timeout)
else:
self.timer = NeverExpires()
def expired(self):
if self.timer.expired():
raise LockTimeout(
'Timeout acquiring lock for %(session_id)s' % vars(self))
return False
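# --- Editor's illustrative sketch (not part of the original file) ---
# FileSession.acquire_lock() polls LockChecker.expired() between attempts:
# it returns False while time remains, raises LockTimeout once the deadline
# has passed, and never expires at all when the timeout is falsy.
def _demo_lock_checker():
    assert LockChecker('demo-id', None).expired() is False  # never expires
    overdue = LockChecker('demo-id', datetime.timedelta(seconds=-1))
    try:
        overdue.expired()
        raise AssertionError('expected LockTimeout')
    except LockTimeout:
        pass  # the deadline is already in the past, so the first poll raises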
| 1,269 | Python | .py | 34 | 29.529412 | 79 | 0.642157 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,592 | profiler.py | rembo10_headphones/lib/cherrypy/lib/profiler.py |
"""Profiler tools for CherryPy.
CherryPy users
==============
You can profile any of your pages as follows::
from cherrypy.lib import profiler
class Root:
p = profiler.Profiler("/path/to/profile/dir")
@cherrypy.expose
def index(self):
self.p.run(self._index)
def _index(self):
return "Hello, world!"
cherrypy.tree.mount(Root())
You can also turn on profiling for all requests
using the ``make_app`` function as WSGI middleware.
CherryPy developers
===================
This module can be used whenever you make changes to CherryPy,
to get a quick sanity-check on overall CP performance. Use the
``--profile`` flag when running the test suite. Then, use the ``serve()``
function to browse the results in a web browser. If you run this
module from the command line, it will call ``serve()`` for you.
"""
import io
import os
import os.path
import sys
import warnings
import cherrypy
try:
import profile
import pstats
def new_func_strip_path(func_name):
"""Add ``__init__`` modules' parents.
This makes the profiler output more readable.
"""
filename, line, name = func_name
if filename.endswith('__init__.py'):
return (
os.path.basename(filename[:-12]) + filename[-12:],
line,
name,
)
return os.path.basename(filename), line, name
pstats.func_strip_path = new_func_strip_path
except ImportError:
profile = None
pstats = None
_count = 0
class Profiler(object):
def __init__(self, path=None):
if not path:
path = os.path.join(os.path.dirname(__file__), 'profile')
self.path = path
if not os.path.exists(path):
os.makedirs(path)
def run(self, func, *args, **params):
"""Dump profile data into self.path."""
global _count
c = _count = _count + 1
path = os.path.join(self.path, 'cp_%04d.prof' % c)
prof = profile.Profile()
result = prof.runcall(func, *args, **params)
prof.dump_stats(path)
return result
def statfiles(self):
        """Return a list of the available profile file names."""
return [f for f in os.listdir(self.path)
if f.startswith('cp_') and f.endswith('.prof')]
def stats(self, filename, sortby='cumulative'):
        """Return the output of print_stats() for the given profile."""
sio = io.StringIO()
if sys.version_info >= (2, 5):
s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
s.strip_dirs()
s.sort_stats(sortby)
s.print_stats()
else:
# pstats.Stats before Python 2.5 didn't take a 'stream' arg,
# but just printed to stdout. So re-route stdout.
s = pstats.Stats(os.path.join(self.path, filename))
s.strip_dirs()
s.sort_stats(sortby)
oldout = sys.stdout
try:
sys.stdout = sio
s.print_stats()
finally:
sys.stdout = oldout
response = sio.getvalue()
sio.close()
return response
@cherrypy.expose
def index(self):
return """<html>
<head><title>CherryPy profile data</title></head>
<frameset cols='200, 1*'>
<frame src='menu' />
<frame name='main' src='' />
</frameset>
</html>
"""
@cherrypy.expose
def menu(self):
yield '<h2>Profiling runs</h2>'
yield '<p>Click on one of the runs below to see profiling data.</p>'
runs = self.statfiles()
runs.sort()
for i in runs:
yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (
i, i)
@cherrypy.expose
def report(self, filename):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return self.stats(filename)
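# --- Editor's illustrative sketch (not part of the original file) ---
# Profiler.run() wraps any callable, dumps cp_NNNN.prof into self.path, and
# returns the callable's result; statfiles()/stats() then read those dumps.
# Assumes the stdlib profile module imported successfully above; the
# directory name is hypothetical.
def _demo_profiler(path='/tmp/cp_profile_demo'):
    p = Profiler(path)  # creates the directory if needed
    assert p.run(sorted, range(100), reverse=True)[0] == 99
    return p.statfiles()  # e.g. ['cp_0001.prof']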
class ProfileAggregator(Profiler):
def __init__(self, path=None):
Profiler.__init__(self, path)
global _count
self.count = _count = _count + 1
self.profiler = profile.Profile()
def run(self, func, *args, **params):
path = os.path.join(self.path, 'cp_%04d.prof' % self.count)
result = self.profiler.runcall(func, *args, **params)
self.profiler.dump_stats(path)
return result
class make_app:
def __init__(self, nextapp, path=None, aggregate=False):
"""Make a WSGI middleware app which wraps 'nextapp' with profiling.
nextapp
the WSGI application to wrap, usually an instance of
cherrypy.Application.
path
where to dump the profiling output.
aggregate
if True, profile data for all HTTP requests will go in
a single file. If False (the default), each HTTP request will
dump its profile data into a separate file.
"""
if profile is None or pstats is None:
msg = ('Your installation of Python does not have a profile '
"module. If you're on Debian, try "
'`sudo apt-get install python-profiler`. '
'See http://www.cherrypy.org/wiki/ProfilingOnDebian '
'for details.')
warnings.warn(msg)
self.nextapp = nextapp
self.aggregate = aggregate
if aggregate:
self.profiler = ProfileAggregator(path)
else:
self.profiler = Profiler(path)
def __call__(self, environ, start_response):
def gather():
result = []
for line in self.nextapp(environ, start_response):
result.append(line)
return result
return self.profiler.run(gather)
def serve(path=None, port=8080):
if profile is None or pstats is None:
msg = ('Your installation of Python does not have a profile module. '
"If you're on Debian, try "
'`sudo apt-get install python-profiler`. '
'See http://www.cherrypy.org/wiki/ProfilingOnDebian '
'for details.')
warnings.warn(msg)
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': 'production',
})
cherrypy.quickstart(Profiler(path))
if __name__ == '__main__':
serve(*tuple(sys.argv[1:]))
| 6,555 | Python | .py | 176 | 27.9375 | 79 | 0.578399 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,593 | xmlrpcutil.py | rembo10_headphones/lib/cherrypy/lib/xmlrpcutil.py |
"""XML-RPC tool helpers."""
import sys
from xmlrpc.client import (
loads as xmlrpc_loads, dumps as xmlrpc_dumps,
Fault as XMLRPCFault
)
import cherrypy
from cherrypy._cpcompat import ntob
def process_body():
"""Return (params, method) from request body."""
try:
return xmlrpc_loads(cherrypy.request.body.read())
except Exception:
return ('ERROR PARAMS', ), 'ERRORMETHOD'
def patched_path(path):
"""Return 'path', doctored for RPC."""
if not path.endswith('/'):
path += '/'
if path.startswith('/RPC2/'):
# strip the first /rpc2
path = path[5:]
return path
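# Editor's note (illustrative): the '/RPC2' mount prefix is stripped and a
# trailing slash is guaranteed, so both spellings map to the same path.
assert patched_path('/RPC2/app.method') == '/app.method/'
assert patched_path('/app.method/') == '/app.method/'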
def _set_response(body):
"""Set up HTTP status, headers and body within CherryPy."""
# The XML-RPC spec (http://www.xmlrpc.com/spec) says:
# "Unless there's a lower-level error, always return 200 OK."
# Since Python's xmlrpc_client interprets a non-200 response
# as a "Protocol Error", we'll just return 200 every time.
response = cherrypy.response
response.status = '200 OK'
response.body = ntob(body, 'utf-8')
response.headers['Content-Type'] = 'text/xml'
response.headers['Content-Length'] = len(body)
def respond(body, encoding='utf-8', allow_none=0):
"""Construct HTTP response body."""
if not isinstance(body, XMLRPCFault):
body = (body,)
_set_response(
xmlrpc_dumps(
body, methodresponse=1,
encoding=encoding,
allow_none=allow_none
)
)
def on_error(*args, **kwargs):
"""Construct HTTP response body for an error response."""
body = str(sys.exc_info()[1])
_set_response(xmlrpc_dumps(XMLRPCFault(1, body)))
| 1,684 | Python | .py | 48 | 29.583333 | 65 | 0.652709 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,594 | httputil.py | rembo10_headphones/lib/cherrypy/lib/httputil.py |
"""HTTP library functions.
This module contains functions for building an HTTP application
framework: any one, not just one whose name starts with "Ch". ;) If you
reference any modules from some popular framework inside *this* module,
FuManChu will personally hang you up by your thumbs and submit you
to a public caning.
"""
import functools
import email.utils
import re
import builtins
from binascii import b2a_base64
from cgi import parse_header
from email.header import decode_header
from http.server import BaseHTTPRequestHandler
from urllib.parse import unquote_plus
import jaraco.collections
import cherrypy
from cherrypy._cpcompat import ntob, ntou
response_codes = BaseHTTPRequestHandler.responses.copy()
# From https://github.com/cherrypy/cherrypy/issues/361
response_codes[500] = ('Internal Server Error',
'The server encountered an unexpected condition '
'which prevented it from fulfilling the request.')
response_codes[503] = ('Service Unavailable',
'The server is currently unable to handle the '
'request due to a temporary overloading or '
'maintenance of the server.')
HTTPDate = functools.partial(email.utils.formatdate, usegmt=True)
def urljoin(*atoms):
r"""Return the given path \*atoms, joined into a single URL.
This will correctly join a SCRIPT_NAME and PATH_INFO into the
original URL, even if either atom is blank.
"""
url = '/'.join([x for x in atoms if x])
while '//' in url:
url = url.replace('//', '/')
# Special-case the final url of "", and return "/" instead.
return url or '/'
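# Editor's note (illustrative): blank atoms disappear, doubled slashes
# collapse, and an entirely empty join falls back to '/'.
assert urljoin('/script', '/path/info') == '/script/path/info'
assert urljoin('', '') == '/'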
def urljoin_bytes(*atoms):
"""Return the given path `*atoms`, joined into a single URL.
This will correctly join a SCRIPT_NAME and PATH_INFO into the
original URL, even if either atom is blank.
"""
url = b'/'.join([x for x in atoms if x])
while b'//' in url:
url = url.replace(b'//', b'/')
# Special-case the final url of "", and return "/" instead.
return url or b'/'
def protocol_from_http(protocol_str):
"""Return a protocol tuple from the given 'HTTP/x.y' string."""
return int(protocol_str[5]), int(protocol_str[7])
def get_ranges(headervalue, content_length):
"""Return a list of (start, stop) indices from a Range header, or None.
Each (start, stop) tuple will be composed of two ints, which are
suitable for use in a slicing operation. That is, the header "Range:
bytes=3-6", if applied against a Python string, is requesting
resource[3:7]. This function will return the list [(3, 7)].
If this function returns an empty list, you should return HTTP 416.
"""
if not headervalue:
return None
result = []
bytesunit, byteranges = headervalue.split('=', 1)
for brange in byteranges.split(','):
start, stop = [x.strip() for x in brange.split('-', 1)]
if start:
if not stop:
stop = content_length - 1
start, stop = int(start), int(stop)
if start >= content_length:
# From rfc 2616 sec 14.16:
# "If the server receives a request (other than one
# including an If-Range request-header field) with an
# unsatisfiable Range request-header field (that is,
# all of whose byte-range-spec values have a first-byte-pos
# value greater than the current length of the selected
# resource), it SHOULD return a response code of 416
# (Requested range not satisfiable)."
continue
if stop < start:
# From rfc 2616 sec 14.16:
# "If the server ignores a byte-range-spec because it
# is syntactically invalid, the server SHOULD treat
# the request as if the invalid Range header field
# did not exist. (Normally, this means return a 200
# response containing the full entity)."
return None
result.append((start, stop + 1))
else:
if not stop:
# See rfc quote above.
return None
# Negative subscript (last N bytes)
#
# RFC 2616 Section 14.35.1:
# If the entity is shorter than the specified suffix-length,
# the entire entity-body is used.
if int(stop) > content_length:
result.append((0, content_length))
else:
result.append((content_length - int(stop), content_length))
return result
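# Editor's note (illustrative): the docstring's example plus a suffix range.
# "bytes=3-6" against an 8-byte entity requests resource[3:7]; "bytes=-2"
# requests the final two bytes.
assert get_ranges('bytes=3-6', 8) == [(3, 7)]
assert get_ranges('bytes=-2', 8) == [(6, 8)]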
class HeaderElement(object):
"""An element (with parameters) from an HTTP header's element list."""
def __init__(self, value, params=None):
self.value = value
if params is None:
params = {}
self.params = params
def __cmp__(self, other):
return builtins.cmp(self.value, other.value)
def __lt__(self, other):
return self.value < other.value
def __str__(self):
p = [';%s=%s' % (k, v) for k, v in self.params.items()]
return str('%s%s' % (self.value, ''.join(p)))
def __bytes__(self):
return ntob(self.__str__())
def __unicode__(self):
return ntou(self.__str__())
@staticmethod
def parse(elementstr):
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
initial_value, params = parse_header(elementstr)
return initial_value, params
@classmethod
def from_str(cls, elementstr):
"""Construct an instance from a string of the form 'token;key=val'."""
ival, params = cls.parse(elementstr)
return cls(ival, params)
q_separator = re.compile(r'; *q *=')
class AcceptElement(HeaderElement):
"""An element (with parameters) from an Accept* header's element list.
AcceptElement objects are comparable; the more-preferred object will
be "less than" the less-preferred object. They are also therefore
sortable; if you sort a list of AcceptElement objects, they will be
listed in priority order; the most preferred value will be first.
Yes, it should have been the other way around, but it's too late to
fix now.
"""
@classmethod
def from_str(cls, elementstr):
qvalue = None
# The first "q" parameter (if any) separates the initial
# media-range parameter(s) (if any) from the accept-params.
atoms = q_separator.split(elementstr, 1)
media_range = atoms.pop(0).strip()
if atoms:
# The qvalue for an Accept header can have extensions. The other
# headers cannot, but it's easier to parse them as if they did.
qvalue = HeaderElement.from_str(atoms[0].strip())
media_type, params = cls.parse(media_range)
if qvalue is not None:
params['q'] = qvalue
return cls(media_type, params)
@property
def qvalue(self):
'The qvalue, or priority, of this value.'
val = self.params.get('q', '1')
if isinstance(val, HeaderElement):
val = val.value
try:
return float(val)
except ValueError as val_err:
"""Fail client requests with invalid quality value.
Ref: https://github.com/cherrypy/cherrypy/issues/1370
"""
raise cherrypy.HTTPError(
400,
'Malformed HTTP header: `{}`'.
format(str(self)),
) from val_err
def __cmp__(self, other):
diff = builtins.cmp(self.qvalue, other.qvalue)
if diff == 0:
diff = builtins.cmp(str(self), str(other))
return diff
def __lt__(self, other):
if self.qvalue == other.qvalue:
return str(self) < str(other)
else:
return self.qvalue < other.qvalue
RE_HEADER_SPLIT = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')
def header_elements(fieldname, fieldvalue):
"""Return a sorted HeaderElement list from a comma-separated header string.
"""
if not fieldvalue:
return []
result = []
for element in RE_HEADER_SPLIT.split(fieldvalue):
if fieldname.startswith('Accept') or fieldname == 'TE':
hv = AcceptElement.from_str(element)
else:
hv = HeaderElement.from_str(element)
result.append(hv)
return list(reversed(sorted(result)))
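# Editor's note (illustrative): for Accept* headers the qvalues drive the
# sort, so the most-preferred element comes first in the returned list.
_demo_elements = header_elements('Accept-Charset', 'iso-8859-5;q=0.2, utf-8')
assert [e.value for e in _demo_elements] == ['utf-8', 'iso-8859-5']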
def decode_TEXT(value):
r"""Decode :rfc:`2047` TEXT.
>>> decode_TEXT("=?utf-8?q?f=C3=BCr?=") == b'f\xfcr'.decode('latin-1')
True
"""
atoms = decode_header(value)
decodedvalue = ''
for atom, charset in atoms:
if charset is not None:
atom = atom.decode(charset)
decodedvalue += atom
return decodedvalue
def decode_TEXT_maybe(value):
"""Decode the text but only if '=?' appears in it."""
return decode_TEXT(value) if '=?' in value else value
def valid_status(status):
"""Return legal HTTP status Code, Reason-phrase and Message.
The status arg must be an int, a str that begins with an int
or the constant from ``http.client`` stdlib module.
    If no reason-phrase is supplied, a default reason-phrase
    will be provided.
>>> import http.client
>>> from http.server import BaseHTTPRequestHandler
>>> valid_status(http.client.ACCEPTED) == (
... int(http.client.ACCEPTED),
... ) + BaseHTTPRequestHandler.responses[http.client.ACCEPTED]
True
"""
if not status:
status = 200
code, reason = status, None
if isinstance(status, str):
code, _, reason = status.partition(' ')
reason = reason.strip() or None
try:
code = int(code)
except (TypeError, ValueError):
raise ValueError('Illegal response status from server '
'(%s is non-numeric).' % repr(code))
if code < 100 or code > 599:
raise ValueError('Illegal response status from server '
'(%s is out of range).' % repr(code))
if code not in response_codes:
# code is unknown but not illegal
default_reason, message = '', ''
else:
default_reason, message = response_codes[code]
if reason is None:
reason = default_reason
return code, reason, message
# NOTE: the parse_qs functions that follow are modified version of those
# in the python3.0 source - we need to pass through an encoding to the unquote
# method, but the default parse_qs function doesn't allow us to. These do.
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
"""Parse a query given as a string argument.
Arguments:
qs: URL-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a dict, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
d = {}
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote_plus(nv[0], encoding, errors='strict')
value = unquote_plus(nv[1], encoding, errors='strict')
if name in d:
if not isinstance(d[name], list):
d[name] = [d[name]]
d[name].append(value)
else:
d[name] = value
return d
image_map_pattern = re.compile(r'[0-9]+,[0-9]+')
def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
"""Build a params dictionary from a query_string.
Duplicate key/value pairs in the provided query_string will be
returned as {'key': [val1, val2, ...]}. Single key/values will
be returned as strings: {'key': 'value'}.
"""
if image_map_pattern.match(query_string):
# Server-side image map. Map the coords to 'x' and 'y'
# (like CGI::Request does).
pm = query_string.split(',')
pm = {'x': int(pm[0]), 'y': int(pm[1])}
else:
pm = _parse_qs(query_string, keep_blank_values, encoding=encoding)
return pm
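# Editor's note (illustrative): duplicate keys collapse into a list, and a
# bare "x,y" query string is treated as a server-side image map.
assert parse_query_string('a=1&a=2&b=3') == {'a': ['1', '2'], 'b': '3'}
assert parse_query_string('12,34') == {'x': 12, 'y': 34}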
class CaseInsensitiveDict(jaraco.collections.KeyTransformingDict):
"""A case-insensitive dict subclass.
Each key is changed on entry to title case.
"""
@staticmethod
def transform_key(key):
if key is None:
# TODO(#1830): why?
return 'None'
return key.title()
# TEXT = <any OCTET except CTLs, but including LWS>
#
# A CRLF is allowed in the definition of TEXT only as part of a header
# field continuation. It is expected that the folding LWS will be
# replaced with a single SP before interpretation of the TEXT value."
if str == bytes:
header_translate_table = ''.join([chr(i) for i in range(256)])
header_translate_deletechars = ''.join(
[chr(i) for i in range(32)]) + chr(127)
else:
header_translate_table = None
header_translate_deletechars = bytes(range(32)) + bytes([127])
class HeaderMap(CaseInsensitiveDict):
"""A dict subclass for HTTP request and response headers.
Each key is changed on entry to str(key).title(). This allows headers
to be case-insensitive and avoid duplicates.
Values are header values (decoded according to :rfc:`2047` if necessary).
"""
protocol = (1, 1)
encodings = ['ISO-8859-1']
# Someday, when http-bis is done, this will probably get dropped
# since few servers, clients, or intermediaries do it. But until then,
# we're going to obey the spec as is.
# "Words of *TEXT MAY contain characters from character sets other than
# ISO-8859-1 only when encoded according to the rules of RFC 2047."
use_rfc_2047 = True
def elements(self, key):
"""Return a sorted list of HeaderElements for the given header."""
return header_elements(self.transform_key(key), self.get(key))
def values(self, key):
"""Return a sorted list of HeaderElement.value for the given header."""
return [e.value for e in self.elements(key)]
def output(self):
"""Transform self into a list of (name, value) tuples."""
return list(self.encode_header_items(self.items()))
@classmethod
def encode_header_items(cls, header_items):
"""
Prepare the sequence of name, value tuples into a form suitable for
transmitting on the wire for HTTP.
"""
for k, v in header_items:
if not isinstance(v, str) and not isinstance(v, bytes):
v = str(v)
yield tuple(map(cls.encode_header_item, (k, v)))
@classmethod
def encode_header_item(cls, item):
if isinstance(item, str):
item = cls.encode(item)
# See header_translate_* constants above.
# Replace only if you really know what you're doing.
return item.translate(
header_translate_table, header_translate_deletechars)
@classmethod
def encode(cls, v):
"""Return the given header name or value, encoded for HTTP output."""
for enc in cls.encodings:
try:
return v.encode(enc)
except UnicodeEncodeError:
continue
if cls.protocol == (1, 1) and cls.use_rfc_2047:
# Encode RFC-2047 TEXT
# (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
# We do our own here instead of using the email module
# because we never want to fold lines--folding has
# been deprecated by the HTTP working group.
v = b2a_base64(v.encode('utf-8'))
return (b'=?utf-8?b?' + v.strip(b'\n') + b'?=')
raise ValueError('Could not encode header part %r using '
'any of the encodings %r.' %
(v, cls.encodings))
class Host(object):
"""An internet address.
name
Should be the client's host name. If not available (because no DNS
lookup is performed), the IP address should be used instead.
"""
ip = '0.0.0.0'
port = 80
name = 'unknown.tld'
def __init__(self, ip, port, name=None):
self.ip = ip
self.port = port
if name is None:
name = ip
self.name = name
def __repr__(self):
return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)
class SanitizedHost(str):
r"""
Wraps a raw host header received from the network in
a sanitized version that elides dangerous characters.
>>> SanitizedHost('foo\nbar')
'foobar'
>>> SanitizedHost('foo\nbar').raw
'foo\nbar'
    A SanitizedHost instance is only returned if sanitization was performed.
>>> isinstance(SanitizedHost('foobar'), SanitizedHost)
False
"""
dangerous = re.compile(r'[\n\r]')
def __new__(cls, raw):
sanitized = cls._sanitize(raw)
if sanitized == raw:
return raw
instance = super().__new__(cls, sanitized)
instance.raw = raw
return instance
@classmethod
def _sanitize(cls, raw):
return cls.dangerous.sub('', raw)
| 17,992 | Python | .py | 425 | 34.098824 | 79 | 0.622314 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,595 | auth_digest.py | rembo10_headphones/lib/cherrypy/lib/auth_digest.py |
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
"""HTTP Digest Authentication tool.
An implementation of the server-side of HTTP Digest Access
Authentication, which is described in :rfc:`2617`.
Example usage, using the built-in get_ha1_dict_plain function which uses a dict
of plaintext passwords as the credentials store::
userpassdict = {'alice' : '4x5istwelve'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(userpassdict)
digest_auth = {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'wonderland',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
'tools.auth_digest.accept_charset': 'UTF-8',
}
app_config = { '/' : digest_auth }
"""
import time
import functools
from hashlib import md5
from urllib.request import parse_http_list, parse_keqv_list
import cherrypy
from cherrypy._cpcompat import ntob, tonative
__author__ = 'visteya'
__date__ = 'April 2009'
def md5_hex(s):
return md5(ntob(s, 'utf-8')).hexdigest()
qop_auth = 'auth'
qop_auth_int = 'auth-int'
valid_qops = (qop_auth, qop_auth_int)
valid_algorithms = ('MD5', 'MD5-sess')
FALLBACK_CHARSET = 'ISO-8859-1'
DEFAULT_CHARSET = 'UTF-8'
def TRACE(msg):
cherrypy.log(msg, context='TOOLS.AUTH_DIGEST')
# Three helper functions for users of the tool, providing three variants
# of get_ha1() functions for three different kinds of credential stores.
def get_ha1_dict_plain(user_password_dict):
"""Return a get_ha1 function which obtains a plaintext password from a
dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, with plaintext
passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the
get_ha1 argument to digest_auth().
"""
def get_ha1(realm, username):
password = user_password_dict.get(username)
if password:
return md5_hex('%s:%s:%s' % (username, realm, password))
return None
return get_ha1
def get_ha1_dict(user_ha1_dict):
"""Return a get_ha1 function which obtains a HA1 password hash from a
dictionary of the form: {username : HA1}.
If you want a dictionary-based authentication scheme, but with
pre-computed HA1 hashes instead of plain-text passwords, use
get_ha1_dict(my_userha1_dict) as the value for the get_ha1
argument to digest_auth().
"""
def get_ha1(realm, username):
return user_ha1_dict.get(username)
return get_ha1
def get_ha1_file_htdigest(filename):
"""Return a get_ha1 function which obtains a HA1 password hash from a
flat file with lines of the same format as that produced by the Apache
htdigest utility. For example, for realm 'wonderland', username 'alice',
and password '4x5istwelve', the htdigest line would be::
alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c
If you want to use an Apache htdigest file as the credentials store,
then use get_ha1_file_htdigest(my_htdigest_file) as the value for the
get_ha1 argument to digest_auth(). It is recommended that the filename
argument be an absolute path, to avoid problems.
"""
def get_ha1(realm, username):
result = None
with open(filename, 'r') as f:
for line in f:
u, r, ha1 = line.rstrip().split(':')
if u == username and r == realm:
result = ha1
break
return result
return get_ha1
def synthesize_nonce(s, key, timestamp=None):
"""Synthesize a nonce value which resists spoofing and can be checked
for staleness. Returns a string suitable as the value for 'nonce' in
the www-authenticate header.
s
A string related to the resource, such as the hostname of the server.
key
A secret string known only to the server.
timestamp
An integer seconds-since-the-epoch timestamp
"""
if timestamp is None:
timestamp = int(time.time())
h = md5_hex('%s:%s:%s' % (timestamp, s, key))
nonce = '%s:%s' % (timestamp, h)
return nonce
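# Editor's note (illustrative): the nonce is "timestamp:MD5(timestamp:s:key)",
# so the same (s, key, timestamp) triple always reproduces it exactly; this
# is what HttpDigestAuthorization.validate_nonce() relies on below.
assert (synthesize_nonce('example.org', 'secret', 1234567890) ==
        synthesize_nonce('example.org', 'secret', 1234567890))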
def H(s):
"""The hash function H."""
return md5_hex(s)
def _try_decode_header(header, charset):
global FALLBACK_CHARSET
for enc in (charset, FALLBACK_CHARSET):
try:
return tonative(ntob(tonative(header, 'latin1'), 'latin1'), enc)
except ValueError as ve:
last_err = ve
else:
raise last_err
class HttpDigestAuthorization(object):
"""
Parses a Digest Authorization header and performs
re-calculation of the digest.
"""
scheme = 'digest'
def errmsg(self, s):
return 'Digest Authorization header: %s' % s
@classmethod
def matches(cls, header):
scheme, _, _ = header.partition(' ')
return scheme.lower() == cls.scheme
def __init__(
self, auth_header, http_method,
debug=False, accept_charset=DEFAULT_CHARSET[:],
):
self.http_method = http_method
self.debug = debug
if not self.matches(auth_header):
raise ValueError('Authorization scheme is not "Digest"')
self.auth_header = _try_decode_header(auth_header, accept_charset)
scheme, params = self.auth_header.split(' ', 1)
# make a dict of the params
items = parse_http_list(params)
paramsd = parse_keqv_list(items)
self.realm = paramsd.get('realm')
self.username = paramsd.get('username')
self.nonce = paramsd.get('nonce')
self.uri = paramsd.get('uri')
self.method = paramsd.get('method')
self.response = paramsd.get('response') # the response digest
self.algorithm = paramsd.get('algorithm', 'MD5').upper()
self.cnonce = paramsd.get('cnonce')
self.opaque = paramsd.get('opaque')
self.qop = paramsd.get('qop') # qop
self.nc = paramsd.get('nc') # nonce count
# perform some correctness checks
if self.algorithm not in valid_algorithms:
raise ValueError(
self.errmsg("Unsupported value for algorithm: '%s'" %
self.algorithm))
has_reqd = (
self.username and
self.realm and
self.nonce and
self.uri and
self.response
)
if not has_reqd:
raise ValueError(
self.errmsg('Not all required parameters are present.'))
if self.qop:
if self.qop not in valid_qops:
raise ValueError(
self.errmsg("Unsupported value for qop: '%s'" % self.qop))
if not (self.cnonce and self.nc):
raise ValueError(
self.errmsg('If qop is sent then '
'cnonce and nc MUST be present'))
else:
if self.cnonce or self.nc:
raise ValueError(
self.errmsg('If qop is not sent, '
'neither cnonce nor nc can be present'))
def __str__(self):
return 'authorization : %s' % self.auth_header
def validate_nonce(self, s, key):
"""Validate the nonce.
Returns True if nonce was generated by synthesize_nonce() and the
timestamp is not spoofed, else returns False.
s
A string related to the resource, such as the hostname of
the server.
key
A secret string known only to the server.
Both s and key must be the same values which were used to synthesize
the nonce we are trying to validate.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
s_timestamp, s_hashpart = synthesize_nonce(
s, key, timestamp).split(':', 1)
is_valid = s_hashpart == hashpart
if self.debug:
TRACE('validate_nonce: %s' % is_valid)
return is_valid
except ValueError: # split() error
pass
return False
def is_nonce_stale(self, max_age_seconds=600):
"""Return True if a validated nonce is stale.
The nonce contains a timestamp in plaintext and also a secure
hash of the timestamp. You should first validate the nonce to
ensure the plaintext timestamp is not spoofed.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
if int(timestamp) + max_age_seconds > int(time.time()):
return False
except ValueError: # int() error
pass
if self.debug:
TRACE('nonce is stale')
return True
def HA2(self, entity_body=''):
"""Return the H(A2) string.
See :rfc:`2617` section 3.2.2.3.
"""
# RFC 2617 3.2.2.3
# If the "qop" directive's value is "auth" or is unspecified,
# then A2 is:
# A2 = method ":" digest-uri-value
#
# If the "qop" value is "auth-int", then A2 is:
# A2 = method ":" digest-uri-value ":" H(entity-body)
if self.qop is None or self.qop == 'auth':
a2 = '%s:%s' % (self.http_method, self.uri)
elif self.qop == 'auth-int':
a2 = '%s:%s:%s' % (self.http_method, self.uri, H(entity_body))
else:
# in theory, this should never happen, since I validate qop in
# __init__()
raise ValueError(self.errmsg('Unrecognized value for qop!'))
return H(a2)
def request_digest(self, ha1, entity_body=''):
"""Calculates the Request-Digest. See :rfc:`2617` section 3.2.2.1.
ha1
The HA1 string obtained from the credentials store.
entity_body
If 'qop' is set to 'auth-int', then A2 includes a hash
of the "entity body". The entity body is the part of the
message which follows the HTTP headers. See :rfc:`2617` section
4.3. This refers to the entity the user agent sent in the
request which has the Authorization header. Typically GET
requests don't have an entity, and POST requests do.
"""
ha2 = self.HA2(entity_body)
# Request-Digest -- RFC 2617 3.2.2.1
if self.qop:
req = '%s:%s:%s:%s:%s' % (
self.nonce, self.nc, self.cnonce, self.qop, ha2)
else:
req = '%s:%s' % (self.nonce, ha2)
# RFC 2617 3.2.2.2
#
# If the "algorithm" directive's value is "MD5" or is unspecified,
# then A1 is:
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
#
# If the "algorithm" directive's value is "MD5-sess", then A1 is
# calculated only once - on the first request by the client following
# receipt of a WWW-Authenticate challenge from the server.
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
# ":" unq(nonce-value) ":" unq(cnonce-value)
if self.algorithm == 'MD5-sess':
ha1 = H('%s:%s:%s' % (ha1, self.nonce, self.cnonce))
digest = H('%s:%s' % (ha1, req))
return digest
def _get_charset_declaration(charset):
global FALLBACK_CHARSET
charset = charset.upper()
return (
(', charset="%s"' % charset)
if charset != FALLBACK_CHARSET
else ''
)
def www_authenticate(
realm, key, algorithm='MD5', nonce=None, qop=qop_auth,
stale=False, accept_charset=DEFAULT_CHARSET[:],
):
"""Constructs a WWW-Authenticate header for Digest authentication."""
if qop not in valid_qops:
raise ValueError("Unsupported value for qop: '%s'" % qop)
if algorithm not in valid_algorithms:
raise ValueError("Unsupported value for algorithm: '%s'" % algorithm)
HEADER_PATTERN = (
'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"%s%s'
)
if nonce is None:
nonce = synthesize_nonce(realm, key)
stale_param = ', stale="true"' if stale else ''
charset_declaration = _get_charset_declaration(accept_charset)
return HEADER_PATTERN % (
realm, nonce, algorithm, qop, stale_param, charset_declaration,
)
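# Editor's note (illustrative): a challenge built with a fixed nonce, showing
# the header layout that clients answer; realm and key are hypothetical.
_demo_challenge = www_authenticate('wonderland', 'a565c27146791cfb',
                                   nonce='1:abc')
assert _demo_challenge.startswith('Digest realm="wonderland", nonce="1:abc"')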
def digest_auth(realm, get_ha1, key, debug=False, accept_charset='utf-8'):
"""A CherryPy tool that hooks at before_handler to perform
HTTP Digest Access Authentication, as specified in :rfc:`2617`.
If the request has an 'authorization' header with a 'Digest' scheme,
this tool authenticates the credentials supplied in that header.
If the request has no 'authorization' header, or if it does but the
scheme is not "Digest", or if authentication fails, the tool sends
a 401 response with a 'WWW-Authenticate' Digest header.
realm
A string containing the authentication realm.
get_ha1
A callable that looks up a username in a credentials store
and returns the HA1 string, which is defined in the RFC to be
MD5(username : realm : password). The function's signature is:
``get_ha1(realm, username)``
where username is obtained from the request's 'authorization' header.
If username is not found in the credentials store, get_ha1() returns
None.
key
A secret string known only to the server, used in the synthesis
of nonces.
"""
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
respond_401 = functools.partial(
_respond_401, realm, key, accept_charset, debug)
if not HttpDigestAuthorization.matches(auth_header or ''):
respond_401()
msg = 'The Authorization header could not be parsed.'
with cherrypy.HTTPError.handle(ValueError, 400, msg):
auth = HttpDigestAuthorization(
auth_header, request.method,
debug=debug, accept_charset=accept_charset,
)
if debug:
TRACE(str(auth))
if not auth.validate_nonce(realm, key):
respond_401()
ha1 = get_ha1(realm, auth.username)
if ha1 is None:
respond_401()
# note that for request.body to be available we need to
# hook in at before_handler, not on_start_resource like
# 3.1.x digest_auth does.
digest = auth.request_digest(ha1, entity_body=request.body)
if digest != auth.response:
respond_401()
# authenticated
if debug:
TRACE('digest matches auth.response')
# Now check if nonce is stale.
# The choice of ten minutes' lifetime for nonce is somewhat
# arbitrary
if auth.is_nonce_stale(max_age_seconds=600):
respond_401(stale=True)
request.login = auth.username
if debug:
TRACE('authentication of %s successful' % auth.username)
def _respond_401(realm, key, accept_charset, debug, **kwargs):
"""Respond with 401 status and a WWW-Authenticate header."""
header = www_authenticate(
realm, key,
accept_charset=accept_charset,
**kwargs
)
if debug:
TRACE(header)
cherrypy.serving.response.headers['WWW-Authenticate'] = header
raise cherrypy.HTTPError(
401, 'You are not authorized to access that resource')
| 15,355 | Python | .py | 366 | 33.674863 | 79 | 0.626402 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,596 | encoding.py | rembo10_headphones/lib/cherrypy/lib/encoding.py |
import struct
import time
import io
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import file_generator
from cherrypy.lib import is_closable_iterator
from cherrypy.lib import set_vary_header
_COMPRESSION_LEVEL_FAST = 1
_COMPRESSION_LEVEL_BEST = 9
def decode(encoding=None, default_encoding='utf-8'):
"""Replace or extend the list of charsets used to decode a request entity.
Either argument may be a single string or a list of strings.
encoding
If not None, restricts the set of charsets attempted while decoding
a request entity to the given set (even if a different charset is
given in the Content-Type request header).
default_encoding
Only in effect if the 'encoding' argument is not given.
If given, the set of charsets attempted while decoding a request
entity is *extended* with the given value(s).
"""
body = cherrypy.request.body
if encoding is not None:
if not isinstance(encoding, list):
encoding = [encoding]
body.attempt_charsets = encoding
elif default_encoding:
if not isinstance(default_encoding, list):
default_encoding = [default_encoding]
body.attempt_charsets = body.attempt_charsets + default_encoding
class UTF8StreamEncoder:
def __init__(self, iterator):
self._iterator = iterator
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
res = next(self._iterator)
if isinstance(res, str):
res = res.encode('utf-8')
return res
def close(self):
if is_closable_iterator(self._iterator):
self._iterator.close()
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError(self, attr)
return getattr(self._iterator, attr)
class ResponseEncoder:
default_encoding = 'utf-8'
failmsg = 'Response body could not be encoded with %r.'
encoding = None
errors = 'strict'
text_only = True
add_charset = True
debug = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.attempted_charsets = set()
request = cherrypy.serving.request
if request.handler is not None:
# Replace request.handler with self
if self.debug:
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
self.oldhandler = request.handler
request.handler = self
def encode_stream(self, encoding):
"""Encode a streaming response body.
Use a generator wrapper, and just pray it works as the stream is
being written out.
"""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
def encoder(body):
for chunk in body:
if isinstance(chunk, str):
chunk = chunk.encode(encoding, self.errors)
yield chunk
self.body = encoder(self.body)
return True
def encode_string(self, encoding):
"""Encode a buffered response body."""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
body = []
for chunk in self.body:
if isinstance(chunk, str):
try:
chunk = chunk.encode(encoding, self.errors)
except (LookupError, UnicodeError):
return False
body.append(chunk)
self.body = body
return True
def find_acceptable_charset(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
if self.debug:
cherrypy.log('response.stream %r' %
response.stream, 'TOOLS.ENCODE')
if response.stream:
encoder = self.encode_stream
else:
encoder = self.encode_string
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
# Encoded strings may be of different lengths from their
# unicode equivalents, and even from each other. For example:
# >>> t = u"\u7007\u3040"
# >>> len(t)
# 2
# >>> len(t.encode("UTF-8"))
# 6
# >>> len(t.encode("utf7"))
# 8
del response.headers['Content-Length']
# Parse the Accept-Charset request header, and try to provide one
# of the requested charsets (in order of user preference).
encs = request.headers.elements('Accept-Charset')
charsets = [enc.value.lower() for enc in encs]
if self.debug:
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
if self.encoding is not None:
# If specified, force this encoding to be used, or fail.
encoding = self.encoding.lower()
if self.debug:
cherrypy.log('Specified encoding %r' %
encoding, 'TOOLS.ENCODE')
if (not charsets) or '*' in charsets or encoding in charsets:
if self.debug:
cherrypy.log('Attempting encoding %r' %
encoding, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
else:
if not encs:
if self.debug:
cherrypy.log('Attempting default encoding %r' %
self.default_encoding, 'TOOLS.ENCODE')
# Any character-set is acceptable.
if encoder(self.default_encoding):
return self.default_encoding
else:
raise cherrypy.HTTPError(500, self.failmsg %
self.default_encoding)
else:
for element in encs:
if element.qvalue > 0:
if element.value == '*':
# Matches any charset. Try our default.
if self.debug:
cherrypy.log('Attempting default encoding due '
'to %r' % element, 'TOOLS.ENCODE')
if encoder(self.default_encoding):
return self.default_encoding
else:
encoding = element.value
if self.debug:
cherrypy.log('Attempting encoding %s (qvalue >'
'0)' % element, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
if '*' not in charsets:
# If no "*" is present in an Accept-Charset field, then all
# character sets not explicitly mentioned get a quality
# value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
iso = 'iso-8859-1'
if iso not in charsets:
if self.debug:
cherrypy.log('Attempting ISO-8859-1 encoding',
'TOOLS.ENCODE')
if encoder(iso):
return iso
# No suitable encoding found.
ac = request.headers.get('Accept-Charset')
if ac is None:
msg = 'Your client did not send an Accept-Charset header.'
else:
msg = 'Your client sent this Accept-Charset header: %s.' % ac
_charsets = ', '.join(sorted(self.attempted_charsets))
msg += ' We tried these charsets: %s.' % (_charsets,)
raise cherrypy.HTTPError(406, msg)
def __call__(self, *args, **kwargs):
response = cherrypy.serving.response
self.body = self.oldhandler(*args, **kwargs)
self.body = prepare_iter(self.body)
ct = response.headers.elements('Content-Type')
if self.debug:
cherrypy.log('Content-Type: %r' % [str(h)
for h in ct], 'TOOLS.ENCODE')
if ct and self.add_charset:
ct = ct[0]
if self.text_only:
if ct.value.lower().startswith('text/'):
if self.debug:
cherrypy.log(
'Content-Type %s starts with "text/"' % ct,
'TOOLS.ENCODE')
do_find = True
else:
if self.debug:
cherrypy.log('Not finding because Content-Type %s '
'does not start with "text/"' % ct,
'TOOLS.ENCODE')
do_find = False
else:
if self.debug:
cherrypy.log('Finding because not text_only',
'TOOLS.ENCODE')
do_find = True
if do_find:
# Set "charset=..." param on response Content-Type header
ct.params['charset'] = self.find_acceptable_charset()
if self.debug:
cherrypy.log('Setting Content-Type %s' % ct,
'TOOLS.ENCODE')
response.headers['Content-Type'] = str(ct)
return self.body
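# --- Added illustrative sketch; not part of the original module. ---
# A minimal per-path config enabling this encode tool, assuming the
# standard 'tools.encode.*' config keys; the helper name and the mount
# layout are hypothetical.
def _example_encode_config():
    """Return a hypothetical config dict forcing UTF-8 responses."""
    return {
        '/': {
            'tools.encode.on': True,
            'tools.encode.encoding': 'utf-8',  # force UTF-8, else 406
            'tools.encode.text_only': True,    # only touch text/* bodies
        },
    }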
def prepare_iter(value):
"""Ensure response body is iterable and resolves to False when empty."""
if isinstance(value, text_or_bytes):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if value:
value = [value]
else:
# [''] doesn't evaluate to False, so replace it with [].
value = []
    # Don't use isinstance here; an isinstance check against io.IOBase
    # (an abstract base class) takes ~1000 times as long as, say,
    # isinstance(value, str)
elif hasattr(value, 'read'):
value = file_generator(value)
elif value is None:
value = []
return value
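# --- Added illustrative sketch; not part of the original module. ---
# Exercises prepare_iter's normalization rules; the assertions follow
# from the code above and the helper name is hypothetical.
def _example_prepare_iter():
    assert prepare_iter(b'abc') == [b'abc']  # strings get list-wrapped
    assert prepare_iter(b'') == []           # empty string -> empty list
    assert prepare_iter(None) == []          # None -> empty list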
# GZIP
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
# See https://tools.ietf.org/html/rfc1952
yield b'\x1f\x8b' # ID1 and ID2: gzip marker
yield b'\x08' # CM: compression method
yield b'\x00' # FLG: none set
# MTIME: 4 bytes
yield struct.pack('<L', int(time.time()) & int('FFFFFFFF', 16))
# RFC 1952, section 2.3.1:
#
# XFL (eXtra FLags)
# These flags are available for use by specific compression
# methods. The "deflate" method (CM = 8) sets these flags as
# follows:
#
# XFL = 2 - compressor used maximum compression,
# slowest algorithm
# XFL = 4 - compressor used fastest algorithm
if compress_level == _COMPRESSION_LEVEL_BEST:
yield b'\x02' # XFL: max compression, slowest algo
elif compress_level == _COMPRESSION_LEVEL_FAST:
yield b'\x04' # XFL: min compression, fastest algo
else:
yield b'\x00' # XFL: compression unset/tradeoff
yield b'\xff' # OS: unknown
crc = zlib.crc32(b'')
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack('<L', crc & int('FFFFFFFF', 16))
# ISIZE: 4 bytes
yield struct.pack('<L', size & int('FFFFFFFF', 16))
def decompress(body):
    """Decompress the gzip-encoded 'body' bytes and return the raw data."""
import gzip
zbuf = io.BytesIO()
zbuf.write(body)
zbuf.seek(0)
zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)
data = zfile.read()
zfile.close()
return data
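# --- Added illustrative sketch; not part of the original module. ---
# Round-trips a chunked body through compress()/decompress() above;
# level 6 takes the XFL "else" branch. The helper name is hypothetical.
def _example_gzip_roundtrip():
    body = [b'hello, ', b'world']
    gzipped = b''.join(compress(body, 6))
    assert decompress(gzipped) == b'hello, world'
    return gzipped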
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'],
debug=False):
"""Try to gzip the response body if Content-Type in mime_types.
cherrypy.response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
    The provided list of mime-types must be in one of the following forms:
* `type/subtype`
* `type/*`
* `type/*+subtype`
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
set_vary_header(response, 'Accept-Encoding')
if not response.body:
# Response body is empty (might be a 304 for instance)
if debug:
cherrypy.log('No response body', context='TOOLS.GZIP')
return
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(request, 'cached', False):
if debug:
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
return
acceptable = request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
if debug:
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
return
ct = response.headers.get('Content-Type', '').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
if debug:
cherrypy.log('Non-zero identity qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
if debug:
cherrypy.log('Zero gzip qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if ct not in mime_types:
# If the list of provided mime-types contains tokens
# such as 'text/*' or 'application/*+xml',
# we go through them and find the most appropriate one
# based on the given content-type.
        # The pattern matching only covers the most common cases, as
        # stated above, and doesn't support extra parameters.
found = False
if '/' in ct:
ct_media_type, ct_sub_type = ct.split('/')
for mime_type in mime_types:
if '/' in mime_type:
media_type, sub_type = mime_type.split('/')
if ct_media_type == media_type:
if sub_type == '*':
found = True
break
elif '+' in sub_type and '+' in ct_sub_type:
ct_left, ct_right = ct_sub_type.split('+')
left, right = sub_type.split('+')
if left == '*' and ct_right == right:
found = True
break
if not found:
if debug:
cherrypy.log('Content-Type %s not in mime_types %r' %
(ct, mime_types), context='TOOLS.GZIP')
return
if debug:
cherrypy.log('Gzipping', context='TOOLS.GZIP')
# Return a generator that compresses the page
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, compress_level)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return
if debug:
cherrypy.log('No acceptable encoding found.', context='GZIP')
cherrypy.HTTPError(406, 'identity, gzip').set_response()
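# --- Added illustrative sketch; not part of the original module. ---
# A hypothetical per-path config enabling the gzip tool with a wider
# mime_types list, assuming the standard 'tools.gzip.*' config keys.
def _example_gzip_config():
    return {
        '/': {
            'tools.gzip.on': True,
            'tools.gzip.compress_level': 6,
            'tools.gzip.mime_types': ['text/html', 'text/plain',
                                      'application/json'],
        },
    }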
| 17,036 | Python | .py | 387 | 30.770026 | 79 | 0.542237 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,597 | jsontools.py | rembo10_headphones/lib/cherrypy/lib/jsontools.py |
import cherrypy
from cherrypy import _json as json
from cherrypy._cpcompat import text_or_bytes, ntou
def json_processor(entity):
"""Read application/json data into request.json."""
if not entity.headers.get(ntou('Content-Length'), ntou('')):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
with cherrypy.HTTPError.handle(ValueError, 400, 'Invalid JSON document'):
cherrypy.serving.request.json = json.decode(body.decode('utf-8'))
def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
force=True, debug=False, processor=json_processor):
"""Add a processor to parse JSON request entities:
The default processor places the parsed data into request.json.
Incoming request entities which match the given content_type(s) will
be deserialized from JSON to the Python equivalent, and the result
stored at cherrypy.request.json. The 'content_type' argument may
be a Content-Type string or a list of allowable Content-Type strings.
If the 'force' argument is True (the default), then entities of other
content types will not be allowed; "415 Unsupported Media Type" is
raised instead.
Supply your own processor to use a custom decoder, or to handle the parsed
data differently. The processor can be configured via
tools.json_in.processor or via the decorator method.
Note that the deserializer requires the client send a Content-Length
request header, or it will raise "411 Length Required". If for any
other reason the request entity cannot be deserialized from JSON,
it will raise "400 Bad Request: Invalid JSON document".
"""
request = cherrypy.serving.request
if isinstance(content_type, text_or_bytes):
content_type = [content_type]
if force:
if debug:
cherrypy.log('Removing body processors %s' %
repr(request.body.processors.keys()), 'TOOLS.JSON_IN')
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an entity of content type %s' %
', '.join(content_type))
for ct in content_type:
if debug:
cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN')
request.body.processors[ct] = processor
def json_handler(*args, **kwargs):
    """Invoke the wrapped handler and JSON-encode its return value."""
value = cherrypy.serving.request._json_inner_handler(*args, **kwargs)
return json.encode(value)
def json_out(content_type='application/json', debug=False,
handler=json_handler):
"""Wrap request.handler to serialize its output to JSON. Sets Content-Type.
If the given content_type is None, the Content-Type response header
is not set.
Provide your own handler to use a custom encoder. For example
cherrypy.config['tools.json_out.handler'] = <function>, or
@json_out(handler=function).
"""
request = cherrypy.serving.request
# request.handler may be set to None by e.g. the caching tool
# to signal to all components that a response body has already
# been attached, in which case we don't need to wrap anything.
if request.handler is None:
return
if debug:
cherrypy.log('Replacing %s with JSON handler' % request.handler,
'TOOLS.JSON_OUT')
request._json_inner_handler = request.handler
request.handler = handler
if content_type is not None:
if debug:
cherrypy.log('Setting Content-Type to %s' %
content_type, 'TOOLS.JSON_OUT')
cherrypy.serving.response.headers['Content-Type'] = content_type
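# --- Added illustrative sketch; not part of the original module. ---
# A hypothetical echo endpoint wiring json_in/json_out together as tool
# decorators; it is built lazily inside a function so importing this
# module stays side-effect free.
def _example_json_endpoint():
    class Echo:
        @cherrypy.expose
        @cherrypy.tools.json_in()
        @cherrypy.tools.json_out()
        def echo(self):
            # request.json was populated by json_processor above
            return cherrypy.serving.request.json
    return Echo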
| 3,641 | Python | .py | 72 | 43.444444 | 79 | 0.695664 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,598 | __init__.py | rembo10_headphones/lib/cherrypy/lib/__init__.py |
"""CherryPy Library."""
def is_iterator(obj):
"""Detect if the object provided implements the iterator protocol.
(i.e. like a generator).
This will return False for objects which are iterable, but not
iterators themselves.
"""
from types import GeneratorType
if isinstance(obj, GeneratorType):
return True
elif not hasattr(obj, '__iter__'):
return False
else:
# Types which implement the protocol must return themselves when
# invoking 'iter' upon them.
return iter(obj) is obj
def is_closable_iterator(obj):
"""Detect if the given object is both closable and iterator."""
# Not an iterator.
if not is_iterator(obj):
return False
# A generator - the easiest thing to deal with.
import inspect
if inspect.isgenerator(obj):
return True
# A custom iterator. Look for a close method...
if not (hasattr(obj, 'close') and callable(obj.close)):
return False
# ... which doesn't require any arguments.
try:
inspect.getcallargs(obj.close)
except TypeError:
return False
else:
return True
class file_generator(object):
"""Yield the given input (a file object) in chunks (default 64k).
(Core)
"""
def __init__(self, input, chunkSize=65536):
"""Initialize file_generator with file ``input`` for chunked access."""
self.input = input
self.chunkSize = chunkSize
def __iter__(self):
"""Return iterator."""
return self
def __next__(self):
"""Return next chunk of file."""
chunk = self.input.read(self.chunkSize)
if chunk:
return chunk
else:
if hasattr(self.input, 'close'):
self.input.close()
raise StopIteration()
next = __next__
def __del__(self):
"""Close input on descturct."""
if hasattr(self.input, 'close'):
self.input.close()
def file_generator_limited(fileobj, count, chunk_size=65536):
"""Yield the given file object in chunks.
    Stops after `count` bytes have been emitted.
Default chunk size is 64kB. (Core)
"""
remaining = count
while remaining > 0:
chunk = fileobj.read(min(chunk_size, remaining))
chunklen = len(chunk)
if chunklen == 0:
return
remaining -= chunklen
yield chunk
def set_vary_header(response, header_name):
"""Add a Vary header to a response."""
varies = response.headers.get('Vary', '')
varies = [x.strip() for x in varies.split(',') if x.strip()]
if header_name not in varies:
varies.append(header_name)
response.headers['Vary'] = ', '.join(varies)
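# --- Added illustrative sketch; not part of the original module. ---
# Streams an in-memory file through file_generator, then a bounded
# slice through file_generator_limited; the expected values in the
# comments follow from the code above. The helper name is hypothetical.
def _example_file_generators():
    import io
    whole = b''.join(file_generator(io.BytesIO(b'x' * 100), chunkSize=64))
    src = io.BytesIO(b'abcdefghij')
    src.seek(2)  # limited reads start wherever the caller positioned it
    partial = b''.join(file_generator_limited(src, count=5, chunk_size=2))
    return whole, partial  # (b'x' * 100, b'cdefg')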
| 2,745 | Python | .py | 80 | 27.3 | 79 | 0.624433 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,599 | static.py | rembo10_headphones/lib/cherrypy/lib/static.py |
"""Module with helpers for serving static files."""
import mimetypes
import os
import platform
import re
import stat
import unicodedata
import urllib.parse
from email.generator import _make_boundary as make_boundary
from io import UnsupportedOperation
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy.lib import cptools, file_generator_limited, httputil
def _setup_mimetypes():
"""Pre-initialize global mimetype map."""
if not mimetypes.inited:
mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
_setup_mimetypes()
def _make_content_disposition(disposition, file_name):
"""Create HTTP header for downloading a file with a UTF-8 filename.
This function implements the recommendations of :rfc:`6266#appendix-D`.
See this and related answers: https://stackoverflow.com/a/8996249/2173868.
"""
    # Normalize using the "NFKC" form: compatibility decomposition
    # followed by canonical composition. Strings normalized this way
    # should display identically, should be treated the same by
    # applications (e.g. when alphabetizing names or searching), and
    # may be substituted for each other.
    # See: https://en.wikipedia.org/wiki/Unicode_equivalence.
ascii_name = (
unicodedata.normalize('NFKC', file_name).
encode('ascii', errors='ignore').decode()
)
header = '{}; filename="{}"'.format(disposition, ascii_name)
if ascii_name != file_name:
quoted_name = urllib.parse.quote(file_name)
header += '; filename*=UTF-8\'\'{}'.format(quoted_name)
return header
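# --- Added illustrative sketch; not part of the original module. ---
# Shows the headers this helper produces for an ASCII and a UTF-8
# filename; note that errors='ignore' simply drops non-ASCII characters
# from the fallback filename. The helper name is hypothetical.
def _example_content_disposition():
    plain = _make_content_disposition('attachment', 'report.pdf')
    # -> 'attachment; filename="report.pdf"'
    utf8 = _make_content_disposition('attachment', 'caf\u00e9.pdf')
    # -> 'attachment; filename="caf.pdf"; filename*=UTF-8\'\'caf%C3%A9.pdf'
    return plain, utf8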
def serve_file(path, content_type=None, disposition=None, name=None,
debug=False):
"""Set status, headers, and body in order to serve the given path.
The Content-Type header will be set to the content_type arg, if
provided. If not provided, the Content-Type will be guessed by the
file extension of the 'path' argument.
    If disposition is not None, the Content-Disposition header will be
    set to "<disposition>; filename=<name>; filename*=utf-8''<name>" as
    described in :rfc:`6266#appendix-D`. If name is None, it will be
    set to the basename of path. If disposition is None, no
    Content-Disposition header will be written.
"""
response = cherrypy.serving.response
# If path is relative, users should fix it by making path absolute.
# That is, CherryPy should not guess where the application root is.
# It certainly should *not* use cwd (since CP may be invoked from a
# variety of paths). If using tools.staticdir, you can make your relative
# paths become absolute by supplying a value for "tools.staticdir.root".
if not os.path.isabs(path):
msg = "'%s' is not an absolute path." % path
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
try:
st = os.stat(path)
except (OSError, TypeError, ValueError):
# OSError when file fails to stat
# TypeError on Python 2 when there's a null byte
# ValueError on Python 3 when there's a null byte
if debug:
cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Check if path is a directory.
if stat.S_ISDIR(st.st_mode):
# Let the caller deal with it as they like.
if debug:
cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
raise cherrypy.NotFound()
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
if content_type is None:
# Set content-type based on filename extension
ext = ''
i = path.rfind('.')
if i != -1:
ext = path[i:].lower()
content_type = mimetypes.types_map.get(ext, None)
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
name = os.path.basename(path)
cd = _make_content_disposition(disposition, name)
response.headers['Content-Disposition'] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
content_length = st.st_size
fileobj = open(path, 'rb')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
debug=False):
"""Set status, headers, and body in order to serve the given file object.
The Content-Type header will be set to the content_type arg, if provided.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>; filename*=utf-8''<name>"
as described in :rfc:`6266#appendix-D`.
If name is None, 'filename' will not be set.
If disposition is None, no Content-Disposition header will be written.
CAUTION: If the request contains a 'Range' header, one or more seek()s will
be performed on the file object. This may cause undesired behavior if
the file object is not seekable. It could also produce undesired results
if the caller set the read position of the file object prior to calling
serve_fileobj(), expecting that the data would be served starting from that
position.
"""
response = cherrypy.serving.response
try:
st = os.fstat(fileobj.fileno())
except AttributeError:
if debug:
            cherrypy.log('fileobj has no fileno attribute', 'TOOLS.STATIC')
content_length = None
except UnsupportedOperation:
content_length = None
else:
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
cptools.validate_since()
content_length = st.st_size
if content_type is not None:
response.headers['Content-Type'] = content_type
if debug:
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
cd = None
if disposition is not None:
if name is None:
cd = disposition
else:
cd = _make_content_disposition(disposition, name)
response.headers['Content-Disposition'] = cd
if debug:
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
"""Set ``response.body`` to the given file object, perhaps ranged.
Internal helper.
"""
response = cherrypy.serving.response
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
request = cherrypy.serving.request
if request.protocol >= (1, 1):
response.headers['Accept-Ranges'] = 'bytes'
r = httputil.get_ranges(request.headers.get('Range'), content_length)
if r == []:
response.headers['Content-Range'] = 'bytes */%s' % content_length
message = ('Invalid Range (first-byte-pos greater than '
'Content-Length)')
if debug:
cherrypy.log(message, 'TOOLS.STATIC')
raise cherrypy.HTTPError(416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
if stop > content_length:
stop = content_length
r_len = stop - start
if debug:
cherrypy.log(
'Single part; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
response.status = '206 Partial Content'
response.headers['Content-Range'] = (
'bytes %s-%s/%s' % (start, stop - 1, content_length))
response.headers['Content-Length'] = r_len
fileobj.seek(start)
response.body = file_generator_limited(fileobj, r_len)
else:
# Return a multipart/byteranges response.
response.status = '206 Partial Content'
boundary = make_boundary()
ct = 'multipart/byteranges; boundary=%s' % boundary
response.headers['Content-Type'] = ct
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
def file_ranges():
# Apache compatibility:
yield b'\r\n'
for start, stop in r:
if debug:
cherrypy.log(
'Multipart; start: %r, stop: %r' % (
start, stop),
'TOOLS.STATIC')
yield ntob('--' + boundary, 'ascii')
yield ntob('\r\nContent-type: %s' % content_type,
'ascii')
yield ntob(
'\r\nContent-range: bytes %s-%s/%s\r\n\r\n' % (
start, stop - 1, content_length),
'ascii')
fileobj.seek(start)
gen = file_generator_limited(fileobj, stop - start)
for chunk in gen:
yield chunk
yield b'\r\n'
# Final boundary
yield ntob('--' + boundary + '--', 'ascii')
# Apache compatibility:
yield b'\r\n'
response.body = file_ranges()
return response.body
else:
if debug:
cherrypy.log('No byteranges requested', 'TOOLS.STATIC')
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
response.headers['Content-Length'] = content_length
response.body = fileobj
return response.body
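# --- Added illustrative sketch; not part of the original module. ---
# The single-part Content-Range arithmetic used above: `stop` is
# exclusive internally while the header's end position is inclusive,
# hence the `stop - 1`.
def _example_content_range():
    start, stop, content_length = 0, 500, 1234
    r_len = stop - start
    header = 'bytes %s-%s/%s' % (start, stop - 1, content_length)
    return header, r_len  # ('bytes 0-499/1234', 500)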
def serve_download(path, name=None):
"""Serve 'path' as an application/x-download attachment."""
# This is such a common idiom I felt it deserved its own wrapper.
return serve_file(path, 'application/x-download', 'attachment', name)
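# --- Added illustrative sketch; not part of the original module. ---
# A hypothetical handler exposing one file as a download; the path is
# an assumption (serve_file requires it to be absolute).
def _example_download_handler():
    class Downloads:
        @cherrypy.expose
        def report(self):
            return serve_download('/srv/files/report.pdf',
                                  name='report.pdf')
    return Downloads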
def _attempt(filename, content_types, debug=False):
    """Try to serve 'filename'; return True if served, False if not found."""
if debug:
cherrypy.log('Attempting %r (content_types %r)' %
(filename, content_types), 'TOOLS.STATICDIR')
try:
# you can set the content types for a
# complete directory per extension
content_type = None
if content_types:
r, ext = os.path.splitext(filename)
content_type = content_types.get(ext[1:], None)
serve_file(filename, content_type=content_type, debug=debug)
return True
except cherrypy.NotFound:
# If we didn't find the static file, continue handling the
# request. We might find a dynamic handler instead.
if debug:
cherrypy.log('NotFound', 'TOOLS.STATICFILE')
return False
def staticdir(section, dir, root='', match='', content_types=None, index='',
debug=False):
"""Serve a static resource from the given (root +) dir.
match
If given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
content_types
If given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
index
If provided, it should be the (relative) name of a file to
serve for directory requests. For example, if the dir argument is
'/home/me', the Request-URI is 'myapp', and the index arg is
'index.html', the file '/home/me/myapp/index.html' will be sought.
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICDIR')
return False
# Allow the use of '~' to refer to a user's home directory.
dir = os.path.expanduser(dir)
# If dir is relative, make absolute using "root".
if not os.path.isabs(dir):
if not root:
msg = 'Static dir requires an absolute dir (or root).'
if debug:
cherrypy.log(msg, 'TOOLS.STATICDIR')
raise ValueError(msg)
dir = os.path.join(root, dir)
# Determine where we are in the object tree relative to 'section'
# (where the static tool was defined).
if section == 'global':
section = '/'
section = section.rstrip(r'\/')
branch = request.path_info[len(section) + 1:]
branch = urllib.parse.unquote(branch.lstrip(r'\/'))
# Requesting a file in sub-dir of the staticdir results
# in mixing of delimiter styles, e.g. C:\static\js/script.js.
    # Windows accepts this form, except when the path is
# supplied in extended-path notation, e.g. \\?\C:\static\js/script.js.
# http://bit.ly/1vdioCX
if platform.system() == 'Windows':
branch = branch.replace('/', '\\')
# If branch is "", filename will end in a slash
filename = os.path.join(dir, branch)
if debug:
cherrypy.log('Checking file %r to fulfill %r' %
(filename, request.path_info), 'TOOLS.STATICDIR')
# There's a chance that the branch pulled from the URL might
# have ".." or similar uplevel attacks in it. Check that the final
# filename is a child of dir.
if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
raise cherrypy.HTTPError(403) # Forbidden
handled = _attempt(filename, content_types)
if not handled:
# Check for an index file if a folder was requested.
if index:
handled = _attempt(os.path.join(filename, index), content_types)
if handled:
request.is_index = filename[-1] in (r'\/')
return handled
def staticfile(filename, root=None, match='', content_types=None, debug=False):
"""Serve a static resource from the given (root +) filename.
match
If given, request.path_info will be searched for the given
regular expression before attempting to serve static content.
content_types
If given, it should be a Python dictionary of
{file-extension: content-type} pairs, where 'file-extension' is
a string (e.g. "gif") and 'content-type' is the value to write
out in the Content-Type response header (e.g. "image/gif").
"""
request = cherrypy.serving.request
if request.method not in ('GET', 'HEAD'):
if debug:
cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
return False
if match and not re.search(match, request.path_info):
if debug:
cherrypy.log('request.path_info %r does not match pattern %r' %
(request.path_info, match), 'TOOLS.STATICFILE')
return False
# If filename is relative, make absolute using "root".
if not os.path.isabs(filename):
if not root:
msg = "Static tool requires an absolute filename (got '%s')." % (
filename,)
if debug:
cherrypy.log(msg, 'TOOLS.STATICFILE')
raise ValueError(msg)
filename = os.path.join(root, filename)
return _attempt(filename, content_types, debug=debug)
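# --- Added illustrative sketch; not part of the original module. ---
# A hypothetical config wiring both tools; the paths are assumptions.
# 'tools.staticdir.root' makes the relative 'dir' absolute, as the
# checks above require.
def _example_static_config():
    return {
        '/static': {
            'tools.staticdir.on': True,
            'tools.staticdir.root': '/srv/www',
            'tools.staticdir.dir': 'assets',
            'tools.staticdir.index': 'index.html',
        },
        '/favicon.ico': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': '/srv/www/assets/favicon.ico',
        },
    }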
| 16,613 | Python | .py | 355 | 37.166197 | 79 | 0.621612 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |