Dataset schema (column | dtype | value range or distinct count):

id                    | int64   | 0 to 458k
file_name             | string  | lengths 4 to 119
file_path             | string  | lengths 14 to 227
content               | string  | lengths 24 to 9.96M
size                  | int64   | 24 to 9.96M
language              | string  | 1 class
extension             | string  | 14 classes
total_lines           | int64   | 1 to 219k
avg_line_length       | float64 | 2.52 to 4.63M
max_line_length       | int64   | 5 to 9.91M
alphanum_fraction     | float64 | 0 to 1
repo_name             | string  | lengths 7 to 101
repo_stars            | int64   | 100 to 139k
repo_forks            | int64   | 0 to 26.4k
repo_open_issues      | int64   | 0 to 2.27k
repo_license          | string  | 12 classes
repo_extraction_date  | string  | 433 classes
id: 8,400 | file_name: expressions.py
file_path: rembo10_headphones/lib/apscheduler/triggers/cron/expressions.py
content:
"""This module contains the expressions applicable for CronTrigger's fields.""" from calendar import monthrange import re from apscheduler.util import asint __all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', 'WeekdayPositionExpression', 'LastDayOfMonthExpression') WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] class AllExpression(object): value_re = re.compile(r'\*(?:/(?P<step>\d+))?$') def __init__(self, step=None): self.step = asint(step) if self.step == 0: raise ValueError('Increment must be higher than 0') def validate_range(self, field_name): from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name] if self.step and self.step > value_range: raise ValueError('the step value ({}) is higher than the total range of the ' 'expression ({})'.format(self.step, value_range)) def get_next_value(self, date, field): start = field.get_value(date) minval = field.get_min(date) maxval = field.get_max(date) start = max(start, minval) if not self.step: next = start else: distance_to_next = (self.step - (start - minval)) % self.step next = start + distance_to_next if next <= maxval: return next def __eq__(self, other): return isinstance(other, self.__class__) and self.step == other.step def __str__(self): if self.step: return '*/%d' % self.step return '*' def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.step) class RangeExpression(AllExpression): value_re = re.compile( r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$') def __init__(self, first, last=None, step=None): super(RangeExpression, self).__init__(step) first = asint(first) last = asint(last) if last is None and step is None: last = first if last is not None and first > last: raise ValueError('The minimum value in a range must not be higher than the maximum') self.first = first self.last = last def validate_range(self, field_name): from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES super(RangeExpression, self).validate_range(field_name) if self.first < MIN_VALUES[field_name]: raise ValueError('the first value ({}) is lower than the minimum value ({})' .format(self.first, MIN_VALUES[field_name])) if self.last is not None and self.last > MAX_VALUES[field_name]: raise ValueError('the last value ({}) is higher than the maximum value ({})' .format(self.last, MAX_VALUES[field_name])) value_range = (self.last or MAX_VALUES[field_name]) - self.first if self.step and self.step > value_range: raise ValueError('the step value ({}) is higher than the total range of the ' 'expression ({})'.format(self.step, value_range)) def get_next_value(self, date, field): startval = field.get_value(date) minval = field.get_min(date) maxval = field.get_max(date) # Apply range limits minval = max(minval, self.first) maxval = min(maxval, self.last) if self.last is not None else maxval nextval = max(minval, startval) # Apply the step if defined if self.step: distance_to_next = (self.step - (nextval - minval)) % self.step nextval += distance_to_next return nextval if nextval <= maxval else None def __eq__(self, other): return (isinstance(other, self.__class__) and self.first == other.first and self.last == other.last) def __str__(self): if self.last != self.first and self.last is not None: range = '%d-%d' % (self.first, self.last) else: range = str(self.first) if self.step: return '%s/%d' % (range, self.step) return range def 
__repr__(self): args = [str(self.first)] if self.last != self.first and self.last is not None or self.step: args.append(str(self.last)) if self.step: args.append(str(self.step)) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class MonthRangeExpression(RangeExpression): value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) def __init__(self, first, last=None): try: first_num = MONTHS.index(first.lower()) + 1 except ValueError: raise ValueError('Invalid month name "%s"' % first) if last: try: last_num = MONTHS.index(last.lower()) + 1 except ValueError: raise ValueError('Invalid month name "%s"' % last) else: last_num = None super(MonthRangeExpression, self).__init__(first_num, last_num) def __str__(self): if self.last != self.first and self.last is not None: return '%s-%s' % (MONTHS[self.first - 1], MONTHS[self.last - 1]) return MONTHS[self.first - 1] def __repr__(self): args = ["'%s'" % MONTHS[self.first]] if self.last != self.first and self.last is not None: args.append("'%s'" % MONTHS[self.last - 1]) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class WeekdayRangeExpression(RangeExpression): value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) def __init__(self, first, last=None): try: first_num = WEEKDAYS.index(first.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % first) if last: try: last_num = WEEKDAYS.index(last.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % last) else: last_num = None super(WeekdayRangeExpression, self).__init__(first_num, last_num) def __str__(self): if self.last != self.first and self.last is not None: return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last]) return WEEKDAYS[self.first] def __repr__(self): args = ["'%s'" % WEEKDAYS[self.first]] if self.last != self.first and self.last is not None: args.append("'%s'" % WEEKDAYS[self.last]) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class WeekdayPositionExpression(AllExpression): options = ['1st', '2nd', '3rd', '4th', '5th', 'last'] value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' % '|'.join(options), re.IGNORECASE) def __init__(self, option_name, weekday_name): super(WeekdayPositionExpression, self).__init__(None) try: self.option_num = self.options.index(option_name.lower()) except ValueError: raise ValueError('Invalid weekday position "%s"' % option_name) try: self.weekday = WEEKDAYS.index(weekday_name.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % weekday_name) def get_next_value(self, date, field): # Figure out the weekday of the month's first day and the number of days in that month first_day_wday, last_day = monthrange(date.year, date.month) # Calculate which day of the month is the first of the target weekdays first_hit_day = self.weekday - first_day_wday + 1 if first_hit_day <= 0: first_hit_day += 7 # Calculate what day of the month the target weekday would be if self.option_num < 5: target_day = first_hit_day + self.option_num * 7 else: target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7 if target_day <= last_day and target_day >= date.day: return target_day def __eq__(self, other): return (super(WeekdayPositionExpression, self).__eq__(other) and self.option_num == other.option_num and self.weekday == other.weekday) def __str__(self): return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday]) def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, 
self.options[self.option_num], WEEKDAYS[self.weekday]) class LastDayOfMonthExpression(AllExpression): value_re = re.compile(r'last', re.IGNORECASE) def __init__(self): super(LastDayOfMonthExpression, self).__init__(None) def get_next_value(self, date, field): return monthrange(date.year, date.month)[1] def __str__(self): return 'last' def __repr__(self): return "%s()" % self.__class__.__name__
size: 9,184 | language: Python | extension: .py | total_lines: 191
avg_line_length: 38.225131 | max_line_length: 96 | alphanum_fraction: 0.581552
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
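A small sketch of how these expression classes are exercised; the cron strings below are illustrative, and in normal use they are driven by CronTrigger rather than by hand (this assumes the vendored apscheduler package above is on the import path):

```python
from apscheduler.triggers.cron.expressions import RangeExpression, WeekdayPositionExpression

# RangeExpression.value_re captures "first[-last][/step]" as named groups
match = RangeExpression.value_re.match('10-30/5')
expr = RangeExpression(**match.groupdict())
print(expr)        # -> 10-30/5
print(repr(expr))  # -> RangeExpression(10, 30, 5)

# "2nd fri"-style expressions are parsed by WeekdayPositionExpression
match = WeekdayPositionExpression.value_re.match('2nd fri')
print(WeekdayPositionExpression(**match.groupdict()))  # -> 2nd fri
```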
id: 8,401 | file_name: __init__.py
file_path: rembo10_headphones/lib/apscheduler/triggers/cron/__init__.py
content:
from datetime import datetime, timedelta

from tzlocal import get_localzone
import six

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import (
    BaseField, MonthField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES)
from apscheduler.util import (
    datetime_ceil, convert_to_datetime, datetime_repr, astimezone, localize, normalize)


class CronTrigger(BaseTrigger):
    """
    Triggers when current time matches all specified time constraints,
    similarly to how the UNIX cron scheduler works.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of month (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
        to scheduler timezone)
    :param int|None jitter: delay the job execution by ``jitter`` seconds at most

    .. note:: The first weekday is always **monday**.
    """

    FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second')
    FIELDS_MAP = {
        'year': BaseField,
        'month': MonthField,
        'week': WeekField,
        'day': DayOfMonthField,
        'day_of_week': DayOfWeekField,
        'hour': BaseField,
        'minute': BaseField,
        'second': BaseField
    }

    __slots__ = 'timezone', 'start_date', 'end_date', 'fields', 'jitter'

    def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None,
                 minute=None, second=None, start_date=None, end_date=None, timezone=None,
                 jitter=None):
        if timezone:
            self.timezone = astimezone(timezone)
        elif isinstance(start_date, datetime) and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif isinstance(end_date, datetime) and end_date.tzinfo:
            self.timezone = end_date.tzinfo
        else:
            self.timezone = get_localzone()

        self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
        self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')

        self.jitter = jitter

        values = dict((key, value) for (key, value) in six.iteritems(locals())
                      if key in self.FIELD_NAMES and value is not None)
        self.fields = []
        assign_defaults = False
        for field_name in self.FIELD_NAMES:
            if field_name in values:
                exprs = values.pop(field_name)
                is_default = False
                assign_defaults = not values
            elif assign_defaults:
                exprs = DEFAULT_VALUES[field_name]
                is_default = True
            else:
                exprs = '*'
                is_default = True

            field_class = self.FIELDS_MAP[field_name]
            field = field_class(field_name, exprs, is_default)
            self.fields.append(field)

    @classmethod
    def from_crontab(cls, expr, timezone=None):
        """
        Create a :class:`~CronTrigger` from a standard crontab expression.
        See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here.

        :param expr: minute, hour, day of month, month, day of week
        :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (
            defaults to scheduler timezone)
        :return: a :class:`~CronTrigger` instance
        """
        values = expr.split()
        if len(values) != 5:
            raise ValueError('Wrong number of fields; got {}, expected 5'.format(len(values)))

        return cls(minute=values[0], hour=values[1], day=values[2], month=values[3],
                   day_of_week=values[4], timezone=timezone)

    def _increment_field_value(self, dateval, fieldnum):
        """
        Increments the designated field and resets all less significant fields to their minimum
        values.

        :type dateval: datetime
        :type fieldnum: int
        :return: a tuple containing the new date, and the number of the field that was actually
            incremented
        :rtype: tuple
        """

        values = {}
        i = 0
        while i < len(self.fields):
            field = self.fields[i]
            if not field.REAL:
                if i == fieldnum:
                    fieldnum -= 1
                    i -= 1
                else:
                    i += 1
                continue

            if i < fieldnum:
                values[field.name] = field.get_value(dateval)
                i += 1
            elif i > fieldnum:
                values[field.name] = field.get_min(dateval)
                i += 1
            else:
                value = field.get_value(dateval)
                maxval = field.get_max(dateval)
                if value == maxval:
                    fieldnum -= 1
                    i -= 1
                else:
                    values[field.name] = value + 1
                    i += 1

        difference = datetime(**values) - dateval.replace(tzinfo=None)
        return normalize(dateval + difference), fieldnum

    def _set_field_value(self, dateval, fieldnum, new_value):
        values = {}
        for i, field in enumerate(self.fields):
            if field.REAL:
                if i < fieldnum:
                    values[field.name] = field.get_value(dateval)
                elif i > fieldnum:
                    values[field.name] = field.get_min(dateval)
                else:
                    values[field.name] = new_value

        return localize(datetime(**values), self.timezone)

    def get_next_fire_time(self, previous_fire_time, now):
        if previous_fire_time:
            start_date = min(now, previous_fire_time + timedelta(microseconds=1))
            if start_date == previous_fire_time:
                start_date += timedelta(microseconds=1)
        else:
            start_date = max(now, self.start_date) if self.start_date else now

        fieldnum = 0
        next_date = datetime_ceil(start_date).astimezone(self.timezone)
        while 0 <= fieldnum < len(self.fields):
            field = self.fields[fieldnum]
            curr_value = field.get_value(next_date)
            next_value = field.get_next_value(next_date)

            if next_value is None:
                # No valid value was found
                next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1)
            elif next_value > curr_value:
                # A valid, but higher than the starting value, was found
                if field.REAL:
                    next_date = self._set_field_value(next_date, fieldnum, next_value)
                    fieldnum += 1
                else:
                    next_date, fieldnum = self._increment_field_value(next_date, fieldnum)
            else:
                # A valid value was found, no changes necessary
                fieldnum += 1

            # Return if the date has rolled past the end date
            if self.end_date and next_date > self.end_date:
                return None

        if fieldnum >= 0:
            next_date = self._apply_jitter(next_date, self.jitter, now)
            return min(next_date, self.end_date) if self.end_date else next_date

    def __getstate__(self):
        return {
            'version': 2,
            'timezone': self.timezone,
            'start_date': self.start_date,
            'end_date': self.end_date,
            'fields': self.fields,
            'jitter': self.jitter,
        }

    def __setstate__(self, state):
        # This is for compatibility with APScheduler 3.0.x
        if isinstance(state, tuple):
            state = state[1]

        if state.get('version', 1) > 2:
            raise ValueError(
                'Got serialized data for version %s of %s, but only versions up to 2 can be '
                'handled' % (state['version'], self.__class__.__name__))

        self.timezone = state['timezone']
        self.start_date = state['start_date']
        self.end_date = state['end_date']
        self.fields = state['fields']
        self.jitter = state.get('jitter')

    def __str__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        return 'cron[%s]' % (', '.join(options))

    def __repr__(self):
        options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        if self.start_date:
            options.append("start_date=%r" % datetime_repr(self.start_date))
        if self.end_date:
            options.append("end_date=%r" % datetime_repr(self.end_date))
        if self.jitter:
            options.append('jitter=%s' % self.jitter)

        return "<%s (%s, timezone='%s')>" % (
            self.__class__.__name__, ', '.join(options),
            self.timezone)
size: 9,251 | language: Python | extension: .py | total_lines: 202
avg_line_length: 34.420792 | max_line_length: 98 | alphanum_fraction: 0.579228
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
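A usage sketch of the documented from_crontab API; the crontab expression and job function are made up for illustration:

```python
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger

def nightly_task():
    print('running nightly maintenance')

scheduler = BackgroundScheduler()
# "30 2 * * mon-fri": 02:30 on weekdays; five fields, as documented above
trigger = CronTrigger.from_crontab('30 2 * * mon-fri', timezone='UTC')
scheduler.add_job(nightly_task, trigger)
scheduler.start()
```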
id: 8,402 | file_name: fields.py
file_path: rembo10_headphones/lib/apscheduler/triggers/cron/fields.py
content:
"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields.""" from calendar import monthrange import re import six from apscheduler.triggers.cron.expressions import ( AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression, MonthRangeExpression) __all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField') MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0} MAX_VALUES = {'year': 9999, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59} DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0} SEPARATOR = re.compile(' *, *') class BaseField(object): REAL = True COMPILERS = [AllExpression, RangeExpression] def __init__(self, name, exprs, is_default=False): self.name = name self.is_default = is_default self.compile_expressions(exprs) def get_min(self, dateval): return MIN_VALUES[self.name] def get_max(self, dateval): return MAX_VALUES[self.name] def get_value(self, dateval): return getattr(dateval, self.name) def get_next_value(self, dateval): smallest = None for expr in self.expressions: value = expr.get_next_value(dateval, self) if smallest is None or (value is not None and value < smallest): smallest = value return smallest def compile_expressions(self, exprs): self.expressions = [] # Split a comma-separated expression list, if any for expr in SEPARATOR.split(str(exprs).strip()): self.compile_expression(expr) def compile_expression(self, expr): for compiler in self.COMPILERS: match = compiler.value_re.match(expr) if match: compiled_expr = compiler(**match.groupdict()) try: compiled_expr.validate_range(self.name) except ValueError as e: exc = ValueError('Error validating expression {!r}: {}'.format(expr, e)) six.raise_from(exc, None) self.expressions.append(compiled_expr) return raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name)) def __eq__(self, other): return isinstance(self, self.__class__) and self.expressions == other.expressions def __str__(self): expr_strings = (str(e) for e in self.expressions) return ','.join(expr_strings) def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self) class WeekField(BaseField): REAL = False def get_value(self, dateval): return dateval.isocalendar()[1] class DayOfMonthField(BaseField): COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression] def get_max(self, dateval): return monthrange(dateval.year, dateval.month)[1] class DayOfWeekField(BaseField): REAL = False COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] def get_value(self, dateval): return dateval.weekday() class MonthField(BaseField): COMPILERS = BaseField.COMPILERS + [MonthRangeExpression]
size: 3,510 | language: Python | extension: .py | total_lines: 76
avg_line_length: 37.763158 | max_line_length: 96 | alphanum_fraction: 0.633127
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
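A small sketch of how a field compiles and evaluates its expression list; the dates and field values are illustrative, and the vendored apscheduler package is assumed importable:

```python
from datetime import datetime

from apscheduler.triggers.cron.fields import DayOfMonthField

# 'last' is handled by LastDayOfMonthExpression (registered in COMPILERS above)
field = DayOfMonthField('day', 'last')
print(field.get_next_value(datetime(2024, 2, 10)))  # -> 29 (2024 is a leap year)

# comma-separated lists compile to several expressions; the smallest hit wins
field = DayOfMonthField('day', '1,15')
print(field.get_next_value(datetime(2024, 2, 10)))  # -> 15
```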
id: 8,403 | file_name: redis.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/redis.py
content:
from __future__ import absolute_import

from datetime import datetime

from pytz import utc
import six

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from redis import Redis
except ImportError:  # pragma: nocover
    raise ImportError('RedisJobStore requires redis installed')


class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
    :class:`~redis.StrictRedis`.

    Plugin alias: ``redis``

    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = Redis(db=int(db), **connect_args)

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
                                                          self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key,
                          {job.id: datetime_to_utc_timestamp(job.next_run_time)})

            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
                                                          self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key,
                          {job.id: datetime_to_utc_timestamp(job.next_run_time)})
            else:
                pipe.zrem(self.run_times_key, job.id)

            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _reconstitute_jobs(self, job_states):
        jobs = []
        failed_job_ids = []
        for job_id, job_state in job_states:
            try:
                jobs.append(self._reconstitute_job(job_state))
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it', job_id)
                failed_job_ids.append(job_id)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            with self.redis.pipeline() as pipe:
                pipe.hdel(self.jobs_key, *failed_job_ids)
                pipe.zrem(self.run_times_key, *failed_job_ids)
                pipe.execute()

        return jobs

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
size: 5,483 | language: Python | extension: .py | total_lines: 120
avg_line_length: 35.433333 | max_line_length: 98 | alphanum_fraction: 0.616726
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
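A sketch of wiring this store into a scheduler; the host, port, and job function are placeholders, and a running Redis server is assumed:

```python
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.redis import RedisJobStore

scheduler = BackgroundScheduler()
# leftover kwargs (host, port, ...) go straight to redis.Redis
scheduler.add_jobstore(RedisJobStore(db=2, host='localhost', port=6379))

def heartbeat():
    print('tick')

# replace_existing lets the job survive restarts without duplicate-id errors
scheduler.add_job(heartbeat, 'interval', minutes=5, id='heartbeat',
                  replace_existing=True)
scheduler.start()
```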
id: 8,404 | file_name: memory.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/memory.py
content:
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp


class MemoryJobStore(BaseJobStore):
    """
    Stores jobs in an array in RAM. Provides no persistence support.

    Plugin alias: ``memory``
    """

    def __init__(self):
        super(MemoryJobStore, self).__init__()
        # list of (job, timestamp), sorted by next_run_time and job id (ascending)
        self._jobs = []
        self._jobs_index = {}  # id -> (job, timestamp) lookup table

    def lookup_job(self, job_id):
        return self._jobs_index.get(job_id, (None, None))[0]

    def get_due_jobs(self, now):
        now_timestamp = datetime_to_utc_timestamp(now)
        pending = []
        for job, timestamp in self._jobs:
            if timestamp is None or timestamp > now_timestamp:
                break
            pending.append(job)

        return pending

    def get_next_run_time(self):
        return self._jobs[0][0].next_run_time if self._jobs else None

    def get_all_jobs(self):
        return [j[0] for j in self._jobs]

    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)

    def update_job(self, job):
        old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
        if old_job is None:
            raise JobLookupError(job.id)

        # If the next run time has not changed, simply replace the job in its present index.
        # Otherwise, reinsert the job to the list to preserve the ordering.
        old_index = self._get_job_index(old_timestamp, old_job.id)
        new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
        if old_timestamp == new_timestamp:
            self._jobs[old_index] = (job, new_timestamp)
        else:
            del self._jobs[old_index]
            new_index = self._get_job_index(new_timestamp, job.id)
            self._jobs.insert(new_index, (job, new_timestamp))

        self._jobs_index[old_job.id] = (job, new_timestamp)

    def remove_job(self, job_id):
        job, timestamp = self._jobs_index.get(job_id, (None, None))
        if job is None:
            raise JobLookupError(job_id)

        index = self._get_job_index(timestamp, job_id)
        del self._jobs[index]
        del self._jobs_index[job.id]

    def remove_all_jobs(self):
        self._jobs = []
        self._jobs_index = {}

    def shutdown(self):
        self.remove_all_jobs()

    def _get_job_index(self, timestamp, job_id):
        """
        Returns the index of the given job, or if it's not found, the index where the job should
        be inserted based on the given timestamp.

        :type timestamp: int
        :type job_id: str
        """
        lo, hi = 0, len(self._jobs)
        timestamp = float('inf') if timestamp is None else timestamp
        while lo < hi:
            mid = (lo + hi) // 2
            mid_job, mid_timestamp = self._jobs[mid]
            mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
            if mid_timestamp > timestamp:
                hi = mid
            elif mid_timestamp < timestamp:
                lo = mid + 1
            elif mid_job.id > job_id:
                hi = mid
            elif mid_job.id < job_id:
                lo = mid + 1
            else:
                return mid

        return lo
3,655
Python
.py
85
33.447059
99
0.598534
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
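MemoryJobStore._get_job_index above is a hand-rolled binary search keyed on (timestamp, job id), where a None timestamp (a paused job) is treated as +inf so it sorts last. The same invariant, sketched with the standard bisect module and plain tuples standing in for jobs:

```python
import bisect

INF = float('inf')

def sort_key(timestamp, job_id):
    # a None next_run_time (paused job) sorts after every real timestamp
    return (INF if timestamp is None else timestamp, job_id)

keys = []  # sorted list of (timestamp, id) keys, mirroring MemoryJobStore._jobs
for ts, jid in [(100.0, 'b'), (None, 'z'), (100.0, 'a'), (50.0, 'c')]:
    bisect.insort(keys, sort_key(ts, jid))

print(keys)  # [(50.0, 'c'), (100.0, 'a'), (100.0, 'b'), (inf, 'z')]
```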
id: 8,405 | file_name: rethinkdb.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/rethinkdb.py
content:
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from rethinkdb import RethinkDB
except ImportError:  # pragma: nocover
    raise ImportError('RethinkDBJobStore requires rethinkdb installed')


class RethinkDBJobStore(BaseJobStore):
    """
    Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
    rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.

    Plugin alias: ``rethinkdb``

    :param str database: database to store jobs in
    :param str table: table to store jobs in
    :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
        connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, database='apscheduler', table='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RethinkDBJobStore, self).__init__()

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not table:
            raise ValueError('The "table" parameter must not be empty')

        self.database = database
        self.table_name = table
        self.table = None
        self.client = client
        self.pickle_protocol = pickle_protocol
        self.connect_args = connect_args
        self.r = RethinkDB()
        self.conn = None

    def start(self, scheduler, alias):
        super(RethinkDBJobStore, self).start(scheduler, alias)

        if self.client:
            self.conn = maybe_ref(self.client)
        else:
            self.conn = self.r.connect(db=self.database, **self.connect_args)

        if self.database not in self.r.db_list().run(self.conn):
            self.r.db_create(self.database).run(self.conn)

        if self.table_name not in self.r.table_list().run(self.conn):
            self.r.table_create(self.table_name).run(self.conn)

        if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn):
            self.r.table(self.table_name).index_create('next_run_time').run(self.conn)

        self.table = self.r.db(self.database).table(self.table_name)

    def lookup_job(self, job_id):
        results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn))
        return self._reconstitute_job(results[0]['job_state']) if results else None

    def get_due_jobs(self, now):
        return self._get_jobs(self.r.row['next_run_time'] <= datetime_to_utc_timestamp(now))

    def get_next_run_time(self):
        results = list(
            self.table
            .filter(self.r.row['next_run_time'] != None)  # noqa
            .order_by(self.r.asc('next_run_time'))
            .map(lambda x: x['next_run_time'])
            .limit(1)
            .run(self.conn)
        )
        return utc_timestamp_to_datetime(results[0]) if results else None

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        job_dict = {
            'id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        results = self.table.insert(job_dict).run(self.conn)
        if results['errors'] > 0:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        results = self.table.get_all(job.id).update(changes).run(self.conn)
        skipped = False in map(lambda x: results[x] == 0, results.keys())
        if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        results = self.table.get_all(job_id).delete().run(self.conn)
        if results['deleted'] + results['skipped'] != 1:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.table.delete().run(self.conn)

    def shutdown(self):
        self.conn.close()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, predicate=None):
        jobs = []
        failed_job_ids = []
        query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate)  # noqa
                 if predicate else self.table)
        query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')

        for document in query.run(self.conn):
            try:
                jobs.append(self._reconstitute_job(document['job_state']))
            except Exception:
                self._logger.exception('Unable to restore job "%s" -- removing it',
                                       document['id'])
                failed_job_ids.append(document['id'])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.r.expr(failed_job_ids).for_each(
                lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)

        return jobs

    def __repr__(self):
        connection = self.conn
        return '<%s (connection=%s)>' % (self.__class__.__name__, connection)
size: 5,863 | language: Python | extension: .py | total_lines: 124
avg_line_length: 38.306452 | max_line_length: 99 | alphanum_fraction: 0.632446
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
id: 8,406 | file_name: sqlalchemy.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/sqlalchemy.py
content:
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from sqlalchemy import (
        create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select, and_)
    from sqlalchemy.exc import IntegrityError
    from sqlalchemy.sql.expression import null
except ImportError:  # pragma: nocover
    raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')


class SQLAlchemyJobStore(BaseJobStore):
    """
    Stores jobs in a database table using SQLAlchemy.
    The table will be created if it doesn't exist in the database.

    Plugin alias: ``sqlalchemy``

    :param str url: connection string (see
        :ref:`SQLAlchemy documentation <sqlalchemy:database_urls>` on this)
    :param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a
        new one based on ``url``
    :param str tablename: name of the table to store jobs in
    :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a
        new one
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    :param str tableschema: name of the (existing) schema in the target database where the table
        should be
    :param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine`
        (ignored if ``engine`` is given)
    """

    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, tableschema=None, engine_options=None):
        super(SQLAlchemyJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()

        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url, **(engine_options or {}))
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
        # 25 = precision that translates to an 8-byte float
        self.jobs_t = Table(
            tablename, metadata,
            Column('id', Unicode(191), primary_key=True),
            Column('next_run_time', Float(25), index=True),
            Column('job_state', LargeBinary, nullable=False),
            schema=tableschema
        )

    def start(self, scheduler, alias):
        super(SQLAlchemyJobStore, self).start(scheduler, alias)
        self.jobs_t.create(self.engine, True)

    def lookup_job(self, job_id):
        selectable = select(self.jobs_t.c.job_state).where(self.jobs_t.c.id == job_id)
        with self.engine.begin() as connection:
            job_state = connection.execute(selectable).scalar()
            return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)

    def get_next_run_time(self):
        selectable = select(self.jobs_t.c.next_run_time).\
            where(self.jobs_t.c.next_run_time != null()).\
            order_by(self.jobs_t.c.next_run_time).limit(1)
        with self.engine.begin() as connection:
            next_run_time = connection.execute(selectable).scalar()
            return utc_timestamp_to_datetime(next_run_time)

    def get_all_jobs(self):
        jobs = self._get_jobs()
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        insert = self.jobs_t.insert().values(**{
            'id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        })
        with self.engine.begin() as connection:
            try:
                connection.execute(insert)
            except IntegrityError:
                raise ConflictingIdError(job.id)

    def update_job(self, job):
        update = self.jobs_t.update().values(**{
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        }).where(self.jobs_t.c.id == job.id)
        with self.engine.begin() as connection:
            result = connection.execute(update)
            if result.rowcount == 0:
                raise JobLookupError(job.id)

    def remove_job(self, job_id):
        delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
        with self.engine.begin() as connection:
            result = connection.execute(delete)
            if result.rowcount == 0:
                raise JobLookupError(job_id)

    def remove_all_jobs(self):
        delete = self.jobs_t.delete()
        with self.engine.begin() as connection:
            connection.execute(delete)

    def shutdown(self):
        self.engine.dispose()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job_state['jobstore'] = self
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, *conditions):
        jobs = []
        selectable = select(self.jobs_t.c.id, self.jobs_t.c.job_state).\
            order_by(self.jobs_t.c.next_run_time)
        selectable = selectable.where(and_(*conditions)) if conditions else selectable
        failed_job_ids = set()
        with self.engine.begin() as connection:
            for row in connection.execute(selectable):
                try:
                    jobs.append(self._reconstitute_job(row.job_state))
                except BaseException:
                    self._logger.exception('Unable to restore job "%s" -- removing it', row.id)
                    failed_job_ids.add(row.id)

            # Remove all the jobs we failed to restore
            if failed_job_ids:
                delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
                connection.execute(delete)

        return jobs

    def __repr__(self):
        return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
size: 6,529 | language: Python | extension: .py | total_lines: 136
avg_line_length: 38.610294 | max_line_length: 97 | alphanum_fraction: 0.638191
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
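A sketch of wiring this store into a scheduler; the SQLite URL is illustrative (any SQLAlchemy connection string works):

```python
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

scheduler = BackgroundScheduler()
# jobs are pickled into the apscheduler_jobs table, created on start() if missing
scheduler.add_jobstore(SQLAlchemyJobStore(url='sqlite:///jobs.sqlite'))
scheduler.start()
```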
id: 8,407 | file_name: base.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/base.py
content:
from abc import ABCMeta, abstractmethod
import logging

import six


class JobLookupError(KeyError):
    """Raised when the job store cannot find a job for update or removal."""

    def __init__(self, job_id):
        super(JobLookupError, self).__init__(u'No job by the id of %s was found' % job_id)


class ConflictingIdError(KeyError):
    """Raised when the uniqueness of job IDs is being violated."""

    def __init__(self, job_id):
        super(ConflictingIdError, self).__init__(
            u'Job identifier (%s) conflicts with an existing job' % job_id)


class TransientJobError(ValueError):
    """
    Raised when an attempt to add transient (with no func_ref) job to a persistent job store is
    detected.
    """

    def __init__(self, job_id):
        super(TransientJobError, self).__init__(
            u'Job (%s) cannot be added to this job store because a reference to the callable '
            u'could not be determined.' % job_id)


class BaseJobStore(six.with_metaclass(ABCMeta)):
    """Abstract base class that defines the interface that every job store must implement."""

    _scheduler = None
    _alias = None
    _logger = logging.getLogger('apscheduler.jobstores')

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the job store is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this job store
        :param str|unicode alias: alias of this job store as it was assigned to the scheduler
        """
        self._scheduler = scheduler
        self._alias = alias
        self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias)

    def shutdown(self):
        """Frees any resources still bound to this job store."""

    def _fix_paused_jobs_sorting(self, jobs):
        for i, job in enumerate(jobs):
            if job.next_run_time is not None:
                if i > 0:
                    paused_jobs = jobs[:i]
                    del jobs[:i]
                    jobs.extend(paused_jobs)
                break

    @abstractmethod
    def lookup_job(self, job_id):
        """
        Returns a specific job, or ``None`` if it isn't found.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
        the returned job to point to the scheduler and itself, respectively.

        :param str|unicode job_id: identifier of the job
        :rtype: Job
        """

    @abstractmethod
    def get_due_jobs(self, now):
        """
        Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``.
        The returned jobs must be sorted by next run time (ascending).

        :param datetime.datetime now: the current (timezone aware) datetime
        :rtype: list[Job]
        """

    @abstractmethod
    def get_next_run_time(self):
        """
        Returns the earliest run time of all the jobs stored in this job store, or ``None`` if
        there are no active jobs.

        :rtype: datetime.datetime
        """

    @abstractmethod
    def get_all_jobs(self):
        """
        Returns a list of all jobs in this job store.
        The returned jobs should be sorted by next run time (ascending).
        Paused jobs (next_run_time == None) should be sorted last.

        The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
        the returned jobs to point to the scheduler and itself, respectively.

        :rtype: list[Job]
        """

    @abstractmethod
    def add_job(self, job):
        """
        Adds the given job to this store.

        :param Job job: the job to add
        :raises ConflictingIdError: if there is another job in this store with the same ID
        """

    @abstractmethod
    def update_job(self, job):
        """
        Replaces the job in the store with the given newer version.

        :param Job job: the job to update
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_job(self, job_id):
        """
        Removes the given job from this store.

        :param str|unicode job_id: identifier of the job
        :raises JobLookupError: if the job does not exist
        """

    @abstractmethod
    def remove_all_jobs(self):
        """Removes all jobs from this store."""

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
size: 4,523 | language: Python | extension: .py | total_lines: 107
avg_line_length: 34.018692 | max_line_length: 98 | alphanum_fraction: 0.634703
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
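The abstract methods above define the whole job store contract. A toy dict-backed subclass makes that concrete; the class name and storage scheme here are illustrative only, not something from this repo:

```python
from datetime import datetime

from pytz import utc

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError

# sort key pattern used by several stores in this repo: paused jobs
# (no next_run_time) sort after everything else
PAUSED_SORT_KEY = datetime(9999, 12, 31, tzinfo=utc)


class DictJobStore(BaseJobStore):
    """Toy non-persistent store; real stores must also set the _scheduler and
    _jobstore_alias attributes when reconstituting serialized jobs."""

    def __init__(self):
        super(DictJobStore, self).__init__()
        self._jobs = {}

    def _sorted(self):
        return sorted(self._jobs.values(),
                      key=lambda job: job.next_run_time or PAUSED_SORT_KEY)

    def lookup_job(self, job_id):
        return self._jobs.get(job_id)

    def get_due_jobs(self, now):
        return [job for job in self._sorted()
                if job.next_run_time is not None and job.next_run_time <= now]

    def get_next_run_time(self):
        due = [job.next_run_time for job in self._jobs.values() if job.next_run_time]
        return min(due) if due else None

    def get_all_jobs(self):
        return self._sorted()

    def add_job(self, job):
        if job.id in self._jobs:
            raise ConflictingIdError(job.id)
        self._jobs[job.id] = job

    def update_job(self, job):
        if job.id not in self._jobs:
            raise JobLookupError(job.id)
        self._jobs[job.id] = job

    def remove_job(self, job_id):
        if self._jobs.pop(job_id, None) is None:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self._jobs.clear()
```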
id: 8,408 | file_name: mongodb.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/mongodb.py
content:
from __future__ import absolute_import

import warnings

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from bson.binary import Binary
    from pymongo.errors import DuplicateKeyError
    from pymongo import MongoClient, ASCENDING
except ImportError:  # pragma: nocover
    raise ImportError('MongoDBJobStore requires PyMongo installed')


class MongoDBJobStore(BaseJobStore):
    """
    Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
    pymongo's `MongoClient
    <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.

    Plugin alias: ``mongodb``

    :param str database: database to store jobs in
    :param str collection: collection to store jobs in
    :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, database='apscheduler', collection='jobs', client=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(MongoDBJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol

        if not database:
            raise ValueError('The "database" parameter must not be empty')
        if not collection:
            raise ValueError('The "collection" parameter must not be empty')

        if client:
            self.client = maybe_ref(client)
        else:
            connect_args.setdefault('w', 1)
            self.client = MongoClient(**connect_args)

        self.collection = self.client[database][collection]

    def start(self, scheduler, alias):
        super(MongoDBJobStore, self).start(scheduler, alias)
        self.collection.create_index('next_run_time', sparse=True)

    @property
    def connection(self):
        warnings.warn('The "connection" member is deprecated -- use "client" instead',
                      DeprecationWarning)
        return self.client

    def lookup_job(self, job_id):
        document = self.collection.find_one(job_id, ['job_state'])
        return self._reconstitute_job(document['job_state']) if document else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        return self._get_jobs({'next_run_time': {'$lte': timestamp}})

    def get_next_run_time(self):
        document = self.collection.find_one({'next_run_time': {'$ne': None}},
                                            projection=['next_run_time'],
                                            sort=[('next_run_time', ASCENDING)])
        return utc_timestamp_to_datetime(document['next_run_time']) if document else None

    def get_all_jobs(self):
        jobs = self._get_jobs({})
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        try:
            self.collection.insert_one({
                '_id': job.id,
                'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
                'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
            })
        except DuplicateKeyError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
        }
        result = self.collection.update_one({'_id': job.id}, {'$set': changes})
        if result and result.matched_count == 0:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        result = self.collection.delete_one({'_id': job_id})
        if result and result.deleted_count == 0:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        self.collection.delete_many({})

    def shutdown(self):
        self.client.close()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self, conditions):
        jobs = []
        failed_job_ids = []
        for document in self.collection.find(conditions, ['_id', 'job_state'],
                                             sort=[('next_run_time', ASCENDING)]):
            try:
                jobs.append(self._reconstitute_job(document['job_state']))
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it',
                                       document['_id'])
                failed_job_ids.append(document['_id'])

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            self.collection.delete_many({'_id': {'$in': failed_job_ids}})

        return jobs

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
size: 5,347 | language: Python | extension: .py | total_lines: 114
avg_line_length: 37.114035 | max_line_length: 109 | alphanum_fraction: 0.627161
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
id: 8,409 | file_name: zookeeper.py
file_path: rembo10_headphones/lib/apscheduler/jobstores/zookeeper.py
content:
from __future__ import absolute_import

from datetime import datetime

from pytz import utc
from kazoo.exceptions import NoNodeError, NodeExistsError

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
    import cPickle as pickle
except ImportError:  # pragma: nocover
    import pickle

try:
    from kazoo.client import KazooClient
except ImportError:  # pragma: nocover
    raise ImportError('ZooKeeperJobStore requires Kazoo installed')


class ZooKeeperJobStore(BaseJobStore):
    """
    Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
    kazoo's `KazooClient <http://kazoo.readthedocs.io/en/latest/api/client.html>`_.

    Plugin alias: ``zookeeper``

    :param str path: path to store jobs in
    :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(ZooKeeperJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False

    def _ensure_paths(self):
        if not self._ensured_path:
            self.client.ensure_path(self.path)
            self._ensured_path = True

    def start(self, scheduler, alias):
        super(ZooKeeperJobStore, self).start(scheduler, alias)
        if not self.client.connected:
            self.client.start()

    def lookup_job(self, job_id):
        self._ensure_paths()
        node_path = self.path + "/" + str(job_id)
        try:
            content, _ = self.client.get(node_path)
            doc = pickle.loads(content)
            job = self._reconstitute_job(doc['job_state'])
            return job
        except BaseException:
            return None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        jobs = [job_def['job'] for job_def in self._get_jobs()
                if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
        return jobs

    def get_next_run_time(self):
        next_runs = [job_def['next_run_time'] for job_def in self._get_jobs()
                     if job_def['next_run_time'] is not None]
        return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None

    def get_all_jobs(self):
        jobs = [job_def['job'] for job_def in self._get_jobs()]
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        self._ensure_paths()
        node_path = self.path + "/" + str(job.id)
        value = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(value, self.pickle_protocol)
        try:
            self.client.create(node_path, value=data)
        except NodeExistsError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        self._ensure_paths()
        node_path = self.path + "/" + str(job.id)
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        try:
            self.client.set(node_path, value=data)
        except NoNodeError:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        self._ensure_paths()
        node_path = self.path + "/" + str(job_id)
        try:
            self.client.delete(node_path)
        except NoNodeError:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        try:
            self.client.delete(self.path, recursive=True)
        except NoNodeError:
            pass
        self._ensured_path = False

    def shutdown(self):
        if self.close_connection_on_exit:
            self.client.stop()
            self.client.close()

    def _reconstitute_job(self, job_state):
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        self._ensure_paths()
        jobs = []
        failed_job_ids = []
        all_ids = self.client.get_children(self.path)
        for node_name in all_ids:
            try:
                node_path = self.path + "/" + node_name
                content, _ = self.client.get(node_path)
                doc = pickle.loads(content)
                job_def = {
                    'job_id': node_name,
                    'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
                    'job_state': doc['job_state'],
                    'job': self._reconstitute_job(doc['job_state']),
                    'creation_time': _.ctime
                }
                jobs.append(job_def)
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it' % node_name)
                failed_job_ids.append(node_name)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            for failed_id in failed_job_ids:
                self.remove_job(failed_id)
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
                                                 job_def['creation_time']))

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
size: 6,363 | language: Python | extension: .py | total_lines: 150
avg_line_length: 32.546667 | max_line_length: 98 | alphanum_fraction: 0.599838
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
id: 8,410 | file_name: gevent.py
file_path: rembo10_headphones/lib/apscheduler/schedulers/gevent.py
content:
from __future__ import absolute_import

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.base import BaseScheduler

try:
    from gevent.event import Event
    from gevent.lock import RLock
    import gevent
except ImportError:  # pragma: nocover
    raise ImportError('GeventScheduler requires gevent installed')


class GeventScheduler(BlockingScheduler):
    """A scheduler that runs as a Gevent greenlet."""

    _greenlet = None

    def start(self, *args, **kwargs):
        self._event = Event()
        BaseScheduler.start(self, *args, **kwargs)
        self._greenlet = gevent.spawn(self._main_loop)
        return self._greenlet

    def shutdown(self, *args, **kwargs):
        super(GeventScheduler, self).shutdown(*args, **kwargs)
        self._greenlet.join()
        del self._greenlet

    def _create_lock(self):
        return RLock()

    def _create_default_executor(self):
        from apscheduler.executors.gevent import GeventExecutor
        return GeventExecutor()
size: 1,031 | language: Python | extension: .py | total_lines: 26
avg_line_length: 33.692308 | max_line_length: 66 | alphanum_fraction: 0.715863
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
id: 8,411 | file_name: twisted.py
file_path: rembo10_headphones/lib/apscheduler/schedulers/twisted.py
content:
from __future__ import absolute_import

from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from twisted.internet import reactor as default_reactor
except ImportError:  # pragma: nocover
    raise ImportError('TwistedScheduler requires Twisted installed')


def run_in_reactor(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._reactor.callFromThread(func, self, *args, **kwargs)
    return wrapper


class TwistedScheduler(BaseScheduler):
    """
    A scheduler that runs on a Twisted reactor.

    Extra options:

    =========== ========================================================
    ``reactor`` Reactor instance to use (defaults to the global reactor)
    =========== ========================================================
    """

    _reactor = None
    _delayedcall = None

    def _configure(self, config):
        self._reactor = maybe_ref(config.pop('reactor', default_reactor))
        super(TwistedScheduler, self)._configure(config)

    @run_in_reactor
    def shutdown(self, wait=True):
        super(TwistedScheduler, self).shutdown(wait)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._delayedcall and self._delayedcall.active():
            self._delayedcall.cancel()
            del self._delayedcall

    @run_in_reactor
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.twisted import TwistedExecutor
        return TwistedExecutor()
size: 1,844 | language: Python | extension: .py | total_lines: 46
avg_line_length: 33.782609 | max_line_length: 82 | alphanum_fraction: 0.640292
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
id: 8,412 | file_name: asyncio.py
file_path: rembo10_headphones/lib/apscheduler/schedulers/asyncio.py
content:
from __future__ import absolute_import

import asyncio
from functools import wraps, partial

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref


def run_in_event_loop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        wrapped = partial(func, self, *args, **kwargs)
        self._eventloop.call_soon_threadsafe(wrapped)
    return wrapper


class AsyncIOScheduler(BaseScheduler):
    """
    A scheduler that runs on an asyncio (:pep:`3156`) event loop.

    The default executor can run jobs based on native coroutines (``async def``).

    Extra options:

    ============== =============================================================
    ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
    ============== =============================================================
    """

    _eventloop = None
    _timeout = None

    def start(self, paused=False):
        if not self._eventloop:
            self._eventloop = asyncio.get_event_loop()

        super(AsyncIOScheduler, self).start(paused)

    @run_in_event_loop
    def shutdown(self, wait=True):
        super(AsyncIOScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        self._eventloop = maybe_ref(config.pop('event_loop', None))
        super(AsyncIOScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._timeout:
            self._timeout.cancel()
            del self._timeout

    @run_in_event_loop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.asyncio import AsyncIOExecutor
        return AsyncIOExecutor()
size: 1,994 | language: Python | extension: .py | total_lines: 49
avg_line_length: 33.959184 | max_line_length: 81 | alphanum_fraction: 0.619295
repo_name: rembo10/headphones | repo_stars: 3,370 | repo_forks: 601 | repo_open_issues: 527 | repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:38 PM (Europe/Amsterdam)
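A usage sketch with a native coroutine job; the interval and job body are made up, and the run_forever pattern assumes this py2/3-era codebase's expectation that asyncio.get_event_loop() returns the global loop:

```python
import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def poll():
    print('polling')

scheduler = AsyncIOScheduler()
scheduler.add_job(poll, 'interval', seconds=10)
scheduler.start()  # schedules onto the asyncio event loop

try:
    asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
    pass
```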
id: 8,413 | file_name: tornado.py
file_path: rembo10_headphones/lib/apscheduler/schedulers/tornado.py
content:
from __future__ import absolute_import

from datetime import timedelta
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from tornado.ioloop import IOLoop
except ImportError:  # pragma: nocover
    raise ImportError('TornadoScheduler requires tornado installed')


def run_in_ioloop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._ioloop.add_callback(func, self, *args, **kwargs)
    return wrapper


class TornadoScheduler(BaseScheduler):
    """
    A scheduler that runs on a Tornado IOLoop.

    The default executor can run jobs based on native coroutines (``async def``).

    =========== ===============================================================
    ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
    =========== ===============================================================
    """

    _ioloop = None
    _timeout = None

    @run_in_ioloop
    def shutdown(self, wait=True):
        super(TornadoScheduler, self).shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
        super(TornadoScheduler, self)._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup)

    def _stop_timer(self):
        if self._timeout:
            self._ioloop.remove_timeout(self._timeout)
            del self._timeout

    def _create_default_executor(self):
        from apscheduler.executors.tornado import TornadoExecutor
        return TornadoExecutor()

    @run_in_ioloop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)
1,926
Python
.py
47
34.787234
98
0.634997
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
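A similar hedged sketch for TornadoScheduler, assuming tornado is installed; `tick` and the interval are again illustrative:

from datetime import datetime

from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    # Hypothetical job function
    print('Tick! The time is %s' % datetime.now())


scheduler = TornadoScheduler()
scheduler.add_job(tick, 'interval', seconds=3)
scheduler.start()  # calls are marshalled through IOLoop.add_callback (run_in_ioloop above)

try:
    IOLoop.current().start()
except (KeyboardInterrupt, SystemExit):
    pass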
8,414
qt.py
rembo10_headphones/lib/apscheduler/schedulers/qt.py
from __future__ import absolute_import

from apscheduler.schedulers.base import BaseScheduler

try:
    from PyQt5.QtCore import QObject, QTimer
except (ImportError, RuntimeError):  # pragma: nocover
    try:
        from PyQt4.QtCore import QObject, QTimer
    except ImportError:
        try:
            from PySide6.QtCore import QObject, QTimer  # noqa
        except ImportError:
            try:
                from PySide2.QtCore import QObject, QTimer  # noqa
            except ImportError:
                try:
                    from PySide.QtCore import QObject, QTimer  # noqa
                except ImportError:
                    raise ImportError('QtScheduler requires either PyQt5, PyQt4, PySide6, PySide2 '
                                      'or PySide installed')


class QtScheduler(BaseScheduler):
    """A scheduler that runs in a Qt event loop."""

    _timer = None

    def shutdown(self, *args, **kwargs):
        super(QtScheduler, self).shutdown(*args, **kwargs)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            wait_time = min(int(wait_seconds * 1000), 2147483647)
            self._timer = QTimer.singleShot(wait_time, self._process_jobs)

    def _stop_timer(self):
        if self._timer:
            if self._timer.isActive():
                self._timer.stop()
            del self._timer

    def wakeup(self):
        self._start_timer(0)

    def _process_jobs(self):
        wait_seconds = super(QtScheduler, self)._process_jobs()
        self._start_timer(wait_seconds)
1,613
Python
.py
40
30.225
99
0.609085
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
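A usage sketch for QtScheduler, assuming PyQt5 (any of the bindings probed above would do); the label-updating `tick` job is illustrative:

import sys
from datetime import datetime

from PyQt5.QtWidgets import QApplication, QLabel
from apscheduler.schedulers.qt import QtScheduler


def tick():
    # Hypothetical job: runs on the Qt event loop, so touching widgets is safe
    label.setText('Tick! The time is %s' % datetime.now())


app = QApplication(sys.argv)
label = QLabel('The timer text will appear here in a moment!')
label.show()

scheduler = QtScheduler()
scheduler.add_job(tick, 'interval', seconds=3)
scheduler.start()

sys.exit(app.exec_())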
8,415
background.py
rembo10_headphones/lib/apscheduler/schedulers/background.py
from __future__ import absolute_import

from threading import Thread, Event

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool


class BackgroundScheduler(BlockingScheduler):
    """
    A scheduler that runs in the background using a separate thread
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).

    Extra options:

    ========== =============================================================================
    ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
               `the documentation
               <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
               for further details)
    ========== =============================================================================
    """

    _thread = None

    def _configure(self, config):
        self._daemon = asbool(config.pop('daemon', True))
        super(BackgroundScheduler, self)._configure(config)

    def start(self, *args, **kwargs):
        if self._event is None or self._event.is_set():
            self._event = Event()

        BaseScheduler.start(self, *args, **kwargs)
        self._thread = Thread(target=self._main_loop, name='APScheduler')
        self._thread.daemon = self._daemon
        self._thread.start()

    def shutdown(self, *args, **kwargs):
        super(BackgroundScheduler, self).shutdown(*args, **kwargs)
        self._thread.join()
        del self._thread
1,566
Python
.py
32
41.8125
92
0.601445
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
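A sketch of the non-blocking start() contract described above; the `daemon` keyword maps to the Thread.daemon flag set in start(), and `tick` is illustrative:

import time
from datetime import datetime

from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    # Hypothetical job function
    print('Tick! The time is %s' % datetime.now())


scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(tick, 'interval', seconds=3)
scheduler.start()  # returns immediately; _main_loop() runs in the 'APScheduler' thread

try:
    while True:
        time.sleep(1)  # keep the main thread alive so the daemon thread can run
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()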
8,416
__init__.py
rembo10_headphones/lib/apscheduler/schedulers/__init__.py
class SchedulerAlreadyRunningError(Exception):
    """Raised when attempting to start or configure the scheduler when it's already running."""

    def __str__(self):
        return 'Scheduler is already running'


class SchedulerNotRunningError(Exception):
    """Raised when attempting to shutdown the scheduler when it's not running."""

    def __str__(self):
        return 'Scheduler is not running'
406
Python
.py
8
45.25
95
0.730964
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,417
blocking.py
rembo10_headphones/lib/apscheduler/schedulers/blocking.py
from __future__ import absolute_import

from threading import Event

from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED
from apscheduler.util import TIMEOUT_MAX


class BlockingScheduler(BaseScheduler):
    """
    A scheduler that runs in the foreground
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
    """

    _event = None

    def start(self, *args, **kwargs):
        if self._event is None or self._event.is_set():
            self._event = Event()

        super(BlockingScheduler, self).start(*args, **kwargs)
        self._main_loop()

    def shutdown(self, wait=True):
        super(BlockingScheduler, self).shutdown(wait)
        self._event.set()

    def _main_loop(self):
        wait_seconds = TIMEOUT_MAX
        while self.state != STATE_STOPPED:
            self._event.wait(wait_seconds)
            self._event.clear()
            wait_seconds = self._process_jobs()

    def wakeup(self):
        self._event.set()
985
Python
.py
26
30.846154
74
0.661053
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
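For contrast with BackgroundScheduler, a sketch where start() blocks in _main_loop(); `tick` is illustrative:

from datetime import datetime

from apscheduler.schedulers.blocking import BlockingScheduler


def tick():
    # Hypothetical job function
    print('Tick! The time is %s' % datetime.now())


scheduler = BlockingScheduler()
scheduler.add_job(tick, 'interval', seconds=3)

try:
    scheduler.start()  # blocks until shutdown() sets the internal Event
except (KeyboardInterrupt, SystemExit):
    pass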
8,418
base.py
rembo10_headphones/lib/apscheduler/schedulers/base.py
from __future__ import print_function

from abc import ABCMeta, abstractmethod
from threading import RLock
from datetime import datetime, timedelta
from logging import getLogger
import warnings
import sys

from pkg_resources import iter_entry_points
from tzlocal import get_localzone
import six

from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError
from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.job import Job
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
    asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined, TIMEOUT_MAX)
from apscheduler.events import (
    SchedulerEvent, JobEvent, JobSubmissionEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN,
    EVENT_JOBSTORE_ADDED, EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED,
    EVENT_JOB_REMOVED, EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED,
    EVENT_ALL_JOBS_REMOVED, EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES,
    EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED)

try:
    from collections.abc import MutableMapping
except ImportError:
    from collections import MutableMapping

#: constant indicating a scheduler's stopped state
STATE_STOPPED = 0
#: constant indicating a scheduler's running state (started and processing jobs)
STATE_RUNNING = 1
#: constant indicating a scheduler's paused state (started but not processing jobs)
STATE_PAUSED = 2


class BaseScheduler(six.with_metaclass(ABCMeta)):
    """
    Abstract base class for all schedulers.

    Takes the following keyword arguments:

    :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to
        apscheduler.scheduler)
    :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone)
    :param int|float jobstore_retry_interval: the minimum number of seconds to wait between
        retries in the scheduler's main loop if the job store raises an exception when getting
        the list of due jobs
    :param dict job_defaults: default values for newly added jobs
    :param dict jobstores: a dictionary of job store alias -> job store instance or configuration
        dict
    :param dict executors: a dictionary of executor alias -> executor instance or configuration
        dict

    :ivar int state: current running state of the scheduler (one of the following constants from
        ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``)

    .. seealso:: :ref:`scheduler-config`
    """

    _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers'))
    _trigger_classes = {}
    _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors'))
    _executor_classes = {}
    _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores'))
    _jobstore_classes = {}

    #
    # Public API
    #

    def __init__(self, gconfig={}, **options):
        super(BaseScheduler, self).__init__()
        self._executors = {}
        self._executors_lock = self._create_lock()
        self._jobstores = {}
        self._jobstores_lock = self._create_lock()
        self._listeners = []
        self._listeners_lock = self._create_lock()
        self._pending_jobs = []
        self.state = STATE_STOPPED
        self.configure(gconfig, **options)

    def __getstate__(self):
        raise TypeError("Schedulers cannot be serialized. Ensure that you are not passing a "
                        "scheduler instance as an argument to a job, or scheduling an instance "
                        "method where the instance contains a scheduler as an attribute.")

    def configure(self, gconfig={}, prefix='apscheduler.', **options):
        """
        Reconfigures the scheduler with the given options.

        Can only be done when the scheduler isn't running.

        :param dict gconfig: a "global" configuration dictionary whose values can be overridden by
            keyword arguments to this method
        :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with
            this string (pass an empty string or ``None`` to use all keys)
        :raises SchedulerAlreadyRunningError: if the scheduler is already running

        """
        if self.state != STATE_STOPPED:
            raise SchedulerAlreadyRunningError

        # If a non-empty prefix was given, strip it from the keys in the
        # global configuration dict
        if prefix:
            prefixlen = len(prefix)
            gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig)
                           if key.startswith(prefix))

        # Create a structure from the dotted options
        # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}})
        config = {}
        for key, value in six.iteritems(gconfig):
            parts = key.split('.')
            parent = config
            key = parts.pop(0)
            while parts:
                parent = parent.setdefault(key, {})
                key = parts.pop(0)
            parent[key] = value

        # Override any options with explicit keyword arguments
        config.update(options)
        self._configure(config)

    def start(self, paused=False):
        """
        Start the configured executors and job stores and begin processing scheduled jobs.

        :param bool paused: if ``True``, don't start job processing until :meth:`resume` is called
        :raises SchedulerAlreadyRunningError: if the scheduler is already running
        :raises RuntimeError: if running under uWSGI with threads disabled

        """
        if self.state != STATE_STOPPED:
            raise SchedulerAlreadyRunningError

        self._check_uwsgi()

        with self._executors_lock:
            # Create a default executor if nothing else is configured
            if 'default' not in self._executors:
                self.add_executor(self._create_default_executor(), 'default')

            # Start all the executors
            for alias, executor in six.iteritems(self._executors):
                executor.start(self, alias)

        with self._jobstores_lock:
            # Create a default job store if nothing else is configured
            if 'default' not in self._jobstores:
                self.add_jobstore(self._create_default_jobstore(), 'default')

            # Start all the job stores
            for alias, store in six.iteritems(self._jobstores):
                store.start(self, alias)

            # Schedule all pending jobs
            for job, jobstore_alias, replace_existing in self._pending_jobs:
                self._real_add_job(job, jobstore_alias, replace_existing)
            del self._pending_jobs[:]

        self.state = STATE_PAUSED if paused else STATE_RUNNING
        self._logger.info('Scheduler started')
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START))

        if not paused:
            self.wakeup()

    @abstractmethod
    def shutdown(self, wait=True):
        """
        Shuts down the scheduler, along with its executors and job stores.

        Does not interrupt any currently running jobs.

        :param bool wait: ``True`` to wait until all currently executing jobs have finished
        :raises SchedulerNotRunningError: if the scheduler has not been started yet

        """
        if self.state == STATE_STOPPED:
            raise SchedulerNotRunningError

        self.state = STATE_STOPPED

        # Shut down all executors and job stores
        with self._executors_lock, self._jobstores_lock:
            for executor in six.itervalues(self._executors):
                executor.shutdown(wait)

            for jobstore in six.itervalues(self._jobstores):
                jobstore.shutdown()

        self._logger.info('Scheduler has been shut down')
        self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

    def pause(self):
        """
        Pause job processing in the scheduler.

        This will prevent the scheduler from waking up to do job processing until :meth:`resume`
        is called. It will not however stop any already running job processing.

        """
        if self.state == STATE_STOPPED:
            raise SchedulerNotRunningError
        elif self.state == STATE_RUNNING:
            self.state = STATE_PAUSED
            self._logger.info('Paused scheduler job processing')
            self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED))

    def resume(self):
        """Resume job processing in the scheduler."""
        if self.state == STATE_STOPPED:
            raise SchedulerNotRunningError
        elif self.state == STATE_PAUSED:
            self.state = STATE_RUNNING
            self._logger.info('Resumed scheduler job processing')
            self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED))
            self.wakeup()

    @property
    def running(self):
        """
        Return ``True`` if the scheduler has been started.

        This is a shortcut for ``scheduler.state != STATE_STOPPED``.

        """
        return self.state != STATE_STOPPED

    def add_executor(self, executor, alias='default', **executor_opts):
        """
        Adds an executor to this scheduler.

        Any extra keyword arguments will be passed to the executor plugin's constructor, assuming
        that the first argument is the name of an executor plugin.

        :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor
            instance or the name of an executor plugin
        :param str|unicode alias: alias for the executor
        :raises ValueError: if there is already an executor by the given alias

        """
        with self._executors_lock:
            if alias in self._executors:
                raise ValueError('This scheduler already has an executor by the alias of "%s"' %
                                 alias)

            if isinstance(executor, BaseExecutor):
                self._executors[alias] = executor
            elif isinstance(executor, six.string_types):
                self._executors[alias] = executor = self._create_plugin_instance(
                    'executor', executor, executor_opts)
            else:
                raise TypeError('Expected an executor instance or a string, got %s instead' %
                                executor.__class__.__name__)

            # Start the executor right away if the scheduler is running
            if self.state != STATE_STOPPED:
                executor.start(self, alias)

        self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias))

    def remove_executor(self, alias, shutdown=True):
        """
        Removes the executor by the given alias from this scheduler.

        :param str|unicode alias: alias of the executor
        :param bool shutdown: ``True`` to shut down the executor after removing it

        """
        with self._executors_lock:
            executor = self._lookup_executor(alias)
            del self._executors[alias]

        if shutdown:
            executor.shutdown()

        self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias))

    def add_jobstore(self, jobstore, alias='default', **jobstore_opts):
        """
        Adds a job store to this scheduler.

        Any extra keyword arguments will be passed to the job store plugin's constructor, assuming
        that the first argument is the name of a job store plugin.

        :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added
        :param str|unicode alias: alias for the job store
        :raises ValueError: if there is already a job store by the given alias

        """
        with self._jobstores_lock:
            if alias in self._jobstores:
                raise ValueError('This scheduler already has a job store by the alias of "%s"' %
                                 alias)

            if isinstance(jobstore, BaseJobStore):
                self._jobstores[alias] = jobstore
            elif isinstance(jobstore, six.string_types):
                self._jobstores[alias] = jobstore = self._create_plugin_instance(
                    'jobstore', jobstore, jobstore_opts)
            else:
                raise TypeError('Expected a job store instance or a string, got %s instead' %
                                jobstore.__class__.__name__)

            # Start the job store right away if the scheduler isn't stopped
            if self.state != STATE_STOPPED:
                jobstore.start(self, alias)

        # Notify listeners that a new job store has been added
        self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias))

        # Notify the scheduler so it can scan the new job store for jobs
        if self.state != STATE_STOPPED:
            self.wakeup()

    def remove_jobstore(self, alias, shutdown=True):
        """
        Removes the job store by the given alias from this scheduler.

        :param str|unicode alias: alias of the job store
        :param bool shutdown: ``True`` to shut down the job store after removing it

        """
        with self._jobstores_lock:
            jobstore = self._lookup_jobstore(alias)
            del self._jobstores[alias]

        if shutdown:
            jobstore.shutdown()

        self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias))

    def add_listener(self, callback, mask=EVENT_ALL):
        """
        add_listener(callback, mask=EVENT_ALL)

        Adds a listener for scheduler events.

        When a matching event occurs, ``callback`` is executed with the event object as its
        sole argument. If the ``mask`` parameter is not provided, the callback will receive events
        of all types.

        :param callback: any callable that takes one argument
        :param int mask: bitmask that indicates which events should be listened to

        .. seealso:: :mod:`apscheduler.events`
        .. seealso:: :ref:`scheduler-events`

        """
        with self._listeners_lock:
            self._listeners.append((callback, mask))

    def remove_listener(self, callback):
        """Removes a previously added event listener."""
        with self._listeners_lock:
            for i, (cb, _) in enumerate(self._listeners):
                if callback == cb:
                    del self._listeners[i]

    def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None,
                misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined,
                next_run_time=undefined, jobstore='default', executor='default',
                replace_existing=False, **trigger_args):
        """
        add_job(func, trigger=None, args=None, kwargs=None, id=None, \
            name=None, misfire_grace_time=undefined, coalesce=undefined, \
            max_instances=undefined, next_run_time=undefined, \
            jobstore='default', executor='default', \
            replace_existing=False, **trigger_args)

        Adds the given job to the job list and wakes up the scheduler if it's already running.

        Any option that defaults to ``undefined`` will be replaced with the corresponding default
        value when the job is scheduled (which happens when the scheduler is started, or
        immediately if the scheduler is already running).

        The ``func`` argument can be given either as a callable object or a textual reference in
        the ``package.module:some.object`` format, where the first half (separated by ``:``) is an
        importable module and the second half is a reference to the callable object, relative to
        the module.

        The ``trigger`` argument can either be:
          #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which
             case any extra keyword arguments to this method are passed on to the trigger's
             constructor
          #. an instance of a trigger class

        :param func: callable (or a textual reference to one) to run at the given time
        :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when
            ``func`` is called
        :param list|tuple args: list of positional arguments to call func with
        :param dict kwargs: dict of keyword arguments to call func with
        :param str|unicode id: explicit identifier for the job (for modifying it later)
        :param str|unicode name: textual description of the job
        :param int misfire_grace_time: seconds after the designated runtime that the job is still
            allowed to be run (or ``None`` to allow the job to run no matter how late it is)
        :param bool coalesce: run once instead of many times if the scheduler determines that the
            job should be run more than once in succession
        :param int max_instances: maximum number of concurrently running instances allowed for
            this job
        :param datetime next_run_time: when to first run the job, regardless of the trigger (pass
            ``None`` to add the job as paused)
        :param str|unicode jobstore: alias of the job store to store the job in
        :param str|unicode executor: alias of the executor to run the job with
        :param bool replace_existing: ``True`` to replace an existing job with the same ``id``
            (but retain the number of runs from the existing one)
        :rtype: Job

        """
        job_kwargs = {
            'trigger': self._create_trigger(trigger, trigger_args),
            'executor': executor,
            'func': func,
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
            'id': id,
            'name': name,
            'misfire_grace_time': misfire_grace_time,
            'coalesce': coalesce,
            'max_instances': max_instances,
            'next_run_time': next_run_time
        }
        job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs)
                          if value is not undefined)
        job = Job(self, **job_kwargs)

        # Don't really add jobs to job stores before the scheduler is up and running
        with self._jobstores_lock:
            if self.state == STATE_STOPPED:
                self._pending_jobs.append((job, jobstore, replace_existing))
                self._logger.info('Adding job tentatively -- it will be properly scheduled when '
                                  'the scheduler starts')
            else:
                self._real_add_job(job, jobstore, replace_existing)

        return job

    def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None,
                      misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined,
                      next_run_time=undefined, jobstore='default', executor='default',
                      **trigger_args):
        """
        scheduled_job(trigger, args=None, kwargs=None, id=None, \
            name=None, misfire_grace_time=undefined, \
            coalesce=undefined, max_instances=undefined, \
            next_run_time=undefined, jobstore='default', \
            executor='default', **trigger_args)

        A decorator version of :meth:`add_job`, except that ``replace_existing`` is always
        ``True``.

        .. important:: The ``id`` argument must be given if scheduling a job in a persistent job
           store. The scheduler cannot, however, enforce this requirement.

        """
        def inner(func):
            self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce,
                         max_instances, next_run_time, jobstore, executor, True, **trigger_args)
            return func
        return inner

    def modify_job(self, job_id, jobstore=None, **changes):
        """
        Modifies the properties of a single job.

        Modifications are passed to this method as extra keyword arguments.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :return Job: the relevant job instance

        """
        with self._jobstores_lock:
            job, jobstore = self._lookup_job(job_id, jobstore)
            job._modify(**changes)
            if jobstore:
                self._lookup_jobstore(jobstore).update_job(job)

        self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore))

        # Wake up the scheduler since the job's next run time may have been changed
        if self.state == STATE_RUNNING:
            self.wakeup()

        return job

    def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args):
        """
        Constructs a new trigger for a job and updates its next run time.

        Extra keyword arguments are passed directly to the trigger's constructor.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :param trigger: alias of the trigger type or a trigger instance
        :return Job: the relevant job instance

        """
        trigger = self._create_trigger(trigger, trigger_args)
        now = datetime.now(self.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        return self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time)

    def pause_job(self, job_id, jobstore=None):
        """
        Causes the given job not to be executed until it is explicitly resumed.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :return Job: the relevant job instance

        """
        return self.modify_job(job_id, jobstore, next_run_time=None)

    def resume_job(self, job_id, jobstore=None):
        """
        Resumes the schedule of the given job, or removes the job if its schedule is finished.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no
            next run time could be calculated and the job was removed

        """
        with self._jobstores_lock:
            job, jobstore = self._lookup_job(job_id, jobstore)
            now = datetime.now(self.timezone)
            next_run_time = job.trigger.get_next_fire_time(None, now)
            if next_run_time:
                return self.modify_job(job_id, jobstore, next_run_time=next_run_time)
            else:
                self.remove_job(job.id, jobstore)

    def get_jobs(self, jobstore=None, pending=None):
        """
        Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled
        jobs, either from a specific job store or from all of them.

        If the scheduler has not been started yet, only pending jobs can be returned because the
        job stores haven't been started yet either.

        :param str|unicode jobstore: alias of the job store
        :param bool pending: **DEPRECATED**
        :rtype: list[Job]

        """
        if pending is not None:
            warnings.warn('The "pending" option is deprecated -- get_jobs() always returns '
                          'scheduled jobs if the scheduler has been started and pending jobs '
                          'otherwise', DeprecationWarning)

        with self._jobstores_lock:
            jobs = []
            if self.state == STATE_STOPPED:
                for job, alias, replace_existing in self._pending_jobs:
                    if jobstore is None or alias == jobstore:
                        jobs.append(job)
            else:
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore is None or alias == jobstore:
                        jobs.extend(store.get_all_jobs())

        return jobs

    def get_job(self, job_id, jobstore=None):
        """
        Returns the Job that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that most likely contains the job
        :return: the Job by the given ID, or ``None`` if it wasn't found
        :rtype: Job

        """
        with self._jobstores_lock:
            try:
                return self._lookup_job(job_id, jobstore)[0]
            except JobLookupError:
                return

    def remove_job(self, job_id, jobstore=None):
        """
        Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        :param str|unicode jobstore: alias of the job store that contains the job
        :raises JobLookupError: if the job was not found

        """
        jobstore_alias = None
        with self._jobstores_lock:
            # Check if the job is among the pending jobs
            if self.state == STATE_STOPPED:
                for i, (job, alias, replace_existing) in enumerate(self._pending_jobs):
                    if job.id == job_id and jobstore in (None, alias):
                        del self._pending_jobs[i]
                        jobstore_alias = alias
                        break
            else:
                # Otherwise, try to remove it from each store until it succeeds or we run out of
                # stores to check
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore in (None, alias):
                        try:
                            store.remove_job(job_id)
                            jobstore_alias = alias
                            break
                        except JobLookupError:
                            continue

        if jobstore_alias is None:
            raise JobLookupError(job_id)

        # Notify listeners that a job has been removed
        event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias)
        self._dispatch_event(event)

        self._logger.info('Removed job %s', job_id)

    def remove_all_jobs(self, jobstore=None):
        """
        Removes all jobs from the specified job store, or all job stores if none is given.

        :param str|unicode jobstore: alias of the job store

        """
        with self._jobstores_lock:
            if self.state == STATE_STOPPED:
                if jobstore:
                    self._pending_jobs = [pending for pending in self._pending_jobs if
                                          pending[1] != jobstore]
                else:
                    self._pending_jobs = []
            else:
                for alias, store in six.iteritems(self._jobstores):
                    if jobstore in (None, alias):
                        store.remove_all_jobs()

        self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore))

    def print_jobs(self, jobstore=None, out=None):
        """
        print_jobs(jobstore=None, out=sys.stdout)

        Prints out a textual listing of all jobs currently scheduled on either all job stores or
        just a specific one.

        :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores
        :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is
            given)

        """
        out = out or sys.stdout
        with self._jobstores_lock:
            if self.state == STATE_STOPPED:
                print(u'Pending jobs:', file=out)
                if self._pending_jobs:
                    for job, jobstore_alias, replace_existing in self._pending_jobs:
                        if jobstore in (None, jobstore_alias):
                            print(u'    %s' % job, file=out)
                else:
                    print(u'    No pending jobs', file=out)
            else:
                for alias, store in sorted(six.iteritems(self._jobstores)):
                    if jobstore in (None, alias):
                        print(u'Jobstore %s:' % alias, file=out)
                        jobs = store.get_all_jobs()
                        if jobs:
                            for job in jobs:
                                print(u'    %s' % job, file=out)
                        else:
                            print(u'    No scheduled jobs', file=out)

    @abstractmethod
    def wakeup(self):
        """
        Notifies the scheduler that there may be jobs due for execution.

        Triggers :meth:`_process_jobs` to be run in an implementation specific manner.
        """

    #
    # Private API
    #

    def _configure(self, config):
        # Set general options
        self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
        self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
        self.jobstore_retry_interval = float(config.pop('jobstore_retry_interval', 10))

        # Set the job defaults
        job_defaults = config.get('job_defaults', {})
        self._job_defaults = {
            'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)),
            'coalesce': asbool(job_defaults.get('coalesce', True)),
            'max_instances': asint(job_defaults.get('max_instances', 1))
        }

        # Configure executors
        self._executors.clear()
        for alias, value in six.iteritems(config.get('executors', {})):
            if isinstance(value, BaseExecutor):
                self.add_executor(value, alias)
            elif isinstance(value, MutableMapping):
                executor_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    executor = self._create_plugin_instance('executor', plugin, value)
                elif executor_class:
                    cls = maybe_ref(executor_class)
                    executor = cls(**value)
                else:
                    raise ValueError(
                        'Cannot create executor "%s" -- either "type" or "class" must be defined' %
                        alias)

                self.add_executor(executor, alias)
            else:
                raise TypeError(
                    "Expected executor instance or dict for executors['%s'], got %s instead" %
                    (alias, value.__class__.__name__))

        # Configure job stores
        self._jobstores.clear()
        for alias, value in six.iteritems(config.get('jobstores', {})):
            if isinstance(value, BaseJobStore):
                self.add_jobstore(value, alias)
            elif isinstance(value, MutableMapping):
                jobstore_class = value.pop('class', None)
                plugin = value.pop('type', None)
                if plugin:
                    jobstore = self._create_plugin_instance('jobstore', plugin, value)
                elif jobstore_class:
                    cls = maybe_ref(jobstore_class)
                    jobstore = cls(**value)
                else:
                    raise ValueError(
                        'Cannot create job store "%s" -- either "type" or "class" must be '
                        'defined' % alias)

                self.add_jobstore(jobstore, alias)
            else:
                raise TypeError(
                    "Expected job store instance or dict for jobstores['%s'], got %s instead" %
                    (alias, value.__class__.__name__))

    def _create_default_executor(self):
        """Creates a default executor, specific to the particular scheduler type."""
        return ThreadPoolExecutor()

    def _create_default_jobstore(self):
        """Creates a default job store, specific to the particular scheduler type."""
        return MemoryJobStore()

    def _lookup_executor(self, alias):
        """
        Returns the executor instance by the given name from the list of executors that were added
        to this scheduler.

        :type alias: str
        :raises KeyError: if no executor by the given alias is found

        """
        try:
            return self._executors[alias]
        except KeyError:
            raise KeyError('No such executor: %s' % alias)

    def _lookup_jobstore(self, alias):
        """
        Returns the job store instance by the given name from the list of job stores that were
        added to this scheduler.

        :type alias: str
        :raises KeyError: if no job store by the given alias is found

        """
        try:
            return self._jobstores[alias]
        except KeyError:
            raise KeyError('No such job store: %s' % alias)

    def _lookup_job(self, job_id, jobstore_alias):
        """
        Finds a job by its ID.

        :type job_id: str
        :param str jobstore_alias: alias of a job store to look in
        :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of
            a pending job)
        :raises JobLookupError: if no job by the given ID is found.

        """
        if self.state == STATE_STOPPED:
            # Check if the job is among the pending jobs
            for job, alias, replace_existing in self._pending_jobs:
                if job.id == job_id:
                    return job, None
        else:
            # Look in all job stores
            for alias, store in six.iteritems(self._jobstores):
                if jobstore_alias in (None, alias):
                    job = store.lookup_job(job_id)
                    if job is not None:
                        return job, alias

        raise JobLookupError(job_id)

    def _dispatch_event(self, event):
        """
        Dispatches the given event to interested listeners.

        :param SchedulerEvent event: the event to send

        """
        with self._listeners_lock:
            listeners = tuple(self._listeners)

        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except BaseException:
                    self._logger.exception('Error notifying listener')

    def _check_uwsgi(self):
        """Check if we're running under uWSGI with threads disabled."""
        uwsgi_module = sys.modules.get('uwsgi')
        if not getattr(uwsgi_module, 'has_threads', True):
            raise RuntimeError('The scheduler seems to be running under uWSGI, but threads have '
                               'been disabled. You must run uWSGI with the --enable-threads '
                               'option for the scheduler to work.')

    def _real_add_job(self, job, jobstore_alias, replace_existing):
        """
        :param Job job: the job to add
        :param bool replace_existing: ``True`` to use update_job() in case the job already exists
            in the store

        """
        # Fill in undefined values with defaults
        replacements = {}
        for key, value in six.iteritems(self._job_defaults):
            if not hasattr(job, key):
                replacements[key] = value

        # Calculate the next run time if there is none defined
        if not hasattr(job, 'next_run_time'):
            now = datetime.now(self.timezone)
            replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now)

        # Apply any replacements
        job._modify(**replacements)

        # Add the job to the given job store
        store = self._lookup_jobstore(jobstore_alias)
        try:
            store.add_job(job)
        except ConflictingIdError:
            if replace_existing:
                store.update_job(job)
            else:
                raise

        # Mark the job as no longer pending
        job._jobstore_alias = jobstore_alias

        # Notify listeners that a new job has been added
        event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias)
        self._dispatch_event(event)

        self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias)

        # Notify the scheduler about the new job
        if self.state == STATE_RUNNING:
            self.wakeup()

    def _create_plugin_instance(self, type_, alias, constructor_kwargs):
        """Creates an instance of the given plugin type, loading the plugin first if necessary."""
        plugin_container, class_container, base_class = {
            'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger),
            'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore),
            'executor': (self._executor_plugins, self._executor_classes, BaseExecutor)
        }[type_]

        try:
            plugin_cls = class_container[alias]
        except KeyError:
            if alias in plugin_container:
                plugin_cls = class_container[alias] = plugin_container[alias].load()
                if not issubclass(plugin_cls, base_class):
                    raise TypeError('The {0} entry point does not point to a {0} class'.
                                    format(type_))
            else:
                raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias))

        return plugin_cls(**constructor_kwargs)

    def _create_trigger(self, trigger, trigger_args):
        if isinstance(trigger, BaseTrigger):
            return trigger
        elif trigger is None:
            trigger = 'date'
        elif not isinstance(trigger, six.string_types):
            raise TypeError('Expected a trigger instance or string, got %s instead' %
                            trigger.__class__.__name__)

        # Use the scheduler's time zone if nothing else is specified
        trigger_args.setdefault('timezone', self.timezone)

        # Instantiate the trigger class
        return self._create_plugin_instance('trigger', trigger, trigger_args)

    def _create_lock(self):
        """Creates a reentrant lock object."""
        return RLock()

    def _process_jobs(self):
        """
        Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
        to wait for the next round.

        If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
        ``jobstore_retry_interval`` seconds.

        """
        if self.state == STATE_PAUSED:
            self._logger.debug('Scheduler is paused -- not processing jobs')
            return None

        self._logger.debug('Looking for jobs to run')
        now = datetime.now(self.timezone)
        next_wakeup_time = None
        events = []

        with self._jobstores_lock:
            for jobstore_alias, jobstore in six.iteritems(self._jobstores):
                try:
                    due_jobs = jobstore.get_due_jobs(now)
                except Exception as e:
                    # Schedule a wakeup at least in jobstore_retry_interval seconds
                    self._logger.warning('Error getting due jobs from job store %r: %s',
                                         jobstore_alias, e)
                    retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval)
                    if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
                        next_wakeup_time = retry_wakeup_time

                    continue

                for job in due_jobs:
                    # Look up the job's executor
                    try:
                        executor = self._lookup_executor(job.executor)
                    except BaseException:
                        self._logger.error(
                            'Executor lookup ("%s") failed for job "%s" -- removing it from the '
                            'job store', job.executor, job)
                        self.remove_job(job.id, jobstore_alias)
                        continue

                    run_times = job._get_run_times(now)
                    run_times = run_times[-1:] if run_times and job.coalesce else run_times
                    if run_times:
                        try:
                            executor.submit_job(job, run_times)
                        except MaxInstancesReachedError:
                            self._logger.warning(
                                'Execution of job "%s" skipped: maximum number of running '
                                'instances reached (%d)', job, job.max_instances)
                            event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
                                                       jobstore_alias, run_times)
                            events.append(event)
                        except BaseException:
                            self._logger.exception('Error submitting job "%s" to executor "%s"',
                                                   job, job.executor)
                        else:
                            event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias,
                                                       run_times)
                            events.append(event)

                        # Update the job if it has a next execution time.
                        # Otherwise remove it from the job store.
                        job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
                        if job_next_run:
                            job._modify(next_run_time=job_next_run)
                            jobstore.update_job(job)
                        else:
                            self.remove_job(job.id, jobstore_alias)

                # Set a new next wakeup time if there isn't one yet or
                # the jobstore has an even earlier one
                jobstore_next_run_time = jobstore.get_next_run_time()
                if jobstore_next_run_time and (next_wakeup_time is None or
                                               jobstore_next_run_time < next_wakeup_time):
                    next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)

        # Dispatch collected events
        for event in events:
            self._dispatch_event(event)

        # Determine the delay until this method should be called again
        if self.state == STATE_PAUSED:
            wait_seconds = None
            self._logger.debug('Scheduler is paused; waiting until resume() is called')
        elif next_wakeup_time is None:
            wait_seconds = None
            self._logger.debug('No jobs; waiting until a job is added')
        else:
            wait_seconds = min(max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX)
            self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time,
                               wait_seconds)

        return wait_seconds
43,228
Python
.py
838
38.899761
99
0.602412
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
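Since configure() above flattens dotted option keys into nested dicts, a hedged configuration sketch may help; the option values are illustrative, 'threadpool' is assumed to be the stock executor plugin alias, and 'myapp.jobs:cleanup' is a hypothetical module:callable reference of the kind add_job() documents:

from apscheduler.schedulers.background import BackgroundScheduler

# Dotted keys such as 'apscheduler.job_defaults.coalesce' become
# {'job_defaults': {'coalesce': ...}} after prefix stripping in configure().
gconfig = {
    'apscheduler.timezone': 'UTC',
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '3',
    'apscheduler.executors.default.type': 'threadpool',
    'apscheduler.executors.default.max_workers': '20',
}
scheduler = BackgroundScheduler(gconfig)

# func given as a textual reference; the job is held as pending until start()
scheduler.add_job('myapp.jobs:cleanup', 'cron', hour=3, id='nightly_cleanup')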
8,419
__init__.py
rembo10_headphones/lib/zc/lockfile/__init__.py
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################

import os
import errno
import logging

logger = logging.getLogger("zc.lockfile")

__metaclass__ = type


class LockError(Exception):
    """Couldn't get a lock
    """

try:
    import fcntl
except ImportError:
    try:
        import msvcrt
    except ImportError:
        def _lock_file(file):
            raise TypeError('No file-locking support on this platform')

        def _unlock_file(file):
            raise TypeError('No file-locking support on this platform')

    else:
        # Windows
        def _lock_file(file):
            # Lock just the first byte
            try:
                msvcrt.locking(file.fileno(), msvcrt.LK_NBLCK, 1)
            except IOError:
                raise LockError("Couldn't lock %r" % file.name)

        def _unlock_file(file):
            try:
                file.seek(0)
                msvcrt.locking(file.fileno(), msvcrt.LK_UNLCK, 1)
            except IOError:
                raise LockError("Couldn't unlock %r" % file.name)

else:
    # Unix
    _flags = fcntl.LOCK_EX | fcntl.LOCK_NB

    def _lock_file(file):
        try:
            fcntl.flock(file.fileno(), _flags)
        except IOError:
            raise LockError("Couldn't lock %r" % file.name)

    def _unlock_file(file):
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)


class LazyHostName:
    """Avoid importing socket and calling gethostname() unnecessarily"""

    def __str__(self):
        import socket
        return socket.gethostname()


class SimpleLockFile:

    _fp = None

    def __init__(self, path):
        self._path = path
        try:
            # Try to open for writing without truncation:
            fp = open(path, 'r+')
        except IOError:
            # If the file doesn't exist, we'll get an IO error, try a+
            # Note that there may be a race here. Multiple processes
            # could fail on the r+ open and open the file a+, but only
            # one will get the lock and write a pid.
            fp = open(path, 'a+')

        try:
            _lock_file(fp)
            self._fp = fp
        except:
            fp.close()
            raise

        # Lock acquired
        self._on_lock()
        fp.flush()

    def close(self):
        if self._fp is not None:
            _unlock_file(self._fp)
            self._fp.close()
            self._fp = None

    def _on_lock(self):
        """
        Allow subclasses to supply behavior to occur following
        lock acquisition.
        """


class LockFile(SimpleLockFile):

    def __init__(self, path, content_template='{pid}'):
        self._content_template = content_template
        super(LockFile, self).__init__(path)

    def _on_lock(self):
        content = self._content_template.format(
            pid=os.getpid(),
            hostname=LazyHostName(),
        )
        self._fp.write(" %s\n" % content)
        self._fp.truncate()
3,521
Python
.py
103
26.252427
78
0.569788
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
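A short usage sketch for the locking API above; 'app.lock' is an illustrative path:

import zc.lockfile

lock = zc.lockfile.LockFile('app.lock', content_template='{pid};{hostname}')
try:
    # ... do work that must not run concurrently; a second LockFile('app.lock')
    # in another process would raise zc.lockfile.LockError here ...
    pass
finally:
    lock.close()  # releases the lock and closes the underlying file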
8,420
tests.py
rembo10_headphones/lib/zc/lockfile/tests.py
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import os, re, sys, unittest, doctest
import zc.lockfile, time, threading
from zope.testing import renormalizing, setupstack
import tempfile
try:
    from unittest.mock import Mock, patch
except ImportError:
    from mock import Mock, patch

checker = renormalizing.RENormalizing([
    # Python 3 adds module path to error class name.
    (re.compile(r"zc\.lockfile\.LockError:"),
     r"LockError:"),
])


def inc():
    while 1:
        try:
            lock = zc.lockfile.LockFile('f.lock')
        except zc.lockfile.LockError:
            continue
        else:
            break
    f = open('f', 'r+b')
    v = int(f.readline().strip())
    time.sleep(0.01)
    v += 1
    f.seek(0)
    f.write(('%d\n' % v).encode('ASCII'))
    f.close()
    lock.close()


def many_threads_read_and_write():
    r"""
    >>> with open('f', 'w+b') as file:
    ...     _ = file.write(b'0\n')
    >>> with open('f.lock', 'w+b') as file:
    ...     _ = file.write(b'0\n')

    >>> n = 50
    >>> threads = [threading.Thread(target=inc) for i in range(n)]
    >>> _ = [thread.start() for thread in threads]
    >>> _ = [thread.join() for thread in threads]
    >>> with open('f', 'rb') as file:
    ...     saved = int(file.read().strip())
    >>> saved == n
    True

    >>> os.remove('f')

    We should only have one pid in the lock file:

    >>> f = open('f.lock')
    >>> len(f.read().strip().split())
    1
    >>> f.close()

    >>> os.remove('f.lock')
    """


def pid_in_lockfile():
    r"""
    >>> import os, zc.lockfile
    >>> pid = os.getpid()
    >>> lock = zc.lockfile.LockFile("f.lock")
    >>> f = open("f.lock")
    >>> _ = f.seek(1)
    >>> f.read().strip() == str(pid)
    True
    >>> f.close()

    Make sure that locking twice does not overwrite the old pid:

    >>> lock = zc.lockfile.LockFile("f.lock")
    Traceback (most recent call last):
      ...
    LockError: Couldn't lock 'f.lock'

    >>> f = open("f.lock")
    >>> _ = f.seek(1)
    >>> f.read().strip() == str(pid)
    True
    >>> f.close()

    >>> lock.close()
    """


def hostname_in_lockfile():
    r"""
    hostname is correctly written into the lock file when it's included in the
    lock file content template

    >>> import zc.lockfile
    >>> with patch('socket.gethostname', Mock(return_value='myhostname')):
    ...     lock = zc.lockfile.LockFile("f.lock", content_template='{hostname}')
    >>> f = open("f.lock")
    >>> _ = f.seek(1)
    >>> f.read().rstrip()
    'myhostname'
    >>> f.close()

    Make sure that locking twice does not overwrite the old hostname:

    >>> lock = zc.lockfile.LockFile("f.lock", content_template='{hostname}')
    Traceback (most recent call last):
      ...
    LockError: Couldn't lock 'f.lock'

    >>> f = open("f.lock")
    >>> _ = f.seek(1)
    >>> f.read().rstrip()
    'myhostname'
    >>> f.close()

    >>> lock.close()
    """


class TestLogger(object):

    def __init__(self):
        self.log_entries = []

    def exception(self, msg, *args):
        self.log_entries.append((msg,) + args)


class LockFileLogEntryTestCase(unittest.TestCase):
    """Tests for logging in case of lock failure"""

    def setUp(self):
        self.here = os.getcwd()
        self.tmp = tempfile.mkdtemp(prefix='zc.lockfile-test-')
        os.chdir(self.tmp)

    def tearDown(self):
        os.chdir(self.here)
        setupstack.rmtree(self.tmp)

    def test_log_formatting(self):
        # PID and hostname are parsed and logged from lock file on failure
        with patch('os.getpid', Mock(return_value=123)):
            with patch('socket.gethostname', Mock(return_value='myhostname')):
                lock = zc.lockfile.LockFile('f.lock',
                                            content_template='{pid}/{hostname}')
                with open('f.lock') as f:
                    self.assertEqual(' 123/myhostname\n', f.read())

                lock.close()

    def test_unlock_and_lock_while_multiprocessing_process_running(self):
        import multiprocessing

        lock = zc.lockfile.LockFile('l')
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=q.get)
        p.daemon = True
        p.start()

        # release and re-acquire should work (obviously)
        lock.close()
        lock = zc.lockfile.LockFile('l')
        self.assertTrue(p.is_alive())
        q.put(0)
        lock.close()
        p.join()

    def test_simple_lock(self):
        assert isinstance(zc.lockfile.SimpleLockFile, type)
        lock = zc.lockfile.SimpleLockFile('s')
        with self.assertRaises(zc.lockfile.LockError):
            zc.lockfile.SimpleLockFile('s')
        lock.close()
        zc.lockfile.SimpleLockFile('s').close()


def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocFileSuite(
        'README.txt', checker=checker,
        setUp=setupstack.setUpDirectory, tearDown=setupstack.tearDown))
    suite.addTest(doctest.DocTestSuite(
        setUp=setupstack.setUpDirectory, tearDown=setupstack.tearDown,
        checker=checker))
    # Add unittest test cases from this module
    suite.addTest(unittest.defaultTestLoader.loadTestsFromName(__name__))
    return suite
5,814
Python
.py
165
29.018182
80
0.595582
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,421
more.pyi
rembo10_headphones/lib/more_itertools/more.pyi
"""Stubs for more_itertools.more""" from typing import ( Any, Callable, Container, Dict, Generic, Hashable, Iterable, Iterator, List, Optional, Reversible, Sequence, Sized, Tuple, Union, TypeVar, type_check_only, ) from types import TracebackType from typing_extensions import ContextManager, Protocol, Type, overload # Type and type variable definitions _T = TypeVar('_T') _T1 = TypeVar('_T1') _T2 = TypeVar('_T2') _U = TypeVar('_U') _V = TypeVar('_V') _W = TypeVar('_W') _T_co = TypeVar('_T_co', covariant=True) _GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[object]]) _Raisable = Union[BaseException, 'Type[BaseException]'] @type_check_only class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ... @type_check_only class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ... def chunked( iterable: Iterable[_T], n: Optional[int], strict: bool = ... ) -> Iterator[List[_T]]: ... @overload def first(iterable: Iterable[_T]) -> _T: ... @overload def first(iterable: Iterable[_T], default: _U) -> Union[_T, _U]: ... @overload def last(iterable: Iterable[_T]) -> _T: ... @overload def last(iterable: Iterable[_T], default: _U) -> Union[_T, _U]: ... @overload def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ... @overload def nth_or_last( iterable: Iterable[_T], n: int, default: _U ) -> Union[_T, _U]: ... class peekable(Generic[_T], Iterator[_T]): def __init__(self, iterable: Iterable[_T]) -> None: ... def __iter__(self) -> peekable[_T]: ... def __bool__(self) -> bool: ... @overload def peek(self) -> _T: ... @overload def peek(self, default: _U) -> Union[_T, _U]: ... def prepend(self, *items: _T) -> None: ... def __next__(self) -> _T: ... @overload def __getitem__(self, index: int) -> _T: ... @overload def __getitem__(self, index: slice) -> List[_T]: ... def collate(*iterables: Iterable[_T], **kwargs: Any) -> Iterable[_T]: ... def consumer(func: _GenFn) -> _GenFn: ... def ilen(iterable: Iterable[object]) -> int: ... def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... def with_iter( context_manager: ContextManager[Iterable[_T]], ) -> Iterator[_T]: ... def one( iterable: Iterable[_T], too_short: Optional[_Raisable] = ..., too_long: Optional[_Raisable] = ..., ) -> _T: ... def raise_(exception: _Raisable, *args: Any) -> None: ... def strictly_n( iterable: Iterable[_T], n: int, too_short: Optional[_GenFn] = ..., too_long: Optional[_GenFn] = ..., ) -> List[_T]: ... def distinct_permutations( iterable: Iterable[_T], r: Optional[int] = ... ) -> Iterator[Tuple[_T, ...]]: ... def intersperse( e: _U, iterable: Iterable[_T], n: int = ... ) -> Iterator[Union[_T, _U]]: ... def unique_to_each(*iterables: Iterable[_T]) -> List[List[_T]]: ... @overload def windowed( seq: Iterable[_T], n: int, *, step: int = ... ) -> Iterator[Tuple[Optional[_T], ...]]: ... @overload def windowed( seq: Iterable[_T], n: int, fillvalue: _U, step: int = ... ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... def substrings(iterable: Iterable[_T]) -> Iterator[Tuple[_T, ...]]: ... def substrings_indexes( seq: Sequence[_T], reverse: bool = ... ) -> Iterator[Tuple[Sequence[_T], int, int]]: ... class bucket(Generic[_T, _U], Container[_U]): def __init__( self, iterable: Iterable[_T], key: Callable[[_T], _U], validator: Optional[Callable[[object], object]] = ..., ) -> None: ... def __contains__(self, value: object) -> bool: ... def __iter__(self) -> Iterator[_U]: ... def __getitem__(self, value: object) -> Iterator[_T]: ... def spy( iterable: Iterable[_T], n: int = ... ) -> Tuple[List[_T], Iterator[_T]]: ... 
def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ... def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ... def interleave_evenly( iterables: List[Iterable[_T]], lengths: Optional[List[int]] = ... ) -> Iterator[_T]: ... def collapse( iterable: Iterable[Any], base_type: Optional[type] = ..., levels: Optional[int] = ..., ) -> Iterator[Any]: ... @overload def side_effect( func: Callable[[_T], object], iterable: Iterable[_T], chunk_size: None = ..., before: Optional[Callable[[], object]] = ..., after: Optional[Callable[[], object]] = ..., ) -> Iterator[_T]: ... @overload def side_effect( func: Callable[[List[_T]], object], iterable: Iterable[_T], chunk_size: int, before: Optional[Callable[[], object]] = ..., after: Optional[Callable[[], object]] = ..., ) -> Iterator[_T]: ... def sliced( seq: Sequence[_T], n: int, strict: bool = ... ) -> Iterator[Sequence[_T]]: ... def split_at( iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ..., keep_separator: bool = ..., ) -> Iterator[List[_T]]: ... def split_before( iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ... ) -> Iterator[List[_T]]: ... def split_after( iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ... ) -> Iterator[List[_T]]: ... def split_when( iterable: Iterable[_T], pred: Callable[[_T, _T], object], maxsplit: int = ..., ) -> Iterator[List[_T]]: ... def split_into( iterable: Iterable[_T], sizes: Iterable[Optional[int]] ) -> Iterator[List[_T]]: ... @overload def padded( iterable: Iterable[_T], *, n: Optional[int] = ..., next_multiple: bool = ... ) -> Iterator[Optional[_T]]: ... @overload def padded( iterable: Iterable[_T], fillvalue: _U, n: Optional[int] = ..., next_multiple: bool = ..., ) -> Iterator[Union[_T, _U]]: ... @overload def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ... @overload def repeat_last( iterable: Iterable[_T], default: _U ) -> Iterator[Union[_T, _U]]: ... def distribute(n: int, iterable: Iterable[_T]) -> List[Iterator[_T]]: ... @overload def stagger( iterable: Iterable[_T], offsets: _SizedIterable[int] = ..., longest: bool = ..., ) -> Iterator[Tuple[Optional[_T], ...]]: ... @overload def stagger( iterable: Iterable[_T], offsets: _SizedIterable[int] = ..., longest: bool = ..., fillvalue: _U = ..., ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... class UnequalIterablesError(ValueError): def __init__( self, details: Optional[Tuple[int, int, int]] = ... ) -> None: ... @overload def zip_equal(__iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]: ... @overload def zip_equal( __iter1: Iterable[_T1], __iter2: Iterable[_T2] ) -> Iterator[Tuple[_T1, _T2]]: ... @overload def zip_equal( __iter1: Iterable[_T], __iter2: Iterable[_T], __iter3: Iterable[_T], *iterables: Iterable[_T] ) -> Iterator[Tuple[_T, ...]]: ... @overload def zip_offset( __iter1: Iterable[_T1], *, offsets: _SizedIterable[int], longest: bool = ..., fillvalue: None = None ) -> Iterator[Tuple[Optional[_T1]]]: ... @overload def zip_offset( __iter1: Iterable[_T1], __iter2: Iterable[_T2], *, offsets: _SizedIterable[int], longest: bool = ..., fillvalue: None = None ) -> Iterator[Tuple[Optional[_T1], Optional[_T2]]]: ... @overload def zip_offset( __iter1: Iterable[_T], __iter2: Iterable[_T], __iter3: Iterable[_T], *iterables: Iterable[_T], offsets: _SizedIterable[int], longest: bool = ..., fillvalue: None = None ) -> Iterator[Tuple[Optional[_T], ...]]: ... 
@overload def zip_offset( __iter1: Iterable[_T1], *, offsets: _SizedIterable[int], longest: bool = ..., fillvalue: _U, ) -> Iterator[Tuple[Union[_T1, _U]]]: ... @overload def zip_offset( __iter1: Iterable[_T1], __iter2: Iterable[_T2], *, offsets: _SizedIterable[int], longest: bool = ..., fillvalue: _U, ) -> Iterator[Tuple[Union[_T1, _U], Union[_T2, _U]]]: ... @overload def zip_offset( __iter1: Iterable[_T], __iter2: Iterable[_T], __iter3: Iterable[_T], *iterables: Iterable[_T], offsets: _SizedIterable[int], longest: bool = ..., fillvalue: _U, ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... def sort_together( iterables: Iterable[Iterable[_T]], key_list: Iterable[int] = ..., key: Optional[Callable[..., Any]] = ..., reverse: bool = ..., ) -> List[Tuple[_T, ...]]: ... def unzip(iterable: Iterable[Sequence[_T]]) -> Tuple[Iterator[_T], ...]: ... def divide(n: int, iterable: Iterable[_T]) -> List[Iterator[_T]]: ... def always_iterable( obj: object, base_type: Union[ type, Tuple[Union[type, Tuple[Any, ...]], ...], None ] = ..., ) -> Iterator[Any]: ... def adjacent( predicate: Callable[[_T], bool], iterable: Iterable[_T], distance: int = ..., ) -> Iterator[Tuple[bool, _T]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: None = None, valuefunc: None = None, reducefunc: None = None, ) -> Iterator[Tuple[_T, Iterator[_T]]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: None, reducefunc: None, ) -> Iterator[Tuple[_U, Iterator[_T]]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: None, valuefunc: Callable[[_T], _V], reducefunc: None, ) -> Iterable[Tuple[_T, Iterable[_V]]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: Callable[[_T], _V], reducefunc: None, ) -> Iterable[Tuple[_U, Iterator[_V]]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: None, valuefunc: None, reducefunc: Callable[[Iterator[_T]], _W], ) -> Iterable[Tuple[_T, _W]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: None, reducefunc: Callable[[Iterator[_T]], _W], ) -> Iterable[Tuple[_U, _W]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: None, valuefunc: Callable[[_T], _V], reducefunc: Callable[[Iterable[_V]], _W], ) -> Iterable[Tuple[_T, _W]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: Callable[[_T], _V], reducefunc: Callable[[Iterable[_V]], _W], ) -> Iterable[Tuple[_U, _W]]: ... class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]): @overload def __init__(self, __stop: _T) -> None: ... @overload def __init__(self, __start: _T, __stop: _T) -> None: ... @overload def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ... def __bool__(self) -> bool: ... def __contains__(self, elem: object) -> bool: ... def __eq__(self, other: object) -> bool: ... @overload def __getitem__(self, key: int) -> _T: ... @overload def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ... def __hash__(self) -> int: ... def __iter__(self) -> Iterator[_T]: ... def __len__(self) -> int: ... def __reduce__( self, ) -> Tuple[Type[numeric_range[_T, _U]], Tuple[_T, _T, _U]]: ... def __repr__(self) -> str: ... def __reversed__(self) -> Iterator[_T]: ... def count(self, value: _T) -> int: ... def index(self, value: _T) -> int: ... # type: ignore def count_cycle( iterable: Iterable[_T], n: Optional[int] = ... 
) -> Iterable[Tuple[int, _T]]: ... def mark_ends( iterable: Iterable[_T], ) -> Iterable[Tuple[bool, bool, _T]]: ... def locate( iterable: Iterable[object], pred: Callable[..., Any] = ..., window_size: Optional[int] = ..., ) -> Iterator[int]: ... def lstrip( iterable: Iterable[_T], pred: Callable[[_T], object] ) -> Iterator[_T]: ... def rstrip( iterable: Iterable[_T], pred: Callable[[_T], object] ) -> Iterator[_T]: ... def strip( iterable: Iterable[_T], pred: Callable[[_T], object] ) -> Iterator[_T]: ... class islice_extended(Generic[_T], Iterator[_T]): def __init__( self, iterable: Iterable[_T], *args: Optional[int] ) -> None: ... def __iter__(self) -> islice_extended[_T]: ... def __next__(self) -> _T: ... def __getitem__(self, index: slice) -> islice_extended[_T]: ... def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ... def consecutive_groups( iterable: Iterable[_T], ordering: Callable[[_T], int] = ... ) -> Iterator[Iterator[_T]]: ... @overload def difference( iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: None = ... ) -> Iterator[Union[_T, _U]]: ... @overload def difference( iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U ) -> Iterator[_U]: ... class SequenceView(Generic[_T], Sequence[_T]): def __init__(self, target: Sequence[_T]) -> None: ... @overload def __getitem__(self, index: int) -> _T: ... @overload def __getitem__(self, index: slice) -> Sequence[_T]: ... def __len__(self) -> int: ... class seekable(Generic[_T], Iterator[_T]): def __init__( self, iterable: Iterable[_T], maxlen: Optional[int] = ... ) -> None: ... def __iter__(self) -> seekable[_T]: ... def __next__(self) -> _T: ... def __bool__(self) -> bool: ... @overload def peek(self) -> _T: ... @overload def peek(self, default: _U) -> Union[_T, _U]: ... def elements(self) -> SequenceView[_T]: ... def seek(self, index: int) -> None: ... class run_length: @staticmethod def encode(iterable: Iterable[_T]) -> Iterator[Tuple[_T, int]]: ... @staticmethod def decode(iterable: Iterable[Tuple[_T, int]]) -> Iterator[_T]: ... def exactly_n( iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ... ) -> bool: ... def circular_shifts(iterable: Iterable[_T]) -> List[Tuple[_T, ...]]: ... def make_decorator( wrapping_func: Callable[..., _U], result_index: int = ... ) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ... @overload def map_reduce( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: None = ..., reducefunc: None = ..., ) -> Dict[_U, List[_T]]: ... @overload def map_reduce( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: Callable[[_T], _V], reducefunc: None = ..., ) -> Dict[_U, List[_V]]: ... @overload def map_reduce( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: None = ..., reducefunc: Callable[[List[_T]], _W] = ..., ) -> Dict[_U, _W]: ... @overload def map_reduce( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: Callable[[_T], _V], reducefunc: Callable[[List[_V]], _W], ) -> Dict[_U, _W]: ... def rlocate( iterable: Iterable[_T], pred: Callable[..., object] = ..., window_size: Optional[int] = ..., ) -> Iterator[int]: ... def replace( iterable: Iterable[_T], pred: Callable[..., object], substitutes: Iterable[_U], count: Optional[int] = ..., window_size: int = ..., ) -> Iterator[Union[_T, _U]]: ... def partitions(iterable: Iterable[_T]) -> Iterator[List[List[_T]]]: ... def set_partitions( iterable: Iterable[_T], k: Optional[int] = ... ) -> Iterator[List[List[_T]]]: ... 
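# Editorial sketch (an addition, not part of the stub): how the map_reduce
# overloads above line up with runtime results; assumes the more_itertools
# package is installed.
#
#     from more_itertools import map_reduce
#
#     words = ['apple', 'avocado', 'banana', 'blueberry']
#
#     # keyfunc only -> Dict[_U, List[_T]]
#     map_reduce(words, keyfunc=lambda w: w[0])
#     # -> {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry']}
#
#     # keyfunc + valuefunc + reducefunc -> Dict[_U, _W]
#     map_reduce(words, keyfunc=lambda w: w[0], valuefunc=len, reducefunc=sum)
#     # -> {'a': 12, 'b': 15}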
class time_limited(Generic[_T], Iterator[_T]): def __init__( self, limit_seconds: float, iterable: Iterable[_T] ) -> None: ... def __iter__(self) -> islice_extended[_T]: ... def __next__(self) -> _T: ... @overload def only( iterable: Iterable[_T], *, too_long: Optional[_Raisable] = ... ) -> Optional[_T]: ... @overload def only( iterable: Iterable[_T], default: _U, too_long: Optional[_Raisable] = ... ) -> Union[_T, _U]: ... def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ... def distinct_combinations( iterable: Iterable[_T], r: int ) -> Iterator[Tuple[_T, ...]]: ... def filter_except( validator: Callable[[Any], object], iterable: Iterable[_T], *exceptions: Type[BaseException] ) -> Iterator[_T]: ... def map_except( function: Callable[[Any], _U], iterable: Iterable[_T], *exceptions: Type[BaseException] ) -> Iterator[_U]: ... def map_if( iterable: Iterable[Any], pred: Callable[[Any], bool], func: Callable[[Any], Any], func_else: Optional[Callable[[Any], Any]] = ..., ) -> Iterator[Any]: ... def sample( iterable: Iterable[_T], k: int, weights: Optional[Iterable[float]] = ..., ) -> List[_T]: ... def is_sorted( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ..., reverse: bool = False, strict: bool = False, ) -> bool: ... class AbortThread(BaseException): pass class callback_iter(Generic[_T], Iterator[_T]): def __init__( self, func: Callable[..., Any], callback_kwd: str = ..., wait_seconds: float = ..., ) -> None: ... def __enter__(self) -> callback_iter[_T]: ... def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> Optional[bool]: ... def __iter__(self) -> callback_iter[_T]: ... def __next__(self) -> _T: ... def _reader(self) -> Iterator[_T]: ... @property def done(self) -> bool: ... @property def result(self) -> Any: ... def windowed_complete( iterable: Iterable[_T], n: int ) -> Iterator[Tuple[_T, ...]]: ... def all_unique( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ... ) -> bool: ... def nth_product(index: int, *args: Iterable[_T]) -> Tuple[_T, ...]: ... def nth_permutation( iterable: Iterable[_T], r: int, index: int ) -> Tuple[_T, ...]: ... def value_chain(*args: Union[_T, Iterable[_T]]) -> Iterable[_T]: ... def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ... def combination_index( element: Iterable[_T], iterable: Iterable[_T] ) -> int: ... def permutation_index( element: Iterable[_T], iterable: Iterable[_T] ) -> int: ... def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ... class countable(Generic[_T], Iterator[_T]): def __init__(self, iterable: Iterable[_T]) -> None: ... def __iter__(self) -> countable[_T]: ... def __next__(self) -> _T: ... def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[List[_T]]: ... def zip_broadcast( *objects: Union[_T, Iterable[_T]], scalar_types: Union[ type, Tuple[Union[type, Tuple[Any, ...]], ...], None ] = ..., strict: bool = ... ) -> Iterable[Tuple[_T, ...]]: ... def unique_in_window( iterable: Iterable[_T], n: int, key: Optional[Callable[[_T], _U]] = ... ) -> Iterator[_T]: ... def duplicates_everseen( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ... ) -> Iterator[_T]: ... def duplicates_justseen( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ... ) -> Iterator[_T]: ... class _SupportsLessThan(Protocol): def __lt__(self, __other: Any) -> bool: ... 
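# Editorial sketch (an addition, not part of the stub): runtime behavior
# behind the `only` overloads above; assumes more_itertools is installed.
#
#     from more_itertools import only
#
#     only([42])           # -> 42: exactly one item returns that item
#     only([])             # -> None: empty iterable falls back to the default
#     only([], default=0)  # -> 0: an explicit default
#     only([1, 2])         # raises ValueError (or the *too_long* exception)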
_SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan) @overload def minmax( iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None ) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ... @overload def minmax( iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan] ) -> Tuple[_T, _T]: ... @overload def minmax( iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None, default: _U ) -> Union[_U, Tuple[_SupportsLessThanT, _SupportsLessThanT]]: ... @overload def minmax( iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan], default: _U, ) -> Union[_U, Tuple[_T, _T]]: ... @overload def minmax( iterable_or_value: _SupportsLessThanT, __other: _SupportsLessThanT, *others: _SupportsLessThanT ) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ... @overload def minmax( iterable_or_value: _T, __other: _T, *others: _T, key: Callable[[_T], _SupportsLessThan] ) -> Tuple[_T, _T]: ...
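# Editorial sketch (an addition, not part of the stub): the minmax overloads
# above at runtime; assumes more_itertools is installed.
#
#     from more_itertools import minmax
#
#     minmax([3, 1, 2])             # -> (1, 3): iterable form
#     minmax(3, 1, 2)               # -> (1, 3): varargs form
#     minmax([], default=(0, 0))    # -> (0, 0): default for an empty iterable
#     minmax(['aa', 'b'], key=len)  # -> ('b', 'aa'): compare by key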
20,006
Python
.py
635
28.103937
78
0.587581
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,422
more.py
rembo10_headphones/lib/more_itertools/more.py
import warnings from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence from concurrent.futures import ThreadPoolExecutor from functools import partial, reduce, wraps from heapq import merge, heapify, heapreplace, heappop from itertools import ( chain, compress, count, cycle, dropwhile, groupby, islice, repeat, starmap, takewhile, tee, zip_longest, ) from math import exp, factorial, floor, log from queue import Empty, Queue from random import random, randrange, uniform from operator import itemgetter, mul, sub, gt, lt, ge, le from sys import hexversion, maxsize from time import monotonic from .recipes import ( consume, flatten, pairwise, powerset, take, unique_everseen, ) __all__ = [ 'AbortThread', 'SequenceView', 'UnequalIterablesError', 'adjacent', 'all_unique', 'always_iterable', 'always_reversible', 'bucket', 'callback_iter', 'chunked', 'chunked_even', 'circular_shifts', 'collapse', 'collate', 'combination_index', 'consecutive_groups', 'consumer', 'count_cycle', 'countable', 'difference', 'distinct_combinations', 'distinct_permutations', 'distribute', 'divide', 'duplicates_everseen', 'duplicates_justseen', 'exactly_n', 'filter_except', 'first', 'groupby_transform', 'ichunked', 'ilen', 'interleave', 'interleave_evenly', 'interleave_longest', 'intersperse', 'is_sorted', 'islice_extended', 'iterate', 'last', 'locate', 'lstrip', 'make_decorator', 'map_except', 'map_if', 'map_reduce', 'mark_ends', 'minmax', 'nth_or_last', 'nth_permutation', 'nth_product', 'numeric_range', 'one', 'only', 'padded', 'partitions', 'peekable', 'permutation_index', 'product_index', 'raise_', 'repeat_each', 'repeat_last', 'replace', 'rlocate', 'rstrip', 'run_length', 'sample', 'seekable', 'set_partitions', 'side_effect', 'sliced', 'sort_together', 'split_after', 'split_at', 'split_before', 'split_into', 'split_when', 'spy', 'stagger', 'strip', 'strictly_n', 'substrings', 'substrings_indexes', 'time_limited', 'unique_in_window', 'unique_to_each', 'unzip', 'value_chain', 'windowed', 'windowed_complete', 'with_iter', 'zip_broadcast', 'zip_equal', 'zip_offset', ] _marker = object() def chunked(iterable, n, strict=False): """Break *iterable* into lists of length *n*: >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) [[1, 2, 3], [4, 5, 6]] By the default, the last yielded list will have fewer than *n* elements if the length of *iterable* is not divisible by *n*: >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) [[1, 2, 3], [4, 5, 6], [7, 8]] To use a fill-in value instead, see the :func:`grouper` recipe. If the length of *iterable* is not divisible by *n* and *strict* is ``True``, then ``ValueError`` will be raised before the last list is yielded. """ iterator = iter(partial(take, n, iter(iterable)), []) if strict: if n is None: raise ValueError('n must not be None when using strict mode.') def ret(): for chunk in iterator: if len(chunk) != n: raise ValueError('iterable is not divisible by n.') yield chunk return iter(ret()) else: return iterator def first(iterable, default=_marker): """Return the first item of *iterable*, or *default* if *iterable* is empty. >>> first([0, 1, 2, 3]) 0 >>> first([], 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``. :func:`first` is useful when you have a generator of expensive-to-retrieve values and want any arbitrary one. It is marginally shorter than ``next(iter(iterable), default)``. 
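    A further example (an editorial addition, not part of the original
    docstring): *first* consumes exactly one item, so the rest of a
    generator remains available:

        >>> gen = (n * n for n in range(5))
        >>> first(gen)
        0
        >>> next(gen)  # the generator has advanced by exactly one item
        1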
""" try: return next(iter(iterable)) except StopIteration as e: if default is _marker: raise ValueError( 'first() was called on an empty iterable, and no ' 'default value was provided.' ) from e return default def last(iterable, default=_marker): """Return the last item of *iterable*, or *default* if *iterable* is empty. >>> last([0, 1, 2, 3]) 3 >>> last([], 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``. """ try: if isinstance(iterable, Sequence): return iterable[-1] # Work around https://bugs.python.org/issue38525 elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): return next(reversed(iterable)) else: return deque(iterable, maxlen=1)[-1] except (IndexError, TypeError, StopIteration): if default is _marker: raise ValueError( 'last() was called on an empty iterable, and no default was ' 'provided.' ) return default def nth_or_last(iterable, n, default=_marker): """Return the nth or the last item of *iterable*, or *default* if *iterable* is empty. >>> nth_or_last([0, 1, 2, 3], 2) 2 >>> nth_or_last([0, 1], 2) 1 >>> nth_or_last([], 0, 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``. """ return last(islice(iterable, n + 1), default=default) class peekable: """Wrap an iterator to allow lookahead and prepending elements. Call :meth:`peek` on the result to get the value that will be returned by :func:`next`. This won't advance the iterator: >>> p = peekable(['a', 'b']) >>> p.peek() 'a' >>> next(p) 'a' Pass :meth:`peek` a default value to return that instead of raising ``StopIteration`` when the iterator is exhausted. >>> p = peekable([]) >>> p.peek('hi') 'hi' peekables also offer a :meth:`prepend` method, which "inserts" items at the head of the iterable: >>> p = peekable([1, 2, 3]) >>> p.prepend(10, 11, 12) >>> next(p) 10 >>> p.peek() 11 >>> list(p) [11, 12, 1, 2, 3] peekables can be indexed. Index 0 is the item that will be returned by :func:`next`, index 1 is the item after that, and so on: The values up to the given index will be cached. >>> p = peekable(['a', 'b', 'c', 'd']) >>> p[0] 'a' >>> p[1] 'b' >>> next(p) 'a' Negative indexes are supported, but be aware that they will cache the remaining items in the source iterator, which may require significant storage. To check whether a peekable is exhausted, check its truth value: >>> p = peekable(['a', 'b']) >>> if p: # peekable has items ... list(p) ['a', 'b'] >>> if not p: # peekable is exhausted ... list(p) [] """ def __init__(self, iterable): self._it = iter(iterable) self._cache = deque() def __iter__(self): return self def __bool__(self): try: self.peek() except StopIteration: return False return True def peek(self, default=_marker): """Return the item that will be next returned from ``next()``. Return ``default`` if there are no items left. If ``default`` is not provided, raise ``StopIteration``. """ if not self._cache: try: self._cache.append(next(self._it)) except StopIteration: if default is _marker: raise return default return self._cache[0] def prepend(self, *items): """Stack up items to be the next ones returned from ``next()`` or ``self.peek()``. The items will be returned in first in, first out order:: >>> p = peekable([1, 2, 3]) >>> p.prepend(10, 11, 12) >>> next(p) 10 >>> list(p) [11, 12, 1, 2, 3] It is possible, by prepending items, to "resurrect" a peekable that previously raised ``StopIteration``. 
>>> p = peekable([]) >>> next(p) Traceback (most recent call last): ... StopIteration >>> p.prepend(1) >>> next(p) 1 >>> next(p) Traceback (most recent call last): ... StopIteration """ self._cache.extendleft(reversed(items)) def __next__(self): if self._cache: return self._cache.popleft() return next(self._it) def _get_slice(self, index): # Normalize the slice's arguments step = 1 if (index.step is None) else index.step if step > 0: start = 0 if (index.start is None) else index.start stop = maxsize if (index.stop is None) else index.stop elif step < 0: start = -1 if (index.start is None) else index.start stop = (-maxsize - 1) if (index.stop is None) else index.stop else: raise ValueError('slice step cannot be zero') # If either the start or stop index is negative, we'll need to cache # the rest of the iterable in order to slice from the right side. if (start < 0) or (stop < 0): self._cache.extend(self._it) # Otherwise we'll need to find the rightmost index and cache to that # point. else: n = min(max(start, stop) + 1, maxsize) cache_len = len(self._cache) if n >= cache_len: self._cache.extend(islice(self._it, n - cache_len)) return list(self._cache)[index] def __getitem__(self, index): if isinstance(index, slice): return self._get_slice(index) cache_len = len(self._cache) if index < 0: self._cache.extend(self._it) elif index >= cache_len: self._cache.extend(islice(self._it, index + 1 - cache_len)) return self._cache[index] def collate(*iterables, **kwargs): """Return a sorted merge of the items from each of several already-sorted *iterables*. >>> list(collate('ACDZ', 'AZ', 'JKL')) ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] Works lazily, keeping only the next value from each iterable in memory. Use :func:`collate` to, for example, perform a n-way mergesort of items that don't fit in memory. If a *key* function is specified, the iterables will be sorted according to its result: >>> key = lambda s: int(s) # Sort by numeric value, not by string >>> list(collate(['1', '10'], ['2', '11'], key=key)) ['1', '2', '10', '11'] If the *iterables* are sorted in descending order, set *reverse* to ``True``: >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) [5, 4, 3, 2, 1, 0] If the elements of the passed-in iterables are out of order, you might get unexpected results. On Python 3.5+, this function is an alias for :func:`heapq.merge`. """ warnings.warn( "collate is no longer part of more_itertools, use heapq.merge", DeprecationWarning, ) return merge(*iterables, **kwargs) def consumer(func): """Decorator that automatically advances a PEP-342-style "reverse iterator" to its first yield point so you don't have to call ``next()`` on it manually. >>> @consumer ... def tally(): ... i = 0 ... while True: ... print('Thing number %s is %s.' % (i, (yield))) ... i += 1 ... >>> t = tally() >>> t.send('red') Thing number 0 is red. >>> t.send('fish') Thing number 1 is fish. Without the decorator, you would have to call ``next(t)`` before ``t.send()`` could be used. """ @wraps(func) def wrapper(*args, **kwargs): gen = func(*args, **kwargs) next(gen) return gen return wrapper def ilen(iterable): """Return the number of items in *iterable*. >>> ilen(x for x in range(1000000) if x % 3 == 0) 333334 This consumes the iterable, so handle with care. """ # This approach was selected because benchmarks showed it's likely the # fastest of the known implementations at the time of writing. # See GitHub tracker: #236, #230. 
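    # Editorial note (an addition): a naive pure-Python equivalent, for
    # comparison only:
    #
    #     def ilen_naive(iterable):
    #         total = 0
    #         for _ in iterable:
    #             total += 1
    #         return total
    #
    # The zip/deque trick below keeps the loop in C: zip() pairs every item
    # with a fresh value from count(), deque(..., maxlen=0) consumes and
    # discards the pairs, and next(counter) then reports how many items
    # were consumed.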
counter = count() deque(zip(iterable, counter), maxlen=0) return next(counter) def iterate(func, start): """Return ``start``, ``func(start)``, ``func(func(start))``, ... >>> from itertools import islice >>> list(islice(iterate(lambda x: 2*x, 1), 10)) [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] """ while True: yield start start = func(start) def with_iter(context_manager): """Wrap an iterable in a ``with`` statement, so it closes once exhausted. For example, this will close the file when the iterator is exhausted:: upper_lines = (line.upper() for line in with_iter(open('foo'))) Any context manager which returns an iterable is a candidate for ``with_iter``. """ with context_manager as iterable: yield from iterable def one(iterable, too_short=None, too_long=None): """Return the first item from *iterable*, which is expected to contain only that item. Raise an exception if *iterable* is empty or has more than one item. :func:`one` is useful for ensuring that an iterable contains only one item. For example, it can be used to retrieve the result of a database query that is expected to return a single row. If *iterable* is empty, ``ValueError`` will be raised. You may specify a different exception with the *too_short* keyword: >>> it = [] >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: too few items in iterable (expected 1) >>> too_short = IndexError('too few items') >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... IndexError: too few items Similarly, if *iterable* contains more than one item, ``ValueError`` will be raised. You may specify a different exception with the *too_long* keyword: >>> it = ['too', 'many'] >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Expected exactly one item in iterable, but got 'too', 'many', and perhaps more. >>> too_long = RuntimeError >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... RuntimeError Note that :func:`one` attempts to advance *iterable* twice to ensure there is only one item. See :func:`spy` or :func:`peekable` to check iterable contents less destructively. """ it = iter(iterable) try: first_value = next(it) except StopIteration as e: raise ( too_short or ValueError('too few items in iterable (expected 1)') ) from e try: second_value = next(it) except StopIteration: pass else: msg = ( 'Expected exactly one item in iterable, but got {!r}, {!r}, ' 'and perhaps more.'.format(first_value, second_value) ) raise too_long or ValueError(msg) return first_value def raise_(exception, *args): raise exception(*args) def strictly_n(iterable, n, too_short=None, too_long=None): """Validate that *iterable* has exactly *n* items and return them if it does. If it has fewer than *n* items, call function *too_short* with those items. If it has more than *n* items, call function *too_long* with the first ``n + 1`` items. >>> iterable = ['a', 'b', 'c', 'd'] >>> n = 4 >>> list(strictly_n(iterable, n)) ['a', 'b', 'c', 'd'] By default, *too_short* and *too_long* are functions that raise ``ValueError``. >>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: too few items in iterable (got 2) >>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: too many items in iterable (got at least 3) You can instead supply functions that do something else.
*too_short* will be called with the number of items in *iterable*. *too_long* will be called with `n + 1`. >>> def too_short(item_count): ... raise RuntimeError >>> it = strictly_n('abcd', 6, too_short=too_short) >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... RuntimeError >>> def too_long(item_count): ... print('The boss is going to hear about this') >>> it = strictly_n('abcdef', 4, too_long=too_long) >>> list(it) The boss is going to hear about this ['a', 'b', 'c', 'd'] """ if too_short is None: too_short = lambda item_count: raise_( ValueError, 'Too few items in iterable (got {})'.format(item_count), ) if too_long is None: too_long = lambda item_count: raise_( ValueError, 'Too many items in iterable (got at least {})'.format(item_count), ) it = iter(iterable) for i in range(n): try: item = next(it) except StopIteration: too_short(i) return else: yield item try: next(it) except StopIteration: pass else: too_long(n + 1) def distinct_permutations(iterable, r=None): """Yield successive distinct permutations of the elements in *iterable*. >>> sorted(distinct_permutations([1, 0, 1])) [(0, 1, 1), (1, 0, 1), (1, 1, 0)] Equivalent to ``set(permutations(iterable))``, except duplicates are not generated and thrown away. For larger input sequences this is much more efficient. Duplicate permutations arise when there are duplicated elements in the input iterable. The number of items returned is `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of items input, and each `x_i` is the count of a distinct item in the input sequence. If *r* is given, only the *r*-length permutations are yielded. >>> sorted(distinct_permutations([1, 0, 1], r=2)) [(0, 1), (1, 0), (1, 1)] >>> sorted(distinct_permutations(range(3), r=2)) [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] """ # Algorithm: https://w.wiki/Qai def _full(A): while True: # Yield the permutation we have yield tuple(A) # Find the largest index i such that A[i] < A[i + 1] for i in range(size - 2, -1, -1): if A[i] < A[i + 1]: break # If no such index exists, this permutation is the last one else: return # Find the largest index j greater than j such that A[i] < A[j] for j in range(size - 1, i, -1): if A[i] < A[j]: break # Swap the value of A[i] with that of A[j], then reverse the # sequence from A[i + 1] to form the new permutation A[i], A[j] = A[j], A[i] A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1] # Algorithm: modified from the above def _partial(A, r): # Split A into the first r items and the last r items head, tail = A[:r], A[r:] right_head_indexes = range(r - 1, -1, -1) left_tail_indexes = range(len(tail)) while True: # Yield the permutation we have yield tuple(head) # Starting from the right, find the first index of the head with # value smaller than the maximum value of the tail - call it i. pivot = tail[-1] for i in right_head_indexes: if head[i] < pivot: break pivot = head[i] else: return # Starting from the left, find the first value of the tail # with a value greater than head[i] and swap. for j in left_tail_indexes: if tail[j] > head[i]: head[i], tail[j] = tail[j], head[i] break # If we didn't find one, start from the right and find the first # index of the head with a value greater than head[i] and swap. 
else: for j in right_head_indexes: if head[j] > head[i]: head[i], head[j] = head[j], head[i] break # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] tail += head[: i - r : -1] # head[i + 1:][::-1] i += 1 head[i:], tail[:] = tail[: r - i], tail[r - i :] items = sorted(iterable) size = len(items) if r is None: r = size if 0 < r <= size: return _full(items) if (r == size) else _partial(items, r) return iter(() if r else ((),)) def intersperse(e, iterable, n=1): """Intersperse filler element *e* among the items in *iterable*, leaving *n* items between each filler element. >>> list(intersperse('!', [1, 2, 3, 4, 5])) [1, '!', 2, '!', 3, '!', 4, '!', 5] >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) [1, 2, None, 3, 4, None, 5] """ if n == 0: raise ValueError('n must be > 0') elif n == 1: # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2... # islice(..., 1, None) -> x_0, e, x_1, e, x_2... return islice(interleave(repeat(e), iterable), 1, None) else: # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... # flatten(...) -> x_0, x_1, e, x_2, x_3... filler = repeat([e]) chunks = chunked(iterable, n) return flatten(islice(interleave(filler, chunks), 1, None)) def unique_to_each(*iterables): """Return the elements from each of the input iterables that aren't in the other input iterables. For example, suppose you have a set of packages, each with a set of dependencies:: {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} If you remove one package, which dependencies can also be removed? If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) [['A'], ['C'], ['D']] If there are duplicates in one input iterable that aren't in the others they will be duplicated in the output. Input order is preserved:: >>> unique_to_each("mississippi", "missouri") [['p', 'p'], ['o', 'u', 'r']] It is assumed that the elements of each iterable are hashable. """ pool = [list(it) for it in iterables] counts = Counter(chain.from_iterable(map(set, pool))) uniques = {element for element in counts if counts[element] == 1} return [list(filter(uniques.__contains__, it)) for it in pool] def windowed(seq, n, fillvalue=None, step=1): """Return a sliding window of width *n* over the given iterable. 
>>> all_windows = windowed([1, 2, 3, 4, 5], 3) >>> list(all_windows) [(1, 2, 3), (2, 3, 4), (3, 4, 5)] When the window is larger than the iterable, *fillvalue* is used in place of missing values: >>> list(windowed([1, 2, 3], 4)) [(1, 2, 3, None)] Each window will advance in increments of *step*: >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) [(1, 2, 3), (3, 4, 5), (5, 6, '!')] To slide into the iterable's items, use :func:`chain` to add filler items to the left: >>> iterable = [1, 2, 3, 4] >>> n = 3 >>> padding = [None] * (n - 1) >>> list(windowed(chain(padding, iterable), 3)) [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] """ if n < 0: raise ValueError('n must be >= 0') if n == 0: yield tuple() return if step < 1: raise ValueError('step must be >= 1') window = deque(maxlen=n) i = n for _ in map(window.append, seq): i -= 1 if not i: i = step yield tuple(window) size = len(window) if size < n: yield tuple(chain(window, repeat(fillvalue, n - size))) elif 0 < i < min(step, n): window += (fillvalue,) * i yield tuple(window) def substrings(iterable): """Yield all of the substrings of *iterable*. >>> [''.join(s) for s in substrings('more')] ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] Note that non-string iterables can also be subdivided. >>> list(substrings([0, 1, 2])) [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] """ # The length-1 substrings seq = [] for item in iter(iterable): seq.append(item) yield (item,) seq = tuple(seq) item_count = len(seq) # And the rest for n in range(2, item_count + 1): for i in range(item_count - n + 1): yield seq[i : i + n] def substrings_indexes(seq, reverse=False): """Yield all substrings and their positions in *seq* The items yielded will be a tuple of the form ``(substr, i, j)``, where ``substr == seq[i:j]``. This function only works for iterables that support slicing, such as ``str`` objects. >>> for item in substrings_indexes('more'): ... print(item) ('m', 0, 1) ('o', 1, 2) ('r', 2, 3) ('e', 3, 4) ('mo', 0, 2) ('or', 1, 3) ('re', 2, 4) ('mor', 0, 3) ('ore', 1, 4) ('more', 0, 4) Set *reverse* to ``True`` to yield the same items in the opposite order. """ r = range(1, len(seq) + 1) if reverse: r = reversed(r) return ( (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) ) class bucket: """Wrap *iterable* and return an object that buckets it into child iterables based on a *key* function. >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character >>> sorted(list(s)) # Get the keys ['a', 'b', 'c'] >>> a_iterable = s['a'] >>> next(a_iterable) 'a1' >>> next(a_iterable) 'a2' >>> list(s['b']) ['b1', 'b2', 'b3'] The original iterable will be advanced and its items will be cached until they are used by the child iterables. This may require significant storage. By default, attempting to select a bucket to which no items belong will exhaust the iterable and cache all values. If you specify a *validator* function, selected buckets will instead be checked against it.
>>> from itertools import count >>> it = count(1, 2) # Infinite sequence of odd numbers >>> key = lambda x: x % 10 # Bucket by last digit >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only >>> s = bucket(it, key=key, validator=validator) >>> 2 in s False >>> list(s[2]) [] """ def __init__(self, iterable, key, validator=None): self._it = iter(iterable) self._key = key self._cache = defaultdict(deque) self._validator = validator or (lambda x: True) def __contains__(self, value): if not self._validator(value): return False try: item = next(self[value]) except StopIteration: return False else: self._cache[value].appendleft(item) return True def _get_values(self, value): """ Helper to yield items from the parent iterator that match *value*. Items that don't match are stored in the local cache as they are encountered. """ while True: # If we've cached some items that match the target value, emit # the first one and evict it from the cache. if self._cache[value]: yield self._cache[value].popleft() # Otherwise we need to advance the parent iterator to search for # a matching item, caching the rest. else: while True: try: item = next(self._it) except StopIteration: return item_value = self._key(item) if item_value == value: yield item break elif self._validator(item_value): self._cache[item_value].append(item) def __iter__(self): for item in self._it: item_value = self._key(item) if self._validator(item_value): self._cache[item_value].append(item) yield from self._cache.keys() def __getitem__(self, value): if not self._validator(value): return iter(()) return self._get_values(value) def spy(iterable, n=1): """Return a 2-tuple with a list containing the first *n* elements of *iterable*, and an iterator with the same items as *iterable*. This allows you to "look ahead" at the items in the iterable without advancing it. There is one item in the list by default: >>> iterable = 'abcdefg' >>> head, iterable = spy(iterable) >>> head ['a'] >>> list(iterable) ['a', 'b', 'c', 'd', 'e', 'f', 'g'] You may use unpacking to retrieve items instead of lists: >>> (head,), iterable = spy('abcdefg') >>> head 'a' >>> (first, second), iterable = spy('abcdefg', 2) >>> first 'a' >>> second 'b' The number of items requested can be larger than the number of items in the iterable: >>> iterable = [1, 2, 3, 4, 5] >>> head, iterable = spy(iterable, 10) >>> head [1, 2, 3, 4, 5] >>> list(iterable) [1, 2, 3, 4, 5] """ it = iter(iterable) head = take(n, it) return head.copy(), chain(head, it) def interleave(*iterables): """Return a new iterable yielding from each iterable in turn, until the shortest is exhausted. >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) [1, 4, 6, 2, 5, 7] For a version that doesn't terminate after the shortest iterable is exhausted, see :func:`interleave_longest`. """ return chain.from_iterable(zip(*iterables)) def interleave_longest(*iterables): """Return a new iterable yielding from each iterable in turn, skipping any that are exhausted. >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) [1, 4, 6, 2, 5, 7, 3, 8] This function produces the same output as :func:`roundrobin`, but may perform better for some inputs (in particular when the number of iterables is large). """ i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) return (x for x in i if x is not _marker) def interleave_evenly(iterables, lengths=None): """ Interleave multiple iterables so that their elements are evenly distributed throughout the output sequence. 
>>> iterables = [1, 2, 3, 4, 5], ['a', 'b'] >>> list(interleave_evenly(iterables)) [1, 2, 'a', 3, 4, 'b', 5] >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]] >>> list(interleave_evenly(iterables)) [1, 6, 4, 2, 7, 3, 8, 5] This function requires iterables of known length. Iterables without ``__len__()`` can be used by manually specifying lengths with *lengths*: >>> from itertools import combinations, repeat >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']] >>> lengths = [4 * (4 - 1) // 2, 3] >>> list(interleave_evenly(iterables, lengths=lengths)) [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c'] Based on Bresenham's algorithm. """ if lengths is None: try: lengths = [len(it) for it in iterables] except TypeError: raise ValueError( 'Iterable lengths could not be determined automatically. ' 'Specify them with the lengths keyword.' ) elif len(iterables) != len(lengths): raise ValueError('Mismatching number of iterables and lengths.') dims = len(lengths) # sort iterables by length, descending lengths_permute = sorted( range(dims), key=lambda i: lengths[i], reverse=True ) lengths_desc = [lengths[i] for i in lengths_permute] iters_desc = [iter(iterables[i]) for i in lengths_permute] # the longest iterable is the primary one (Bresenham: the longest # distance along an axis) delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:] iter_primary, iters_secondary = iters_desc[0], iters_desc[1:] errors = [delta_primary // dims] * len(deltas_secondary) to_yield = sum(lengths) while to_yield: yield next(iter_primary) to_yield -= 1 # update errors for each secondary iterable errors = [e - delta for e, delta in zip(errors, deltas_secondary)] # those iterables for which the error is negative are yielded # ("diagonal step" in Bresenham) for i, e in enumerate(errors): if e < 0: yield next(iters_secondary[i]) to_yield -= 1 errors[i] += delta_primary def collapse(iterable, base_type=None, levels=None): """Flatten an iterable with multiple levels of nesting (e.g., a list of lists of tuples) into non-iterable types. >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] >>> list(collapse(iterable)) [1, 2, 3, 4, 5, 6] Binary and text strings are not considered iterable and will not be collapsed. To avoid collapsing other types, specify *base_type*: >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']] >>> list(collapse(iterable, base_type=tuple)) ['ab', ('cd', 'ef'), 'gh', 'ij'] Specify *levels* to stop flattening after a certain level: >>> iterable = [('a', ['b']), ('c', ['d'])] >>> list(collapse(iterable)) # Fully flattened ['a', 'b', 'c', 'd'] >>> list(collapse(iterable, levels=1)) # Only one level flattened ['a', ['b'], 'c', ['d']] """ def walk(node, level): if ( ((levels is not None) and (level > levels)) or isinstance(node, (str, bytes)) or ((base_type is not None) and isinstance(node, base_type)) ): yield node return try: tree = iter(node) except TypeError: yield node return else: for child in tree: yield from walk(child, level + 1) yield from walk(iterable, 0) def side_effect(func, iterable, chunk_size=None, before=None, after=None): """Invoke *func* on each item in *iterable* (or on each *chunk_size* group of items) before yielding the item. `func` must be a function that takes a single argument. Its return value will be discarded. *before* and *after* are optional functions that take no arguments. They will be executed before iteration starts and after it ends, respectively. `side_effect` can be used for logging, updating progress bars, or anything that is not functionally "pure." 
Emitting a status message: >>> from more_itertools import consume >>> func = lambda item: print('Received {}'.format(item)) >>> consume(side_effect(func, range(2))) Received 0 Received 1 Operating on chunks of items: >>> pair_sums = [] >>> func = lambda chunk: pair_sums.append(sum(chunk)) >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2)) [0, 1, 2, 3, 4, 5] >>> list(pair_sums) [1, 5, 9] Writing to a file-like object: >>> from io import StringIO >>> from more_itertools import consume >>> f = StringIO() >>> func = lambda x: print(x, file=f) >>> before = lambda: print(u'HEADER', file=f) >>> after = f.close >>> it = [u'a', u'b', u'c'] >>> consume(side_effect(func, it, before=before, after=after)) >>> f.closed True """ try: if before is not None: before() if chunk_size is None: for item in iterable: func(item) yield item else: for chunk in chunked(iterable, chunk_size): func(chunk) yield from chunk finally: if after is not None: after() def sliced(seq, n, strict=False): """Yield slices of length *n* from the sequence *seq*. >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) [(1, 2, 3), (4, 5, 6)] By default, the last yielded slice will have fewer than *n* elements if the length of *seq* is not divisible by *n*: >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) [(1, 2, 3), (4, 5, 6), (7, 8)] If the length of *seq* is not divisible by *n* and *strict* is ``True``, then ``ValueError`` will be raised before the last slice is yielded. This function will only work for iterables that support slicing. For non-sliceable iterables, see :func:`chunked`. """ iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) if strict: def ret(): for _slice in iterator: if len(_slice) != n: raise ValueError("seq is not divisible by n.") yield _slice return iter(ret()) else: return iterator def split_at(iterable, pred, maxsplit=-1, keep_separator=False): """Yield lists of items from *iterable*, where each list is delimited by an item where callable *pred* returns ``True``. >>> list(split_at('abcdcba', lambda x: x == 'b')) [['a'], ['c', 'd', 'c'], ['a']] >>> list(split_at(range(10), lambda n: n % 2 == 1)) [[0], [2], [4], [6], [8], []] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) [[0], [2], [4, 5, 6, 7, 8, 9]] By default, the delimiting items are not included in the output. To include them, set *keep_separator* to ``True``. >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] """ if maxsplit == 0: yield list(iterable) return buf = [] it = iter(iterable) for item in it: if pred(item): yield buf if keep_separator: yield [item] if maxsplit == 1: yield list(it) return buf = [] maxsplit -= 1 else: buf.append(item) yield buf def split_before(iterable, pred, maxsplit=-1): """Yield lists of items from *iterable*, where each list ends just before an item for which callable *pred* returns ``True``: >>> list(split_before('OneTwo', lambda s: s.isupper())) [['O', 'n', 'e'], ['T', 'w', 'o']] >>> list(split_before(range(10), lambda n: n % 3 == 0)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] At most *maxsplit* splits are done.
If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] """ if maxsplit == 0: yield list(iterable) return buf = [] it = iter(iterable) for item in it: if pred(item) and buf: yield buf if maxsplit == 1: yield [item] + list(it) return buf = [] maxsplit -= 1 buf.append(item) if buf: yield buf def split_after(iterable, pred, maxsplit=-1): """Yield lists of items from *iterable*, where each list ends with an item where callable *pred* returns ``True``: >>> list(split_after('one1two2', lambda s: s.isdigit())) [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] >>> list(split_after(range(10), lambda n: n % 3 == 0)) [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] """ if maxsplit == 0: yield list(iterable) return buf = [] it = iter(iterable) for item in it: buf.append(item) if pred(item) and buf: yield buf if maxsplit == 1: yield list(it) return buf = [] maxsplit -= 1 if buf: yield buf def split_when(iterable, pred, maxsplit=-1): """Split *iterable* into pieces based on the output of *pred*. *pred* should be a function that takes successive pairs of items and returns ``True`` if the iterable should be split in between them. For example, to find runs of increasing numbers, split the iterable when element ``i`` is larger than element ``i + 1``: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) [[1, 2, 3, 3], [2, 5], [2, 4], [2]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], ... lambda x, y: x > y, maxsplit=2)) [[1, 2, 3, 3], [2, 5], [2, 4, 2]] """ if maxsplit == 0: yield list(iterable) return it = iter(iterable) try: cur_item = next(it) except StopIteration: return buf = [cur_item] for next_item in it: if pred(cur_item, next_item): yield buf if maxsplit == 1: yield [next_item] + list(it) return buf = [] maxsplit -= 1 buf.append(next_item) cur_item = next_item yield buf def split_into(iterable, sizes): """Yield a list of sequential items from *iterable* of length 'n' for each integer 'n' in *sizes*. >>> list(split_into([1,2,3,4,5,6], [1,2,3])) [[1], [2, 3], [4, 5, 6]] If the sum of *sizes* is smaller than the length of *iterable*, then the remaining items of *iterable* will not be returned. >>> list(split_into([1,2,3,4,5,6], [2,3])) [[1, 2], [3, 4, 5]] If the sum of *sizes* is larger than the length of *iterable*, fewer items will be returned in the iteration that overruns *iterable* and further lists will be empty: >>> list(split_into([1,2,3,4], [1,2,3,4])) [[1], [2, 3], [4], []] When a ``None`` object is encountered in *sizes*, the returned list will contain items up to the end of *iterable* the same way that itertools.slice does: >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] :func:`split_into` can be useful for grouping a series of items where the sizes of the groups are not uniform. An example would be where in a row from a table, multiple columns represent elements of the same feature (e.g. a point represented by x,y,z) but, the format is not the same for all columns. 
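    For instance (an editorial addition), a row holding two labeled 3-D
    points can be regrouped like so:

        >>> row = ['p1', 1.0, 2.0, 3.0, 'p2', 4.0, 5.0, 6.0]
        >>> list(split_into(row, [4, 4]))
        [['p1', 1.0, 2.0, 3.0], ['p2', 4.0, 5.0, 6.0]]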
""" # convert the iterable argument into an iterator so its contents can # be consumed by islice in case it is a generator it = iter(iterable) for size in sizes: if size is None: yield list(it) return else: yield list(islice(it, size)) def padded(iterable, fillvalue=None, n=None, next_multiple=False): """Yield the elements from *iterable*, followed by *fillvalue*, such that at least *n* items are emitted. >>> list(padded([1, 2, 3], '?', 5)) [1, 2, 3, '?', '?'] If *next_multiple* is ``True``, *fillvalue* will be emitted until the number of items emitted is a multiple of *n*:: >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True)) [1, 2, 3, 4, None, None] If *n* is ``None``, *fillvalue* will be emitted indefinitely. """ it = iter(iterable) if n is None: yield from chain(it, repeat(fillvalue)) elif n < 1: raise ValueError('n must be at least 1') else: item_count = 0 for item in it: yield item item_count += 1 remaining = (n - item_count) % n if next_multiple else n - item_count for _ in range(remaining): yield fillvalue def repeat_each(iterable, n=2): """Repeat each element in *iterable* *n* times. >>> list(repeat_each('ABC', 3)) ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C'] """ return chain.from_iterable(map(repeat, iterable, repeat(n))) def repeat_last(iterable, default=None): """After the *iterable* is exhausted, keep yielding its last element. >>> list(islice(repeat_last(range(3)), 5)) [0, 1, 2, 2, 2] If the iterable is empty, yield *default* forever:: >>> list(islice(repeat_last(range(0), 42), 5)) [42, 42, 42, 42, 42] """ item = _marker for item in iterable: yield item final = default if item is _marker else item yield from repeat(final) def distribute(n, iterable): """Distribute the items from *iterable* among *n* smaller iterables. >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) >>> list(group_1) [1, 3, 5] >>> list(group_2) [2, 4, 6] If the length of *iterable* is not evenly divisible by *n*, then the length of the returned iterables will not be identical: >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) >>> [list(c) for c in children] [[1, 4, 7], [2, 5], [3, 6]] If the length of *iterable* is smaller than *n*, then the last returned iterables will be empty: >>> children = distribute(5, [1, 2, 3]) >>> [list(c) for c in children] [[1], [2], [3], [], []] This function uses :func:`itertools.tee` and may require significant storage. If you need the order items in the smaller iterables to match the original iterable, see :func:`divide`. """ if n < 1: raise ValueError('n must be at least 1') children = tee(iterable, n) return [islice(it, index, None, n) for index, it in enumerate(children)] def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): """Yield tuples whose elements are offset from *iterable*. The amount by which the `i`-th item in each tuple is offset is given by the `i`-th item in *offsets*. >>> list(stagger([0, 1, 2, 3])) [(None, 0, 1), (0, 1, 2), (1, 2, 3)] >>> list(stagger(range(8), offsets=(0, 2, 4))) [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)] By default, the sequence will end when the final element of a tuple is the last item in the iterable. To continue until the first element of a tuple is the last item in the iterable, set *longest* to ``True``:: >>> list(stagger([0, 1, 2, 3], longest=True)) [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] By default, ``None`` will be used to replace offsets beyond the end of the sequence. Specify *fillvalue* to use some other value. 
""" children = tee(iterable, len(offsets)) return zip_offset( *children, offsets=offsets, longest=longest, fillvalue=fillvalue ) class UnequalIterablesError(ValueError): def __init__(self, details=None): msg = 'Iterables have different lengths' if details is not None: msg += (': index 0 has length {}; index {} has length {}').format( *details ) super().__init__(msg) def _zip_equal_generator(iterables): for combo in zip_longest(*iterables, fillvalue=_marker): for val in combo: if val is _marker: raise UnequalIterablesError() yield combo def _zip_equal(*iterables): # Check whether the iterables are all the same size. try: first_size = len(iterables[0]) for i, it in enumerate(iterables[1:], 1): size = len(it) if size != first_size: break else: # If we didn't break out, we can use the built-in zip. return zip(*iterables) # If we did break out, there was a mismatch. raise UnequalIterablesError(details=(first_size, i, size)) # If any one of the iterables didn't have a length, start reading # them until one runs out. except TypeError: return _zip_equal_generator(iterables) def zip_equal(*iterables): """``zip`` the input *iterables* together, but raise ``UnequalIterablesError`` if they aren't all the same length. >>> it_1 = range(3) >>> it_2 = iter('abc') >>> list(zip_equal(it_1, it_2)) [(0, 'a'), (1, 'b'), (2, 'c')] >>> it_1 = range(3) >>> it_2 = iter('abcd') >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... more_itertools.more.UnequalIterablesError: Iterables have different lengths """ if hexversion >= 0x30A00A6: warnings.warn( ( 'zip_equal will be removed in a future version of ' 'more-itertools. Use the builtin zip function with ' 'strict=True instead.' ), DeprecationWarning, ) return _zip_equal(*iterables) def zip_offset(*iterables, offsets, longest=False, fillvalue=None): """``zip`` the input *iterables* together, but offset the `i`-th iterable by the `i`-th item in *offsets*. >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] This can be used as a lightweight alternative to SciPy or pandas to analyze data sets in which some series have a lead or lag relationship. By default, the sequence will end when the shortest iterable is exhausted. To continue until the longest iterable is exhausted, set *longest* to ``True``. >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] By default, ``None`` will be used to replace offsets beyond the end of the sequence. Specify *fillvalue* to use some other value. """ if len(iterables) != len(offsets): raise ValueError("Number of iterables and offsets didn't match") staggered = [] for it, n in zip(iterables, offsets): if n < 0: staggered.append(chain(repeat(fillvalue, -n), it)) elif n > 0: staggered.append(islice(it, n, None)) else: staggered.append(it) if longest: return zip_longest(*staggered, fillvalue=fillvalue) return zip(*staggered) def sort_together(iterables, key_list=(0,), key=None, reverse=False): """Return the input iterables sorted together, with *key_list* as the priority for sorting. All iterables are trimmed to the length of the shortest one. This can be used like the sorting function in a spreadsheet. If each iterable represents a column of data, the key list determines which columns are used for sorting. 
By default, all iterables are sorted using the ``0``-th iterable:: >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] >>> sort_together(iterables) [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] Set a different key list to sort according to another iterable. Specifying multiple keys dictates how ties are broken:: >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] >>> sort_together(iterables, key_list=(1, 2)) [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] To sort by a function of the elements of the iterable, pass a *key* function. Its arguments are the elements of the iterables corresponding to the key list:: >>> names = ('a', 'b', 'c') >>> lengths = (1, 2, 3) >>> widths = (5, 2, 1) >>> def area(length, width): ... return length * width >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] Set *reverse* to ``True`` to sort in descending order. >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) [(3, 2, 1), ('a', 'b', 'c')] """ if key is None: # if there is no key function, the key argument to sorted is an # itemgetter key_argument = itemgetter(*key_list) else: # if there is a key function, call it with the items at the offsets # specified by the key function as arguments key_list = list(key_list) if len(key_list) == 1: # if key_list contains a single item, pass the item at that offset # as the only argument to the key function key_offset = key_list[0] key_argument = lambda zipped_items: key(zipped_items[key_offset]) else: # if key_list contains multiple items, use itemgetter to return a # tuple of items, which we pass as *args to the key function get_key_items = itemgetter(*key_list) key_argument = lambda zipped_items: key( *get_key_items(zipped_items) ) return list( zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) ) def unzip(iterable): """The inverse of :func:`zip`, this function disaggregates the elements of the zipped *iterable*. The ``i``-th iterable contains the ``i``-th element from each element of the zipped iterable. The first element is used to determine the length of the remaining elements. >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] >>> letters, numbers = unzip(iterable) >>> list(letters) ['a', 'b', 'c', 'd'] >>> list(numbers) [1, 2, 3, 4] This is similar to using ``zip(*iterable)``, but it avoids reading *iterable* into memory. Note, however, that this function uses :func:`itertools.tee` and thus may require significant storage. """ head, iterable = spy(iter(iterable)) if not head: # empty iterable, e.g. zip([], [], []) return () # spy returns a one-length iterable as head head = head[0] iterables = tee(iterable, len(head)) def itemgetter(i): def getter(obj): try: return obj[i] except IndexError: # basically if we have an iterable like # iter([(1, 2, 3), (4, 5), (6,)]) # the second unzipped iterable would fail at the third tuple # since it would try to access tup[1] # same with the third unzipped iterable and the second tuple # to support these "improperly zipped" iterables, # we create a custom itemgetter # which just stops the unzipped iterables # at first length mismatch raise StopIteration return getter return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) def divide(n, iterable): """Divide the elements from *iterable* into *n* parts, maintaining order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) >>> list(group_1) [1, 2, 3] >>> list(group_2) [4, 5, 6] If the length of *iterable* is not evenly divisible by *n*, then the length of the returned iterables will not be identical: >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) >>> [list(c) for c in children] [[1, 2, 3], [4, 5], [6, 7]] If the length of the iterable is smaller than n, then the last returned iterables will be empty: >>> children = divide(5, [1, 2, 3]) >>> [list(c) for c in children] [[1], [2], [3], [], []] This function will exhaust the iterable before returning and may require significant storage. If order is not important, see :func:`distribute`, which does not first pull the iterable into memory. """ if n < 1: raise ValueError('n must be at least 1') try: iterable[:0] except TypeError: seq = tuple(iterable) else: seq = iterable q, r = divmod(len(seq), n) ret = [] stop = 0 for i in range(1, n + 1): start = stop stop += q + 1 if i <= r else q ret.append(iter(seq[start:stop])) return ret def always_iterable(obj, base_type=(str, bytes)): """If *obj* is iterable, return an iterator over its items:: >>> obj = (1, 2, 3) >>> list(always_iterable(obj)) [1, 2, 3] If *obj* is not iterable, return a one-item iterable containing *obj*:: >>> obj = 1 >>> list(always_iterable(obj)) [1] If *obj* is ``None``, return an empty iterable: >>> obj = None >>> list(always_iterable(None)) [] By default, binary and text strings are not considered iterable:: >>> obj = 'foo' >>> list(always_iterable(obj)) ['foo'] If *base_type* is set, objects for which ``isinstance(obj, base_type)`` returns ``True`` won't be considered iterable. >>> obj = {'a': 1} >>> list(always_iterable(obj)) # Iterate over the dict's keys ['a'] >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit [{'a': 1}] Set *base_type* to ``None`` to avoid any special handling and treat objects Python considers iterable as iterable: >>> obj = 'foo' >>> list(always_iterable(obj, base_type=None)) ['f', 'o', 'o'] """ if obj is None: return iter(()) if (base_type is not None) and isinstance(obj, base_type): return iter((obj,)) try: return iter(obj) except TypeError: return iter((obj,)) def adjacent(predicate, iterable, distance=1): """Return an iterable over `(bool, item)` tuples where the `item` is drawn from *iterable* and the `bool` indicates whether that item satisfies the *predicate* or is adjacent to an item that does. For example, to find whether items are adjacent to a ``3``:: >>> list(adjacent(lambda x: x == 3, range(6))) [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] Set *distance* to change what counts as adjacent. For example, to find whether items are two places away from a ``3``: >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] This is useful for contextualizing the results of a search function. For example, a code comparison tool might want to identify lines that have changed, but also surrounding lines to give the viewer of the diff context. The predicate function will only be called once for each item in the iterable. See also :func:`groupby_transform`, which can be used with this function to group ranges of items with the same `bool` value. 
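    For instance (an editorial addition), grouping the runs described above:

        >>> pairs = adjacent(lambda x: x == 3, range(6))
        >>> grouped = groupby_transform(pairs, lambda p: p[0], lambda p: p[1])
        >>> [(k, list(g)) for k, g in grouped]
        [(False, [0, 1]), (True, [2, 3, 4]), (False, [5])]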
""" # Allow distance=0 mainly for testing that it reproduces results with map() if distance < 0: raise ValueError('distance must be at least 0') i1, i2 = tee(iterable) padding = [False] * distance selected = chain(padding, map(predicate, i1), padding) adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) return zip(adjacent_to_selected, i2) def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): """An extension of :func:`itertools.groupby` that can apply transformations to the grouped data. * *keyfunc* is a function computing a key value for each item in *iterable* * *valuefunc* is a function that transforms the individual items from *iterable* after grouping * *reducefunc* is a function that transforms each group of items >>> iterable = 'aAAbBBcCC' >>> keyfunc = lambda k: k.upper() >>> valuefunc = lambda v: v.lower() >>> reducefunc = lambda g: ''.join(g) >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] Each optional argument defaults to an identity function if not specified. :func:`groupby_transform` is useful when grouping elements of an iterable using a separate iterable as the key. To do this, :func:`zip` the iterables and pass a *keyfunc* that extracts the first element and a *valuefunc* that extracts the second element:: >>> from operator import itemgetter >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] >>> values = 'abcdefghi' >>> iterable = zip(keys, values) >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) >>> [(k, ''.join(g)) for k, g in grouper] [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] Note that the order of items in the iterable is significant. Only adjacent items are grouped together, so if you don't want any duplicate groups, you should sort the iterable by the key function. """ ret = groupby(iterable, keyfunc) if valuefunc: ret = ((k, map(valuefunc, g)) for k, g in ret) if reducefunc: ret = ((k, reducefunc(g)) for k, g in ret) return ret class numeric_range(abc.Sequence, abc.Hashable): """An extension of the built-in ``range()`` function whose arguments can be any orderable numeric type. With only *stop* specified, *start* defaults to ``0`` and *step* defaults to ``1``. The output items will match the type of *stop*: >>> list(numeric_range(3.5)) [0.0, 1.0, 2.0, 3.0] With only *start* and *stop* specified, *step* defaults to ``1``. The output items will match the type of *start*: >>> from decimal import Decimal >>> start = Decimal('2.1') >>> stop = Decimal('5.1') >>> list(numeric_range(start, stop)) [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] With *start*, *stop*, and *step* specified the output items will match the type of ``start + step``: >>> from fractions import Fraction >>> start = Fraction(1, 2) # Start at 1/2 >>> stop = Fraction(5, 2) # End at 5/2 >>> step = Fraction(1, 2) # Count by 1/2 >>> list(numeric_range(start, stop, step)) [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] If *step* is zero, ``ValueError`` is raised. Negative steps are supported: >>> list(numeric_range(3, -1, -1.0)) [3.0, 2.0, 1.0, 0.0] Be aware of the limitations of floating point numbers; the representation of the yielded numbers may be surprising. 
``datetime.datetime`` objects can be used for *start* and *stop*, if *step* is a ``datetime.timedelta`` object: >>> import datetime >>> start = datetime.datetime(2019, 1, 1) >>> stop = datetime.datetime(2019, 1, 3) >>> step = datetime.timedelta(days=1) >>> items = iter(numeric_range(start, stop, step)) >>> next(items) datetime.datetime(2019, 1, 1, 0, 0) >>> next(items) datetime.datetime(2019, 1, 2, 0, 0) """ _EMPTY_HASH = hash(range(0, 0)) def __init__(self, *args): argc = len(args) if argc == 1: (self._stop,) = args self._start = type(self._stop)(0) self._step = type(self._stop - self._start)(1) elif argc == 2: self._start, self._stop = args self._step = type(self._stop - self._start)(1) elif argc == 3: self._start, self._stop, self._step = args elif argc == 0: raise TypeError( 'numeric_range expected at least ' '1 argument, got {}'.format(argc) ) else: raise TypeError( 'numeric_range expected at most ' '3 arguments, got {}'.format(argc) ) self._zero = type(self._step)(0) if self._step == self._zero: raise ValueError('numeric_range() arg 3 must not be zero') self._growing = self._step > self._zero self._init_len() def __bool__(self): if self._growing: return self._start < self._stop else: return self._start > self._stop def __contains__(self, elem): if self._growing: if self._start <= elem < self._stop: return (elem - self._start) % self._step == self._zero else: if self._start >= elem > self._stop: return (self._start - elem) % (-self._step) == self._zero return False def __eq__(self, other): if isinstance(other, numeric_range): empty_self = not bool(self) empty_other = not bool(other) if empty_self or empty_other: return empty_self and empty_other # True if both empty else: return ( self._start == other._start and self._step == other._step and self._get_by_index(-1) == other._get_by_index(-1) ) else: return False def __getitem__(self, key): if isinstance(key, int): return self._get_by_index(key) elif isinstance(key, slice): step = self._step if key.step is None else key.step * self._step if key.start is None or key.start <= -self._len: start = self._start elif key.start >= self._len: start = self._stop else: # -self._len < key.start < self._len start = self._get_by_index(key.start) if key.stop is None or key.stop >= self._len: stop = self._stop elif key.stop <= -self._len: stop = self._start else: # -self._len < key.stop < self._len stop = self._get_by_index(key.stop) return numeric_range(start, stop, step) else: raise TypeError( 'numeric range indices must be ' 'integers or slices, not {}'.format(type(key).__name__) ) def __hash__(self): if self: return hash((self._start, self._get_by_index(-1), self._step)) else: return self._EMPTY_HASH def __iter__(self): values = (self._start + (n * self._step) for n in count()) if self._growing: return takewhile(partial(gt, self._stop), values) else: return takewhile(partial(lt, self._stop), values) def __len__(self): return self._len def _init_len(self): if self._growing: start = self._start stop = self._stop step = self._step else: start = self._stop stop = self._start step = -self._step distance = stop - start if distance <= self._zero: self._len = 0 else: # distance > 0 and step > 0: regular euclidean division q, r = divmod(distance, step) self._len = int(q) + int(r != self._zero) def __reduce__(self): return numeric_range, (self._start, self._stop, self._step) def __repr__(self): if self._step == 1: return "numeric_range({}, {})".format( repr(self._start), repr(self._stop) ) else: return "numeric_range({}, {}, {})".format( repr(self._start), 
repr(self._stop), repr(self._step) ) def __reversed__(self): return iter( numeric_range( self._get_by_index(-1), self._start - self._step, -self._step ) ) def count(self, value): return int(value in self) def index(self, value): if self._growing: if self._start <= value < self._stop: q, r = divmod(value - self._start, self._step) if r == self._zero: return int(q) else: if self._start >= value > self._stop: q, r = divmod(self._start - value, -self._step) if r == self._zero: return int(q) raise ValueError("{} is not in numeric range".format(value)) def _get_by_index(self, i): if i < 0: i += self._len if i < 0 or i >= self._len: raise IndexError("numeric range object index out of range") return self._start + i * self._step def count_cycle(iterable, n=None): """Cycle through the items from *iterable* up to *n* times, yielding the number of completed cycles along with each item. If *n* is omitted the process repeats indefinitely. >>> list(count_cycle('AB', 3)) [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] """ iterable = tuple(iterable) if not iterable: return iter(()) counter = count() if n is None else range(n) return ((i, item) for i in counter for item in iterable) def mark_ends(iterable): """Yield 3-tuples of the form ``(is_first, is_last, item)``. >>> list(mark_ends('ABC')) [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] Use this when looping over an iterable to take special action on its first and/or last items: >>> iterable = ['Header', 100, 200, 'Footer'] >>> total = 0 >>> for is_first, is_last, item in mark_ends(iterable): ... if is_first: ... continue # Skip the header ... if is_last: ... continue # Skip the footer ... total += item >>> print(total) 300 """ it = iter(iterable) try: b = next(it) except StopIteration: return try: for i in count(): a = b b = next(it) yield i == 0, False, a except StopIteration: yield i == 0, True, a def locate(iterable, pred=bool, window_size=None): """Yield the index of each item in *iterable* for which *pred* returns ``True``. *pred* defaults to :func:`bool`, which will select truthy items: >>> list(locate([0, 1, 1, 0, 1, 0, 0])) [1, 2, 4] Set *pred* to a custom function to, e.g., find the indexes for a particular item. >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) [1, 3] If *window_size* is given, then the *pred* function will be called with that many items. This enables searching for sub-sequences: >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] >>> pred = lambda *args: args == (1, 2, 3) >>> list(locate(iterable, pred=pred, window_size=3)) [1, 5, 9] Use with :func:`seekable` to find indexes and then retrieve the associated items: >>> from itertools import count >>> from more_itertools import seekable >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count()) >>> it = seekable(source) >>> pred = lambda x: x > 100 >>> indexes = locate(it, pred=pred) >>> i = next(indexes) >>> it.seek(i) >>> next(it) 106 """ if window_size is None: return compress(count(), map(pred, iterable)) if window_size < 1: raise ValueError('window size must be at least 1') it = windowed(iterable, window_size, fillvalue=_marker) return compress(count(), starmap(pred, it)) def lstrip(iterable, pred): """Yield the items from *iterable*, but strip any from the beginning for which *pred* returns ``True``. 
For example, to remove a set of items from the start of an iterable: >>> iterable = (None, False, None, 1, 2, None, 3, False, None) >>> pred = lambda x: x in {None, False, ''} >>> list(lstrip(iterable, pred)) [1, 2, None, 3, False, None] This function is analogous to :func:`str.lstrip`, and is essentially a wrapper for :func:`itertools.dropwhile`. """ return dropwhile(pred, iterable) def rstrip(iterable, pred): """Yield the items from *iterable*, but strip any from the end for which *pred* returns ``True``. For example, to remove a set of items from the end of an iterable: >>> iterable = (None, False, None, 1, 2, None, 3, False, None) >>> pred = lambda x: x in {None, False, ''} >>> list(rstrip(iterable, pred)) [None, False, None, 1, 2, None, 3] This function is analogous to :func:`str.rstrip`. """ cache = [] cache_append = cache.append cache_clear = cache.clear for x in iterable: if pred(x): cache_append(x) else: yield from cache cache_clear() yield x def strip(iterable, pred): """Yield the items from *iterable*, but strip any from the beginning and end for which *pred* returns ``True``. For example, to remove a set of items from both ends of an iterable: >>> iterable = (None, False, None, 1, 2, None, 3, False, None) >>> pred = lambda x: x in {None, False, ''} >>> list(strip(iterable, pred)) [1, 2, None, 3] This function is analogous to :func:`str.strip`. """ return rstrip(lstrip(iterable, pred), pred) class islice_extended: """An extension of :func:`itertools.islice` that supports negative values for *stop*, *start*, and *step*. >>> iterable = iter('abcdefgh') >>> list(islice_extended(iterable, -4, -1)) ['e', 'f', 'g'] Slices with negative values require some caching of *iterable*, but this function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator: >>> from itertools import count >>> list(islice_extended(count(), 110, 99, -2)) [110, 108, 106, 104, 102, 100] You can also use slice notation directly: >>> iterable = map(str, count()) >>> it = islice_extended(iterable)[10:20:2] >>> list(it) ['10', '12', '14', '16', '18'] """ def __init__(self, iterable, *args): it = iter(iterable) if args: self._iterable = _islice_helper(it, slice(*args)) else: self._iterable = it def __iter__(self): return self def __next__(self): return next(self._iterable) def __getitem__(self, key): if isinstance(key, slice): return islice_extended(_islice_helper(self._iterable, key)) raise TypeError('islice_extended.__getitem__ argument must be a slice') def _islice_helper(it, s): start = s.start stop = s.stop if s.step == 0: raise ValueError('step argument must be a non-zero integer or None.') step = s.step or 1 if step > 0: start = 0 if (start is None) else start if start < 0: # Consume all but the last -start items cache = deque(enumerate(it, 1), maxlen=-start) len_iter = cache[-1][0] if cache else 0 # Adjust start to be positive i = max(len_iter + start, 0) # Adjust stop to be positive if stop is None: j = len_iter elif stop >= 0: j = min(stop, len_iter) else: j = max(len_iter + stop, 0) # Slice the cache n = j - i if n <= 0: return for index, item in islice(cache, 0, n, step): yield item elif (stop is not None) and (stop < 0): # Advance to the start position next(islice(it, start, start), None) # When stop is negative, we have to carry -stop items while # iterating cache = deque(islice(it, -stop), maxlen=-stop) for index, item in enumerate(it): cached_item = cache.popleft() if index % step == 0: yield cached_item cache.append(item) else: # When both start and stop are positive we have the normal case yield from islice(it, start, stop, step) else: start = -1 if (start is None) else start if (stop is not None) and (stop < 0): # Consume all but the last items n = -stop - 1 cache = deque(enumerate(it, 1), maxlen=n) len_iter = cache[-1][0] if cache else 0 # If start and stop are both negative they are comparable and # we can just slice. Otherwise we can adjust start to be negative # and then slice. if start < 0: i, j = start, stop else: i, j = min(start - len_iter, -1), None for index, item in list(cache)[i:j:step]: yield item else: # Advance to the stop position if stop is not None: m = stop + 1 next(islice(it, m, m), None) # stop is positive, so if start is negative they are not comparable # and we need the rest of the items. if start < 0: i = start n = None # stop is None and start is positive, so we just need items up to # the start index. elif stop is None: i = None n = start + 1 # Both stop and start are positive, so they are comparable. else: i = None n = start - stop if n <= 0: return cache = list(islice(it, n)) yield from cache[i::step] def always_reversible(iterable): """An extension of :func:`reversed` that supports all iterables, not just those which implement the ``Reversible`` or ``Sequence`` protocols. >>> print(*always_reversible(x for x in range(3))) 2 1 0 If the iterable is already reversible, this function returns the result of :func:`reversed()`. If the iterable is not reversible, this function will cache the remaining items in the iterable and yield them in reverse order, which may require significant storage. 
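As a minimal sketch of the fallback path, a generator (which supports neither protocol) is buffered into a list and then reversed: >>> list(always_reversible(n * n for n in range(4))) [9, 4, 1, 0]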
""" try: return reversed(iterable) except TypeError: return reversed(list(iterable)) def consecutive_groups(iterable, ordering=lambda x: x): """Yield groups of consecutive items using :func:`itertools.groupby`. The *ordering* function determines whether two items are adjacent by returning their position. By default, the ordering function is the identity function. This is suitable for finding runs of numbers: >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40] >>> for group in consecutive_groups(iterable): ... print(list(group)) [1] [10, 11, 12] [20] [30, 31, 32, 33] [40] For finding runs of adjacent letters, try using the :meth:`index` method of a string of letters: >>> from string import ascii_lowercase >>> iterable = 'abcdfgilmnop' >>> ordering = ascii_lowercase.index >>> for group in consecutive_groups(iterable, ordering): ... print(list(group)) ['a', 'b', 'c', 'd'] ['f', 'g'] ['i'] ['l', 'm', 'n', 'o', 'p'] Each group of consecutive items is an iterator that shares it source with *iterable*. When an an output group is advanced, the previous group is no longer available unless its elements are copied (e.g., into a ``list``). >>> iterable = [1, 2, 11, 12, 21, 22] >>> saved_groups = [] >>> for group in consecutive_groups(iterable): ... saved_groups.append(list(group)) # Copy group elements >>> saved_groups [[1, 2], [11, 12], [21, 22]] """ for k, g in groupby( enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) ): yield map(itemgetter(1), g) def difference(iterable, func=sub, *, initial=None): """This function is the inverse of :func:`itertools.accumulate`. By default it will compute the first difference of *iterable* using :func:`operator.sub`: >>> from itertools import accumulate >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10 >>> list(difference(iterable)) [0, 1, 2, 3, 4] *func* defaults to :func:`operator.sub`, but other functions can be specified. They will be applied as follows:: A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ... For example, to do progressive division: >>> iterable = [1, 2, 6, 24, 120] >>> func = lambda x, y: x // y >>> list(difference(iterable, func)) [1, 2, 3, 4, 5] If the *initial* keyword is set, the first element will be skipped when computing successive differences. >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10) >>> list(difference(it, initial=10)) [1, 2, 3] """ a, b = tee(iterable) try: first = [next(b)] except StopIteration: return iter([]) if initial is not None: first = [] return chain(first, starmap(func, zip(b, a))) class SequenceView(Sequence): """Return a read-only view of the sequence object *target*. :class:`SequenceView` objects are analogous to Python's built-in "dictionary view" types. They provide a dynamic view of a sequence's items, meaning that when the sequence updates, so does the view. >>> seq = ['0', '1', '2'] >>> view = SequenceView(seq) >>> view SequenceView(['0', '1', '2']) >>> seq.append('3') >>> view SequenceView(['0', '1', '2', '3']) Sequence views support indexing, slicing, and length queries. They act like the underlying sequence, except they don't allow assignment: >>> view[1] '1' >>> view[1:-1] ['1', '2'] >>> len(view) 4 Sequence views are useful as an alternative to copying, as they don't require (much) extra storage. 
""" def __init__(self, target): if not isinstance(target, Sequence): raise TypeError self._target = target def __getitem__(self, index): return self._target[index] def __len__(self): return len(self._target) def __repr__(self): return '{}({})'.format(self.__class__.__name__, repr(self._target)) class seekable: """Wrap an iterator to allow for seeking backward and forward. This progressively caches the items in the source iterable so they can be re-visited. Call :meth:`seek` with an index to seek to that position in the source iterable. To "reset" an iterator, seek to ``0``: >>> from itertools import count >>> it = seekable((str(n) for n in count())) >>> next(it), next(it), next(it) ('0', '1', '2') >>> it.seek(0) >>> next(it), next(it), next(it) ('0', '1', '2') >>> next(it) '3' You can also seek forward: >>> it = seekable((str(n) for n in range(20))) >>> it.seek(10) >>> next(it) '10' >>> it.seek(20) # Seeking past the end of the source isn't a problem >>> list(it) [] >>> it.seek(0) # Resetting works even after hitting the end >>> next(it), next(it), next(it) ('0', '1', '2') Call :meth:`peek` to look ahead one item without advancing the iterator: >>> it = seekable('1234') >>> it.peek() '1' >>> list(it) ['1', '2', '3', '4'] >>> it.peek(default='empty') 'empty' Before the iterator is at its end, calling :func:`bool` on it will return ``True``. After it will return ``False``: >>> it = seekable('5678') >>> bool(it) True >>> list(it) ['5', '6', '7', '8'] >>> bool(it) False You may view the contents of the cache with the :meth:`elements` method. That returns a :class:`SequenceView`, a view that updates automatically: >>> it = seekable((str(n) for n in range(10))) >>> next(it), next(it), next(it) ('0', '1', '2') >>> elements = it.elements() >>> elements SequenceView(['0', '1', '2']) >>> next(it) '3' >>> elements SequenceView(['0', '1', '2', '3']) By default, the cache grows as the source iterable progresses, so beware of wrapping very large or infinite iterables. Supply *maxlen* to limit the size of the cache (this of course limits how far back you can seek). >>> from itertools import count >>> it = seekable((str(n) for n in count()), maxlen=2) >>> next(it), next(it), next(it), next(it) ('0', '1', '2', '3') >>> list(it.elements()) ['2', '3'] >>> it.seek(0) >>> next(it), next(it), next(it), next(it) ('2', '3', '4', '5') >>> next(it) '6' """ def __init__(self, iterable, maxlen=None): self._source = iter(iterable) if maxlen is None: self._cache = [] else: self._cache = deque([], maxlen) self._index = None def __iter__(self): return self def __next__(self): if self._index is not None: try: item = self._cache[self._index] except IndexError: self._index = None else: self._index += 1 return item item = next(self._source) self._cache.append(item) return item def __bool__(self): try: self.peek() except StopIteration: return False return True def peek(self, default=_marker): try: peeked = next(self) except StopIteration: if default is _marker: raise return default if self._index is None: self._index = len(self._cache) self._index -= 1 return peeked def elements(self): return SequenceView(self._cache) def seek(self, index): self._index = index remainder = index - len(self._cache) if remainder > 0: consume(self, remainder) class run_length: """ :func:`run_length.encode` compresses an iterable with run-length encoding. 
It yields groups of repeated items with the count of how many times they were repeated: >>> uncompressed = 'abbcccdddd' >>> list(run_length.encode(uncompressed)) [('a', 1), ('b', 2), ('c', 3), ('d', 4)] :func:`run_length.decode` decompresses an iterable that was previously compressed with run-length encoding. It yields the items of the decompressed iterable: >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] >>> list(run_length.decode(compressed)) ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] """ @staticmethod def encode(iterable): return ((k, ilen(g)) for k, g in groupby(iterable)) @staticmethod def decode(iterable): return chain.from_iterable(repeat(k, n) for k, n in iterable) def exactly_n(iterable, n, predicate=bool): """Return ``True`` if exactly ``n`` items in the iterable are ``True`` according to the *predicate* function. >>> exactly_n([True, True, False], 2) True >>> exactly_n([True, True, False], 1) False >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) True The iterable will be advanced until ``n + 1`` truthy items are encountered, so avoid calling it on infinite iterables. """ return len(take(n + 1, filter(predicate, iterable))) == n def circular_shifts(iterable): """Return a list of circular shifts of *iterable*. >>> circular_shifts(range(4)) [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] """ lst = list(iterable) return take(len(lst), windowed(cycle(lst), len(lst))) def make_decorator(wrapping_func, result_index=0): """Return a decorator version of *wrapping_func*, which is a function that modifies an iterable. *result_index* is the position in that function's signature where the iterable goes. This lets you use itertools on the "production end," i.e. at function definition. This can augment what the function returns without changing the function's code. For example, to produce a decorator version of :func:`chunked`: >>> from more_itertools import chunked >>> chunker = make_decorator(chunked, result_index=0) >>> @chunker(3) ... def iter_range(n): ... return iter(range(n)) ... >>> list(iter_range(9)) [[0, 1, 2], [3, 4, 5], [6, 7, 8]] To only allow truthy items to be returned: >>> truth_serum = make_decorator(filter, result_index=1) >>> @truth_serum(bool) ... def boolean_test(): ... return [0, 1, '', ' ', False, True] ... >>> list(boolean_test()) [1, ' ', True] The :func:`peekable` and :func:`seekable` wrappers make for practical decorators: >>> from more_itertools import peekable >>> peekable_function = make_decorator(peekable) >>> @peekable_function() ... def str_range(*args): ... return (str(x) for x in range(*args)) ... >>> it = str_range(1, 20, 2) >>> next(it), next(it), next(it) ('1', '3', '5') >>> it.peek() '7' >>> next(it) '7' """ # See https://sites.google.com/site/bbayles/index/decorator_factory for # notes on how this works. def decorator(*wrapping_args, **wrapping_kwargs): def outer_wrapper(f): def inner_wrapper(*args, **kwargs): result = f(*args, **kwargs) wrapping_args_ = list(wrapping_args) wrapping_args_.insert(result_index, result) return wrapping_func(*wrapping_args_, **wrapping_kwargs) return inner_wrapper return outer_wrapper return decorator def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): """Return a dictionary that maps the items in *iterable* to categories defined by *keyfunc*, transforms them with *valuefunc*, and then summarizes them by category with *reducefunc*. *valuefunc* defaults to the identity function if it is unspecified. 
If *reducefunc* is unspecified, no summarization takes place: >>> keyfunc = lambda x: x.upper() >>> result = map_reduce('abbccc', keyfunc) >>> sorted(result.items()) [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] Specifying *valuefunc* transforms the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> result = map_reduce('abbccc', keyfunc, valuefunc) >>> sorted(result.items()) [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] Specifying *reducefunc* summarizes the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> reducefunc = sum >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) >>> sorted(result.items()) [('A', 1), ('B', 2), ('C', 3)] You may want to filter the input iterable before applying the map/reduce procedure: >>> all_items = range(30) >>> items = [x for x in all_items if 10 <= x <= 20] # Filter >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 >>> categories = map_reduce(items, keyfunc=keyfunc) >>> sorted(categories.items()) [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) >>> sorted(summaries.items()) [(0, 90), (1, 75)] Note that all items in the iterable are gathered into a list before the summarization step, which may require significant storage. The returned object is a :obj:`collections.defaultdict` with the ``default_factory`` set to ``None``, such that it behaves like a normal dictionary. """ valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc ret = defaultdict(list) for item in iterable: key = keyfunc(item) value = valuefunc(item) ret[key].append(value) if reducefunc is not None: for key, value_list in ret.items(): ret[key] = reducefunc(value_list) ret.default_factory = None return ret def rlocate(iterable, pred=bool, window_size=None): """Yield the index of each item in *iterable* for which *pred* returns ``True``, starting from the right and moving left. *pred* defaults to :func:`bool`, which will select truthy items: >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4 [4, 2, 1] Set *pred* to a custom function to, e.g., find the indexes for a particular item: >>> iterable = iter('abcb') >>> pred = lambda x: x == 'b' >>> list(rlocate(iterable, pred)) [3, 1] If *window_size* is given, then the *pred* function will be called with that many items. This enables searching for sub-sequences: >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] >>> pred = lambda *args: args == (1, 2, 3) >>> list(rlocate(iterable, pred=pred, window_size=3)) [9, 5, 1] Beware, this function won't return anything for infinite iterables. If *iterable* is reversible, ``rlocate`` will reverse it and search from the right. Otherwise, it will search from the left and return the results in reverse order. See :func:`locate` for other example applications. """ if window_size is None: try: len_iter = len(iterable) return (len_iter - i - 1 for i in locate(reversed(iterable), pred)) except TypeError: pass return reversed(list(locate(iterable, pred, window_size))) def replace(iterable, pred, substitutes, count=None, window_size=1): """Yield the items from *iterable*, replacing the items for which *pred* returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] >>> pred = lambda x: x == 0 >>> substitutes = (2, 3) >>> list(replace(iterable, pred, substitutes)) [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] If *count* is given, the number of replacements will be limited: >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] >>> pred = lambda x: x == 0 >>> substitutes = [None] >>> list(replace(iterable, pred, substitutes, count=2)) [1, 1, None, 1, 1, None, 1, 1, 0] Use *window_size* to control the number of items passed as arguments to *pred*. This allows for locating and replacing subsequences. >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] >>> window_size = 3 >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred >>> substitutes = [3, 4] # Splice in these items >>> list(replace(iterable, pred, substitutes, window_size=window_size)) [3, 4, 5, 3, 4, 5] """ if window_size < 1: raise ValueError('window_size must be at least 1') # Save the substitutes iterable, since it's used more than once substitutes = tuple(substitutes) # Add padding such that the number of windows matches the length of the # iterable it = chain(iterable, [_marker] * (window_size - 1)) windows = windowed(it, window_size) n = 0 for w in windows: # If the current window matches our predicate (and we haven't hit # our maximum number of replacements), splice in the substitutes # and then consume the following windows that overlap with this one. # For example, if the iterable is (0, 1, 2, 3, 4...) # and the window size is 2, we have (0, 1), (1, 2), (2, 3)... # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) if pred(*w): if (count is None) or (n < count): n += 1 yield from substitutes consume(windows, window_size - 1) continue # If there was no match (or we've reached the replacement limit), # yield the first item from the window. if w and (w[0] is not _marker): yield w[0] def partitions(iterable): """Yield all possible order-preserving partitions of *iterable*. >>> iterable = 'abc' >>> for part in partitions(iterable): ... print([''.join(p) for p in part]) ['abc'] ['a', 'bc'] ['ab', 'c'] ['a', 'b', 'c'] This is unrelated to :func:`partition`. """ sequence = list(iterable) n = len(sequence) for i in powerset(range(1, n)): yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] def set_partitions(iterable, k=None): """ Yield the set partitions of *iterable* into *k* parts. Set partitions are not order-preserving. >>> iterable = 'abc' >>> for part in set_partitions(iterable, 2): ... print([''.join(p) for p in part]) ['a', 'bc'] ['ab', 'c'] ['b', 'ac'] If *k* is not given, every set partition is generated. >>> iterable = 'abc' >>> for part in set_partitions(iterable): ... print([''.join(p) for p in part]) ['abc'] ['a', 'bc'] ['ab', 'c'] ['b', 'ac'] ['a', 'b', 'c'] """ L = list(iterable) n = len(L) if k is not None: if k < 1: raise ValueError( "Can't partition in a negative or zero number of groups" ) elif k > n: return def set_partitions_helper(L, k): n = len(L) if k == 1: yield [L] elif n == k: yield [[s] for s in L] else: e, *M = L for p in set_partitions_helper(M, k - 1): yield [[e], *p] for p in set_partitions_helper(M, k): for i in range(len(p)): yield p[:i] + [[e] + p[i]] + p[i + 1 :] if k is None: for k in range(1, n + 1): yield from set_partitions_helper(L, k) else: yield from set_partitions_helper(L, k) class time_limited: """ Yield items from *iterable* until *limit_seconds* have passed. If the time limit expires before all items have been yielded, the ``timed_out`` parameter will be set to ``True``. 
>>> from time import sleep >>> def generator(): ... yield 1 ... yield 2 ... sleep(0.2) ... yield 3 >>> iterable = time_limited(0.1, generator()) >>> list(iterable) [1, 2] >>> iterable.timed_out True Note that the time is checked before each item is yielded, and iteration stops if the time elapsed is greater than *limit_seconds*. If your time limit is 1 second, but it takes 2 seconds to generate the first item from the iterable, the function will run for 2 seconds and not yield anything. """ def __init__(self, limit_seconds, iterable): if limit_seconds < 0: raise ValueError('limit_seconds must be non-negative') self.limit_seconds = limit_seconds self._iterable = iter(iterable) self._start_time = monotonic() self.timed_out = False def __iter__(self): return self def __next__(self): item = next(self._iterable) if monotonic() - self._start_time > self.limit_seconds: self.timed_out = True raise StopIteration return item def only(iterable, default=None, too_long=None): """If *iterable* has only one item, return it. If it has zero items, return *default*. If it has more than one item, raise the exception given by *too_long*, which is ``ValueError`` by default. >>> only([], default='missing') 'missing' >>> only([1]) 1 >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Expected exactly one item in iterable, but got 1, 2, and perhaps more. >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError Note that :func:`only` attempts to advance *iterable* twice to ensure there is only one item. See :func:`spy` or :func:`peekable` to check iterable contents less destructively. """ it = iter(iterable) first_value = next(it, default) try: second_value = next(it) except StopIteration: pass else: msg = ( 'Expected exactly one item in iterable, but got {!r}, {!r}, ' 'and perhaps more.'.format(first_value, second_value) ) raise too_long or ValueError(msg) return first_value def ichunked(iterable, n): """Break *iterable* into sub-iterables with *n* elements each. :func:`ichunked` is like :func:`chunked`, but it yields iterables instead of lists. If the sub-iterables are read in order, the elements of *iterable* won't be stored in memory. If they are read out of order, :func:`itertools.tee` is used to cache elements as necessary. >>> from itertools import count >>> all_chunks = ichunked(count(), 4) >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks) >>> list(c_2) # c_1's elements have been cached; c_3's haven't been [4, 5, 6, 7] >>> list(c_1) [0, 1, 2, 3] >>> list(c_3) [8, 9, 10, 11] """ source = iter(iterable) while True: # Check to see whether we're at the end of the source iterable item = next(source, _marker) if item is _marker: return # Clone the source and yield an n-length slice source, it = tee(chain([item], source)) yield islice(it, n) # Advance the source iterable consume(source, n) def distinct_combinations(iterable, r): """Yield the distinct combinations of *r* items taken from *iterable*. >>> list(distinct_combinations([0, 0, 1], 2)) [(0, 0), (0, 1)] Equivalent to ``set(combinations(iterable))``, except duplicates are not generated and thrown away. For larger input sequences this is much more efficient.
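As a small worked example with several duplicated input items, each distinct combination appears exactly once: >>> list(distinct_combinations('aabb', 2)) [('a', 'a'), ('a', 'b'), ('b', 'b')]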
""" if r < 0: raise ValueError('r must be non-negative') elif r == 0: yield () return pool = tuple(iterable) generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] current_combo = [None] * r level = 0 while generators: try: cur_idx, p = next(generators[-1]) except StopIteration: generators.pop() level -= 1 continue current_combo[level] = p if level + 1 == r: yield tuple(current_combo) else: generators.append( unique_everseen( enumerate(pool[cur_idx + 1 :], cur_idx + 1), key=itemgetter(1), ) ) level += 1 def filter_except(validator, iterable, *exceptions): """Yield the items from *iterable* for which the *validator* function does not raise one of the specified *exceptions*. *validator* is called for each item in *iterable*. It should be a function that accepts one argument and raises an exception if that item is not valid. >>> iterable = ['1', '2', 'three', '4', None] >>> list(filter_except(int, iterable, ValueError, TypeError)) ['1', '2', '4'] If an exception other than one given by *exceptions* is raised by *validator*, it is raised like normal. """ for item in iterable: try: validator(item) except exceptions: pass else: yield item def map_except(function, iterable, *exceptions): """Transform each item from *iterable* with *function* and yield the result, unless *function* raises one of the specified *exceptions*. *function* is called to transform each item in *iterable*. It should accept one argument. >>> iterable = ['1', '2', 'three', '4', None] >>> list(map_except(int, iterable, ValueError, TypeError)) [1, 2, 4] If an exception other than one given by *exceptions* is raised by *function*, it is raised like normal. """ for item in iterable: try: yield function(item) except exceptions: pass def map_if(iterable, pred, func, func_else=lambda x: x): """Evaluate each item from *iterable* using *pred*. If the result is equivalent to ``True``, transform the item with *func* and yield it. Otherwise, transform the item with *func_else* and yield it. *pred*, *func*, and *func_else* should each be functions that accept one argument. By default, *func_else* is the identity function. >>> from math import sqrt >>> iterable = list(range(-5, 5)) >>> iterable [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig')) [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig'] >>> list(map_if(iterable, lambda x: x >= 0, ... lambda x: f'{sqrt(x):.2f}', lambda x: None)) [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00'] """ for item in iterable: yield func(item) if pred(item) else func_else(item) def _sample_unweighted(iterable, k): # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". # Fill up the reservoir (collection of samples) with the first `k` samples reservoir = take(k, iterable) # Generate random number that's the largest in a sample of k U(0,1) numbers # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic W = exp(log(random()) / k) # The number of elements to skip before changing the reservoir is a random # number with a geometric distribution. Sample it using random() and logs. 
next_index = k + floor(log(random()) / log(1 - W)) for index, element in enumerate(iterable, k): if index == next_index: reservoir[randrange(k)] = element # The new W is the largest in a sample of k U(0, `old_W`) numbers W *= exp(log(random()) / k) next_index += floor(log(random()) / log(1 - W)) + 1 return reservoir def _sample_weighted(iterable, k, weights): # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. : # "Weighted random sampling with a reservoir". # Log-transform for numerical stability for weights that are small/large weight_keys = (log(random()) / weight for weight in weights) # Fill up the reservoir (collection of samples) with the first `k` # weight-keys and elements, then heapify the list. reservoir = take(k, zip(weight_keys, iterable)) heapify(reservoir) # The number of jumps before changing the reservoir is a random variable # with an exponential distribution. Sample it using random() and logs. smallest_weight_key, _ = reservoir[0] weights_to_skip = log(random()) / smallest_weight_key for weight, element in zip(weights, iterable): if weight >= weights_to_skip: # The notation here is consistent with the paper, but we store # the weight-keys in log-space for better numerical stability. smallest_weight_key, _ = reservoir[0] t_w = exp(weight * smallest_weight_key) r_2 = uniform(t_w, 1) # generate U(t_w, 1) weight_key = log(r_2) / weight heapreplace(reservoir, (weight_key, element)) smallest_weight_key, _ = reservoir[0] weights_to_skip = log(random()) / smallest_weight_key else: weights_to_skip -= weight # Equivalent to [element for weight_key, element in sorted(reservoir)] return [heappop(reservoir)[1] for _ in range(k)] def sample(iterable, k, weights=None): """Return a *k*-length list of elements chosen (without replacement) from the *iterable*. Like :func:`random.sample`, but works on iterables of unknown length. >>> iterable = range(100) >>> sample(iterable, 5) # doctest: +SKIP [81, 60, 96, 16, 4] An iterable with *weights* may also be given: >>> iterable = range(100) >>> weights = (i * i + 1 for i in range(100)) >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP [79, 67, 74, 66, 78] The algorithm can also be used to generate weighted random permutations. The relative weight of each item determines the probability that it appears late in the permutation. >>> data = "abcdefgh" >>> weights = range(1, len(data) + 1) >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] """ if k == 0: return [] iterable = iter(iterable) if weights is None: return _sample_unweighted(iterable, k) else: weights = iter(weights) return _sample_weighted(iterable, k, weights) def is_sorted(iterable, key=None, reverse=False, strict=False): """Returns ``True`` if the items of iterable are in sorted order, and ``False`` otherwise. *key* and *reverse* have the same meaning that they do in the built-in :func:`sorted` function. >>> is_sorted(['1', '2', '3', '4', '5'], key=int) True >>> is_sorted([5, 4, 3, 1, 2], reverse=True) False If *strict*, tests for strict sorting, that is, returns ``False`` if equal elements are found: >>> is_sorted([1, 2, 2]) True >>> is_sorted([1, 2, 2], strict=True) False The function returns ``False`` after encountering the first out-of-order item. If there are no out-of-order items, the iterable is exhausted. 
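Because the check is lazy and short-circuits, even an infinite iterable can be tested when it fails early (a small sketch): >>> from itertools import chain, count >>> is_sorted(chain([5, 4], count())) False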
""" compare = (le if reverse else ge) if strict else (lt if reverse else gt) it = iterable if key is None else map(key, iterable) return not any(starmap(compare, pairwise(it))) class AbortThread(BaseException): pass class callback_iter: """Convert a function that uses callbacks to an iterator. Let *func* be a function that takes a `callback` keyword argument. For example: >>> def func(callback=None): ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]: ... if callback: ... callback(i, c) ... return 4 Use ``with callback_iter(func)`` to get an iterator over the parameters that are delivered to the callback. >>> with callback_iter(func) as it: ... for args, kwargs in it: ... print(args) (1, 'a') (2, 'b') (3, 'c') The function will be called in a background thread. The ``done`` property indicates whether it has completed execution. >>> it.done True If it completes successfully, its return value will be available in the ``result`` property. >>> it.result 4 Notes: * If the function uses some keyword argument besides ``callback``, supply *callback_kwd*. * If it finished executing, but raised an exception, accessing the ``result`` property will raise the same exception. * If it hasn't finished executing, accessing the ``result`` property from within the ``with`` block will raise ``RuntimeError``. * If it hasn't finished executing, accessing the ``result`` property from outside the ``with`` block will raise a ``more_itertools.AbortThread`` exception. * Provide *wait_seconds* to adjust how frequently the it is polled for output. """ def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): self._func = func self._callback_kwd = callback_kwd self._aborted = False self._future = None self._wait_seconds = wait_seconds self._executor = ThreadPoolExecutor(max_workers=1) self._iterator = self._reader() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self._aborted = True self._executor.shutdown() def __iter__(self): return self def __next__(self): return next(self._iterator) @property def done(self): if self._future is None: return False return self._future.done() @property def result(self): if not self.done: raise RuntimeError('Function has not yet completed') return self._future.result() def _reader(self): q = Queue() def callback(*args, **kwargs): if self._aborted: raise AbortThread('canceled by user') q.put((args, kwargs)) self._future = self._executor.submit( self._func, **{self._callback_kwd: callback} ) while True: try: item = q.get(timeout=self._wait_seconds) except Empty: pass else: q.task_done() yield item if self._future.done(): break remaining = [] while True: try: item = q.get_nowait() except Empty: break else: q.task_done() remaining.append(item) q.join() yield from remaining def windowed_complete(iterable, n): """ Yield ``(beginning, middle, end)`` tuples, where: * Each ``middle`` has *n* items from *iterable* * Each ``beginning`` has the items before the ones in ``middle`` * Each ``end`` has the items after the ones in ``middle`` >>> iterable = range(7) >>> n = 3 >>> for beginning, middle, end in windowed_complete(iterable, n): ... print(beginning, middle, end) () (0, 1, 2) (3, 4, 5, 6) (0,) (1, 2, 3) (4, 5, 6) (0, 1) (2, 3, 4) (5, 6) (0, 1, 2) (3, 4, 5) (6,) (0, 1, 2, 3) (4, 5, 6) () Note that *n* must be at least 0 and most equal to the length of *iterable*. This function will exhaust the iterable and may require significant storage. 
""" if n < 0: raise ValueError('n must be >= 0') seq = tuple(iterable) size = len(seq) if n > size: raise ValueError('n must be <= len(seq)') for i in range(size - n + 1): beginning = seq[:i] middle = seq[i : i + n] end = seq[i + n :] yield beginning, middle, end def all_unique(iterable, key=None): """ Returns ``True`` if all the elements of *iterable* are unique (no two elements are equal). >>> all_unique('ABCB') False If a *key* function is specified, it will be used to make comparisons. >>> all_unique('ABCb') True >>> all_unique('ABCb', str.lower) False The function returns as soon as the first non-unique element is encountered. Iterables with a mix of hashable and unhashable items can be used, but the function will be slower for unhashable items. """ seenset = set() seenset_add = seenset.add seenlist = [] seenlist_add = seenlist.append for element in map(key, iterable) if key else iterable: try: if element in seenset: return False seenset_add(element) except TypeError: if element in seenlist: return False seenlist_add(element) return True def nth_product(index, *args): """Equivalent to ``list(product(*args))[index]``. The products of *args* can be ordered lexicographically. :func:`nth_product` computes the product at sort position *index* without computing the previous products. >>> nth_product(8, range(2), range(2), range(2), range(2)) (1, 0, 0, 0) ``IndexError`` will be raised if the given *index* is invalid. """ pools = list(map(tuple, reversed(args))) ns = list(map(len, pools)) c = reduce(mul, ns) if index < 0: index += c if not 0 <= index < c: raise IndexError result = [] for pool, n in zip(pools, ns): result.append(pool[index % n]) index //= n return tuple(reversed(result)) def nth_permutation(iterable, r, index): """Equivalent to ``list(permutations(iterable, r))[index]``` The subsequences of *iterable* that are of length *r* where order is important can be ordered lexicographically. :func:`nth_permutation` computes the subsequence at sort position *index* directly, without computing the previous subsequences. >>> nth_permutation('ghijk', 2, 5) ('h', 'i') ``ValueError`` will be raised If *r* is negative or greater than the length of *iterable*. ``IndexError`` will be raised if the given *index* is invalid. """ pool = list(iterable) n = len(pool) if r is None or r == n: r, c = n, factorial(n) elif not 0 <= r < n: raise ValueError else: c = factorial(n) // factorial(n - r) if index < 0: index += c if not 0 <= index < c: raise IndexError if c == 0: return tuple() result = [0] * r q = index * factorial(n) // c if r < n else index for d in range(1, n + 1): q, i = divmod(q, d) if 0 <= n - d < r: result[n - d] = i if q == 0: break return tuple(map(pool.pop, result)) def value_chain(*args): """Yield all arguments passed to the function in the same order in which they were passed. If an argument itself is iterable then iterate over its values. >>> list(value_chain(1, 2, 3, [4, 5, 6])) [1, 2, 3, 4, 5, 6] Binary and text strings are not considered iterable and are emitted as-is: >>> list(value_chain('12', '34', ['56', '78'])) ['12', '34', '56', '78'] Multiple levels of nesting are not flattened. """ for value in args: if isinstance(value, (str, bytes)): yield value continue try: yield from value except TypeError: yield value def product_index(element, *args): """Equivalent to ``list(product(*args)).index(element)`` The products of *args* can be ordered lexicographically. :func:`product_index` computes the first index of *element* without computing the previous products. 
>>> product_index([8, 2], range(10), range(5)) 42 ``ValueError`` will be raised if the given *element* isn't in the product of *args*. """ index = 0 for x, pool in zip_longest(element, args, fillvalue=_marker): if x is _marker or pool is _marker: raise ValueError('element is not a product of args') pool = tuple(pool) index = index * len(pool) + pool.index(x) return index def combination_index(element, iterable): """Equivalent to ``list(combinations(iterable, r)).index(element)`` The subsequences of *iterable* that are of length *r* can be ordered lexicographically. :func:`combination_index` computes the index of the first *element*, without computing the previous combinations. >>> combination_index('adf', 'abcdefg') 10 ``ValueError`` will be raised if the given *element* isn't one of the combinations of *iterable*. """ element = enumerate(element) k, y = next(element, (None, None)) if k is None: return 0 indexes = [] pool = enumerate(iterable) for n, x in pool: if x == y: indexes.append(n) tmp, y = next(element, (None, None)) if tmp is None: break else: k = tmp else: raise ValueError('element is not a combination of iterable') n, _ = last(pool, default=(n, None)) # Python versions below 3.8 don't have math.comb index = 1 for i, j in enumerate(reversed(indexes), start=1): j = n - j if i <= j: index += factorial(j) // (factorial(i) * factorial(j - i)) return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index def permutation_index(element, iterable): """Equivalent to ``list(permutations(iterable, r)).index(element)`` The subsequences of *iterable* that are of length *r* where order is important can be ordered lexicographically. :func:`permutation_index` computes the index of the first *element* directly, without computing the previous permutations. >>> permutation_index([1, 3, 2], range(5)) 19 ``ValueError`` will be raised if the given *element* isn't one of the permutations of *iterable*. """ index = 0 pool = list(iterable) for i, x in zip(range(len(pool), -1, -1), element): r = pool.index(x) index = index * i + r del pool[r] return index class countable: """Wrap *iterable* and keep a count of how many items have been consumed. The ``items_seen`` attribute starts at ``0`` and increments as the iterable is consumed: >>> iterable = map(str, range(10)) >>> it = countable(iterable) >>> it.items_seen 0 >>> next(it), next(it) ('0', '1') >>> list(it) ['2', '3', '4', '5', '6', '7', '8', '9'] >>> it.items_seen 10 """ def __init__(self, iterable): self._it = iter(iterable) self.items_seen = 0 def __iter__(self): return self def __next__(self): item = next(self._it) self.items_seen += 1 return item def chunked_even(iterable, n): """Break *iterable* into lists of approximately length *n*. Items are distributed such that the lengths of the lists differ by at most 1 item.
>>> iterable = [1, 2, 3, 4, 5, 6, 7] >>> n = 3 >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2 [[1, 2, 3], [4, 5], [6, 7]] >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1 [[1, 2, 3], [4, 5, 6], [7]] """ len_method = getattr(iterable, '__len__', None) if len_method is None: return _chunked_even_online(iterable, n) else: return _chunked_even_finite(iterable, len_method(), n) def _chunked_even_online(iterable, n): buffer = [] maxbuf = n + (n - 2) * (n - 1) for x in iterable: buffer.append(x) if len(buffer) == maxbuf: yield buffer[:n] buffer = buffer[n:] yield from _chunked_even_finite(buffer, len(buffer), n) def _chunked_even_finite(iterable, N, n): if N < 1: return # Lists are either size `full_size <= n` or `partial_size = full_size - 1` q, r = divmod(N, n) num_lists = q + (1 if r > 0 else 0) q, r = divmod(N, num_lists) full_size = q + (1 if r > 0 else 0) partial_size = full_size - 1 num_full = N - partial_size * num_lists num_partial = num_lists - num_full buffer = [] iterator = iter(iterable) # Yield num_full lists of full_size for x in iterator: buffer.append(x) if len(buffer) == full_size: yield buffer buffer = [] num_full -= 1 if num_full <= 0: break # Yield num_partial lists of partial_size for x in iterator: buffer.append(x) if len(buffer) == partial_size: yield buffer buffer = [] num_partial -= 1 def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): """A version of :func:`zip` that "broadcasts" any scalar (i.e., non-iterable) items into output tuples. >>> iterable_1 = [1, 2, 3] >>> iterable_2 = ['a', 'b', 'c'] >>> scalar = '_' >>> list(zip_broadcast(iterable_1, iterable_2, scalar)) [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')] The *scalar_types* keyword argument determines what types are considered scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to treat strings and byte strings as iterable: >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None)) [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')] If the *strict* keyword argument is ``True``, then ``UnequalIterablesError`` will be raised if any of the iterables have different lengths. """ def is_scalar(obj): if scalar_types and isinstance(obj, scalar_types): return True try: iter(obj) except TypeError: return True else: return False size = len(objects) if not size: return iterables, iterable_positions = [], [] scalars, scalar_positions = [], [] for i, obj in enumerate(objects): if is_scalar(obj): scalars.append(obj) scalar_positions.append(i) else: iterables.append(iter(obj)) iterable_positions.append(i) if len(scalars) == size: yield tuple(objects) return zipper = _zip_equal if strict else zip for item in zipper(*iterables): new_item = [None] * size for i, elem in zip(iterable_positions, item): new_item[i] = elem for i, elem in zip(scalar_positions, scalars): new_item[i] = elem yield tuple(new_item) def unique_in_window(iterable, n, key=None): """Yield the items from *iterable* that haven't been seen recently. *n* is the size of the lookback window. >>> iterable = [0, 1, 0, 2, 3, 0] >>> n = 3 >>> list(unique_in_window(iterable, n)) [0, 1, 2, 3, 0] The *key* function, if provided, will be used to determine uniqueness: >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower())) ['a', 'b', 'c', 'd', 'a'] The items in *iterable* must be hashable.
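When the window is at least as large as the input, the output matches that of :func:`unique_everseen` (a quick sanity check on the example above): >>> list(unique_in_window([0, 1, 0, 2, 3, 0], 6)) [0, 1, 2, 3]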
""" if n <= 0: raise ValueError('n must be greater than 0') window = deque(maxlen=n) uniques = set() use_key = key is not None for item in iterable: k = key(item) if use_key else item if k in uniques: continue if len(uniques) == n: uniques.discard(window[0]) uniques.add(k) window.append(k) yield item def duplicates_everseen(iterable, key=None): """Yield duplicate elements after their first appearance. >>> list(duplicates_everseen('mississippi')) ['s', 'i', 's', 's', 'i', 'p', 'i'] >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower)) ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a'] This function is analagous to :func:`unique_everseen` and is subject to the same performance considerations. """ seen_set = set() seen_list = [] use_key = key is not None for element in iterable: k = key(element) if use_key else element try: if k not in seen_set: seen_set.add(k) else: yield element except TypeError: if k not in seen_list: seen_list.append(k) else: yield element def duplicates_justseen(iterable, key=None): """Yields serially-duplicate elements after their first appearance. >>> list(duplicates_justseen('mississippi')) ['s', 's', 'p'] >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower)) ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a'] This function is analagous to :func:`unique_justseen`. """ return flatten( map( lambda group_tuple: islice_extended(group_tuple[1])[1:], groupby(iterable, key), ) ) def minmax(iterable_or_value, *others, key=None, default=_marker): """Returns both the smallest and largest items in an iterable or the largest of two or more arguments. >>> minmax([3, 1, 5]) (1, 5) >>> minmax(4, 2, 6) (2, 6) If a *key* function is provided, it will be used to transform the input items for comparison. >>> minmax([5, 30], key=str) # '30' sorts before '5' (30, 5) If a *default* value is provided, it will be returned if there are no input items. >>> minmax([], default=(0, 0)) (0, 0) Otherwise ``ValueError`` is raised. This function is based on the `recipe <http://code.activestate.com/recipes/577916/>`__ by Raymond Hettinger and takes care to minimize the number of comparisons performed. """ iterable = (iterable_or_value, *others) if others else iterable_or_value it = iter(iterable) try: lo = hi = next(it) except StopIteration as e: if default is _marker: raise ValueError( '`minmax()` argument is an empty iterable. ' 'Provide a `default` value to suppress this error.' ) from e return default # Different branches depending on the presence of key. This saves a lot # of unimportant copies which would slow the "key=None" branch # significantly down. if key is None: for x, y in zip_longest(it, it, fillvalue=lo): if y < x: x, y = y, x if x < lo: lo = x if hi < y: hi = y else: lo_key = hi_key = key(lo) for x, y in zip_longest(it, it, fillvalue=lo): x_key, y_key = key(x), key(y) if y_key < x_key: x, y, x_key, y_key = y, x, y_key, x_key if x_key < lo_key: lo, lo_key = x, x_key if hi_key < y_key: hi, hi_key = y, y_key return lo, hi
132,578
Python
.py
3,371
31.167606
79
0.572489
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
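A quick check of the size arithmetic in ``_chunked_even_finite`` above: for ``N`` items and a maximum chunk size of ``n``, it yields ``ceil(N / n)`` lists whose lengths differ by at most one. The sketch below assumes ``more_itertools`` is importable; the helper ``chunk_sizes`` is illustrative, not part of the library.

from more_itertools import chunked_even

def chunk_sizes(N, n):
    # Lengths of the chunks produced for range(N) with max chunk size n.
    return [len(chunk) for chunk in chunked_even(range(N), n)]

print(chunk_sizes(7, 3))   # [3, 2, 2] -- plain chunked() gives [3, 3, 1]
print(chunk_sizes(10, 4))  # [4, 3, 3] -- ceil(10 / 4) = 3 chunks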
8,423
recipes.pyi
rembo10_headphones/lib/more_itertools/recipes.pyi
"""Stubs for more_itertools.recipes""" from typing import ( Any, Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union, ) from typing_extensions import overload, Type # Type and type variable definitions _T = TypeVar('_T') _U = TypeVar('_U') def take(n: int, iterable: Iterable[_T]) -> List[_T]: ... def tabulate( function: Callable[[int], _T], start: int = ... ) -> Iterator[_T]: ... def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ... def consume(iterator: Iterable[object], n: Optional[int] = ...) -> None: ... @overload def nth(iterable: Iterable[_T], n: int) -> Optional[_T]: ... @overload def nth(iterable: Iterable[_T], n: int, default: _U) -> Union[_T, _U]: ... def all_equal(iterable: Iterable[object]) -> bool: ... def quantify( iterable: Iterable[_T], pred: Callable[[_T], bool] = ... ) -> int: ... def pad_none(iterable: Iterable[_T]) -> Iterator[Optional[_T]]: ... def padnone(iterable: Iterable[_T]) -> Iterator[Optional[_T]]: ... def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ... def dotproduct(vec1: Iterable[object], vec2: Iterable[object]) -> object: ... def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ... def repeatfunc( func: Callable[..., _U], times: Optional[int] = ..., *args: Any ) -> Iterator[_U]: ... def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: ... @overload def grouper( iterable: Iterable[_T], n: int ) -> Iterator[Tuple[Optional[_T], ...]]: ... @overload def grouper( iterable: Iterable[_T], n: int, fillvalue: _U ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... @overload def grouper( # Deprecated interface iterable: int, n: Iterable[_T] ) -> Iterator[Tuple[Optional[_T], ...]]: ... @overload def grouper( # Deprecated interface iterable: int, n: Iterable[_T], fillvalue: _U ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ... def partition( pred: Optional[Callable[[_T], object]], iterable: Iterable[_T] ) -> Tuple[Iterator[_T], Iterator[_T]]: ... def powerset(iterable: Iterable[_T]) -> Iterator[Tuple[_T, ...]]: ... def unique_everseen( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = ... ) -> Iterator[_T]: ... def unique_justseen( iterable: Iterable[_T], key: Optional[Callable[[_T], object]] = ... ) -> Iterator[_T]: ... @overload def iter_except( func: Callable[[], _T], exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], first: None = ..., ) -> Iterator[_T]: ... @overload def iter_except( func: Callable[[], _T], exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], first: Callable[[], _U], ) -> Iterator[Union[_T, _U]]: ... @overload def first_true( iterable: Iterable[_T], *, pred: Optional[Callable[[_T], object]] = ... ) -> Optional[_T]: ... @overload def first_true( iterable: Iterable[_T], default: _U, pred: Optional[Callable[[_T], object]] = ..., ) -> Union[_T, _U]: ... def random_product( *args: Iterable[_T], repeat: int = ... ) -> Tuple[_T, ...]: ... def random_permutation( iterable: Iterable[_T], r: Optional[int] = ... ) -> Tuple[_T, ...]: ... def random_combination(iterable: Iterable[_T], r: int) -> Tuple[_T, ...]: ... def random_combination_with_replacement( iterable: Iterable[_T], r: int ) -> Tuple[_T, ...]: ... def nth_combination( iterable: Iterable[_T], r: int, index: int ) -> Tuple[_T, ...]: ... def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[Union[_T, _U]]: ... def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ... 
def before_and_after( predicate: Callable[[_T], bool], it: Iterable[_T] ) -> Tuple[Iterator[_T], Iterator[_T]]: ... def triplewise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T, _T]]: ... def sliding_window( iterable: Iterable[_T], n: int ) -> Iterator[Tuple[_T, ...]]: ...
3,925
Python
.py
110
33.390909
78
0.617624
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
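The paired overloads in these stubs encode how passing a *default* widens the return type; a sketch of the calls a type checker would accept for ``nth`` under the stubs above (variable names are illustrative):

from more_itertools import nth

result = nth(range(10), 3)            # typed Optional[int]; None if n is past the end
fallback = nth(range(10), 20, 'n/a')  # typed Union[int, str]; the default is returned
print(result, fallback)               # 3 n/a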
8,424
__init__.py
rembo10_headphones/lib/more_itertools/__init__.py
from .more import * # noqa from .recipes import * # noqa __version__ = '8.12.0'
83
Python
.py
3
26.333333
30
0.632911
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
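Because of the two star imports, every public name from ``more.py`` and ``recipes.py`` is re-exported at the package level; a quick check, assuming the package is importable:

import more_itertools

print(more_itertools.__version__)        # '8.12.0'
print(more_itertools.take(3, range(9)))  # [0, 1, 2], defined in recipes.py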
8,425
recipes.py
rembo10_headphones/lib/more_itertools/recipes.py
"""Imported from the recipes section of the itertools documentation. All functions taken from the recipes section of the itertools library docs [1]_. Some backward-compatible usability improvements have been made. .. [1] http://docs.python.org/library/itertools.html#recipes """ import warnings from collections import deque from itertools import ( chain, combinations, count, cycle, groupby, islice, repeat, starmap, tee, zip_longest, ) import operator from random import randrange, sample, choice __all__ = [ 'all_equal', 'before_and_after', 'consume', 'convolve', 'dotproduct', 'first_true', 'flatten', 'grouper', 'iter_except', 'ncycles', 'nth', 'nth_combination', 'padnone', 'pad_none', 'pairwise', 'partition', 'powerset', 'prepend', 'quantify', 'random_combination_with_replacement', 'random_combination', 'random_permutation', 'random_product', 'repeatfunc', 'roundrobin', 'sliding_window', 'tabulate', 'tail', 'take', 'triplewise', 'unique_everseen', 'unique_justseen', ] def take(n, iterable): """Return first *n* items of the iterable as a list. >>> take(3, range(10)) [0, 1, 2] If there are fewer than *n* items in the iterable, all of them are returned. >>> take(10, range(3)) [0, 1, 2] """ return list(islice(iterable, n)) def tabulate(function, start=0): """Return an iterator over the results of ``func(start)``, ``func(start + 1)``, ``func(start + 2)``... *func* should be a function that accepts one integer argument. If *start* is not specified it defaults to 0. It will be incremented each time the iterator is advanced. >>> square = lambda x: x ** 2 >>> iterator = tabulate(square, -3) >>> take(4, iterator) [9, 4, 1, 0] """ return map(function, count(start)) def tail(n, iterable): """Return an iterator over the last *n* items of *iterable*. >>> t = tail(3, 'ABCDEFG') >>> list(t) ['E', 'F', 'G'] """ return iter(deque(iterable, maxlen=n)) def consume(iterator, n=None): """Advance *iterable* by *n* steps. If *n* is ``None``, consume it entirely. Efficiently exhausts an iterator without returning values. Defaults to consuming the whole iterator, but an optional second argument may be provided to limit consumption. >>> i = (x for x in range(10)) >>> next(i) 0 >>> consume(i, 3) >>> next(i) 4 >>> consume(i) >>> next(i) Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration If the iterator has fewer items remaining than the provided limit, the whole iterator will be consumed. >>> i = (x for x in range(3)) >>> consume(i, 5) >>> next(i) Traceback (most recent call last): File "<stdin>", line 1, in <module> StopIteration """ # Use functions that consume iterators at C speed. if n is None: # feed the entire iterator into a zero-length deque deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(islice(iterator, n, n), None) def nth(iterable, n, default=None): """Returns the nth item or a default value. >>> l = range(10) >>> nth(l, 3) 3 >>> nth(l, 20, "zebra") 'zebra' """ return next(islice(iterable, n, None), default) def all_equal(iterable): """ Returns ``True`` if all the elements are equal to each other. >>> all_equal('aaaa') True >>> all_equal('aaab') False """ g = groupby(iterable) return next(g, True) and not next(g, False) def quantify(iterable, pred=bool): """Return the how many times the predicate is true. >>> quantify([True, False, True]) 2 """ return sum(map(pred, iterable)) def pad_none(iterable): """Returns the sequence of elements and then returns ``None`` indefinitely. 
>>> take(5, pad_none(range(3))) [0, 1, 2, None, None] Useful for emulating the behavior of the built-in :func:`map` function. See also :func:`padded`. """ return chain(iterable, repeat(None)) padnone = pad_none def ncycles(iterable, n): """Returns the sequence elements *n* times >>> list(ncycles(["a", "b"], 3)) ['a', 'b', 'a', 'b', 'a', 'b'] """ return chain.from_iterable(repeat(tuple(iterable), n)) def dotproduct(vec1, vec2): """Returns the dot product of the two iterables. >>> dotproduct([10, 10], [20, 20]) 400 """ return sum(map(operator.mul, vec1, vec2)) def flatten(listOfLists): """Return an iterator flattening one level of nesting in a list of lists. >>> list(flatten([[0, 1], [2, 3]])) [0, 1, 2, 3] See also :func:`collapse`, which can flatten multiple levels of nesting. """ return chain.from_iterable(listOfLists) def repeatfunc(func, times=None, *args): """Call *func* with *args* repeatedly, returning an iterable over the results. If *times* is specified, the iterable will terminate after that many repetitions: >>> from operator import add >>> times = 4 >>> args = 3, 5 >>> list(repeatfunc(add, times, *args)) [8, 8, 8, 8] If *times* is ``None`` the iterable will not terminate: >>> from random import randrange >>> times = None >>> args = 1, 11 >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP [2, 4, 8, 1, 8, 4] """ if times is None: return starmap(func, repeat(args)) return starmap(func, repeat(args, times)) def _pairwise(iterable): """Returns an iterator of paired items, overlapping, from the original >>> take(4, pairwise(count())) [(0, 1), (1, 2), (2, 3), (3, 4)] On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. """ a, b = tee(iterable) next(b, None) yield from zip(a, b) try: from itertools import pairwise as itertools_pairwise except ImportError: pairwise = _pairwise else: def pairwise(iterable): yield from itertools_pairwise(iterable) pairwise.__doc__ = _pairwise.__doc__ def grouper(iterable, n, fillvalue=None): """Collect data into fixed-length chunks or blocks. >>> list(grouper('ABCDEFG', 3, 'x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] """ if isinstance(iterable, int): warnings.warn( "grouper expects iterable as first parameter", DeprecationWarning ) n, iterable = iterable, n args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) def roundrobin(*iterables): """Yields an item from each iterable, alternating between them. >>> list(roundrobin('ABC', 'D', 'EF')) ['A', 'D', 'E', 'B', 'F', 'C'] This function produces the same output as :func:`interleave_longest`, but may perform better for some inputs (in particular when the number of iterables is small). """ # Recipe credited to George Sakkis pending = len(iterables) nexts = cycle(iter(it).__next__ for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending)) def partition(pred, iterable): """ Returns a 2-tuple of iterables derived from the input iterable. The first yields the items that have ``pred(item) == False``. The second yields the items that have ``pred(item) == True``. >>> is_odd = lambda x: x % 2 != 0 >>> iterable = range(10) >>> even_items, odd_items = partition(is_odd, iterable) >>> list(even_items), list(odd_items) ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) If *pred* is None, :func:`bool` is used. 
>>> iterable = [0, 1, False, True, '', ' '] >>> false_items, true_items = partition(None, iterable) >>> list(false_items), list(true_items) ([0, False, ''], [1, True, ' ']) """ if pred is None: pred = bool evaluations = ((pred(x), x) for x in iterable) t1, t2 = tee(evaluations) return ( (x for (cond, x) in t1 if not cond), (x for (cond, x) in t2 if cond), ) def powerset(iterable): """Yields all possible subsets of the iterable. >>> list(powerset([1, 2, 3])) [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] :func:`powerset` will operate on iterables that aren't :class:`set` instances, so repeated elements in the input will produce repeated elements in the output. Use :func:`unique_everseen` on the input to avoid generating duplicates: >>> seq = [1, 1, 0] >>> list(powerset(seq)) [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] >>> from more_itertools import unique_everseen >>> list(powerset(unique_everseen(seq))) [(), (1,), (0,), (1, 0)] """ s = list(iterable) return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) def unique_everseen(iterable, key=None): """ Yield unique elements, preserving order. >>> list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] Sequences with a mix of hashable and unhashable items can be used. The function will be slower (i.e., `O(n^2)`) for unhashable items. Remember that ``list`` objects are unhashable - you can use the *key* parameter to transform the list to a tuple (which is hashable) to avoid a slowdown. >>> iterable = ([1, 2], [2, 3], [1, 2]) >>> list(unique_everseen(iterable)) # Slow [[1, 2], [2, 3]] >>> list(unique_everseen(iterable, key=tuple)) # Faster [[1, 2], [2, 3]] Similarly, you may want to convert unhashable ``set`` objects with ``key=frozenset``. For ``dict`` objects, ``key=lambda x: frozenset(x.items())`` can be used. """ seenset = set() seenset_add = seenset.add seenlist = [] seenlist_add = seenlist.append use_key = key is not None for element in iterable: k = key(element) if use_key else element try: if k not in seenset: seenset_add(k) yield element except TypeError: if k not in seenlist: seenlist_add(k) yield element def unique_justseen(iterable, key=None): """Yields elements in order, ignoring serial duplicates. >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] """ return map(next, map(operator.itemgetter(1), groupby(iterable, key))) def iter_except(func, exception, first=None): """Yields results from a function repeatedly until an exception is raised. Converts a call-until-exception interface to an iterator interface. Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel to end the loop. >>> l = [0, 1, 2] >>> list(iter_except(l.pop, IndexError)) [2, 1, 0] Multiple exceptions can be specified as a stopping condition: >>> l = [1, 2, 3, '...', 4, 5, 6] >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) [7, 6, 5] >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) [4, 3, 2] >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) [] """ try: if first is not None: yield first() while 1: yield func() except exception: pass def first_true(iterable, default=None, pred=None): """ Returns the first true value in the iterable. If no true value is found, returns *default*. If *pred* is not None, returns the first item for which ``pred(item) == True``.
>>> first_true(range(10)) 1 >>> first_true(range(10), pred=lambda x: x > 5) 6 >>> first_true(range(10), default='missing', pred=lambda x: x > 9) 'missing' """ return next(filter(pred, iterable), default) def random_product(*args, repeat=1): """Draw an item at random from each of the input iterables. >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP ('c', 3, 'Z') If *repeat* is provided as a keyword argument, that many items will be drawn from each iterable. >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP ('a', 2, 'd', 3) This is equivalent to taking a random selection from ``itertools.product(*args, **kwargs)``. """ pools = [tuple(pool) for pool in args] * repeat return tuple(choice(pool) for pool in pools) def random_permutation(iterable, r=None): """Return a random *r* length permutation of the elements in *iterable*. If *r* is not specified or is ``None``, then *r* defaults to the length of *iterable*. >>> random_permutation(range(5)) # doctest:+SKIP (3, 4, 0, 1, 2) This is equivalent to taking a random selection from ``itertools.permutations(iterable, r)``. """ pool = tuple(iterable) r = len(pool) if r is None else r return tuple(sample(pool, r)) def random_combination(iterable, r): """Return a random *r* length subsequence of the elements in *iterable*. >>> random_combination(range(5), 3) # doctest:+SKIP (2, 3, 4) This is equivalent to taking a random selection from ``itertools.combinations(iterable, r)``. """ pool = tuple(iterable) n = len(pool) indices = sorted(sample(range(n), r)) return tuple(pool[i] for i in indices) def random_combination_with_replacement(iterable, r): """Return a random *r* length subsequence of elements in *iterable*, allowing individual elements to be repeated. >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP (0, 0, 1, 2, 2) This is equivalent to taking a random selection from ``itertools.combinations_with_replacement(iterable, r)``. """ pool = tuple(iterable) n = len(pool) indices = sorted(randrange(n) for i in range(r)) return tuple(pool[i] for i in indices) def nth_combination(iterable, r, index): """Equivalent to ``list(combinations(iterable, r))[index]``. The subsequences of *iterable* that are of length *r* can be ordered lexicographically. :func:`nth_combination` computes the subsequence at sort position *index* directly, without computing the previous subsequences. >>> nth_combination(range(5), 3, 5) (0, 3, 4) ``ValueError`` will be raised if *r* is negative or greater than the length of *iterable*. ``IndexError`` will be raised if the given *index* is invalid. """ pool = tuple(iterable) n = len(pool) if (r < 0) or (r > n): raise ValueError c = 1 k = min(r, n - r) for i in range(1, k + 1): c = c * (n - k + i) // i if index < 0: index += c if (index < 0) or (index >= c): raise IndexError result = [] while r: c, n, r = c * r // n, n - 1, r - 1 while index >= c: index -= c c, n = c * (n - r) // n, n - 1 result.append(pool[-1 - n]) return tuple(result) def prepend(value, iterator): """Yield *value*, followed by the elements in *iterator*. >>> value = '0' >>> iterator = ['1', '2', '3'] >>> list(prepend(value, iterator)) ['0', '1', '2', '3'] To prepend multiple values, see :func:`itertools.chain` or :func:`value_chain`. """ return chain([value], iterator) def convolve(signal, kernel): """Convolve the iterable *signal* with the iterable *kernel*.
>>> signal = (1, 2, 3, 4, 5) >>> kernel = [3, 2, 1] >>> list(convolve(signal, kernel)) [3, 8, 14, 20, 26, 14, 5] Note: the input arguments are not interchangeable, as the *kernel* is immediately consumed and stored. """ kernel = tuple(kernel)[::-1] n = len(kernel) window = deque([0], maxlen=n) * n for x in chain(signal, repeat(0, n - 1)): window.append(x) yield sum(map(operator.mul, kernel, window)) def before_and_after(predicate, it): """A variant of :func:`takewhile` that allows complete access to the remainder of the iterator. >>> it = iter('ABCdEfGhI') >>> all_upper, remainder = before_and_after(str.isupper, it) >>> ''.join(all_upper) 'ABC' >>> ''.join(remainder) # takewhile() would lose the 'd' 'dEfGhI' Note that the first iterator must be fully consumed before the second iterator can generate valid results. """ it = iter(it) transition = [] def true_iterator(): for elem in it: if predicate(elem): yield elem else: transition.append(elem) return def remainder_iterator(): yield from transition yield from it return true_iterator(), remainder_iterator() def triplewise(iterable): """Return overlapping triplets from *iterable*. >>> list(triplewise('ABCDE')) [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] """ for (a, _), (b, c) in pairwise(pairwise(iterable)): yield a, b, c def sliding_window(iterable, n): """Return a sliding window of width *n* over *iterable*. >>> list(sliding_window(range(6), 4)) [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] If *iterable* has fewer than *n* items, then nothing is yielded: >>> list(sliding_window(range(3), 4)) [] For a variant with more features, see :func:`windowed`. """ it = iter(iterable) window = deque(islice(it, n), maxlen=n) if len(window) == n: yield tuple(window) for x in it: window.append(x) yield tuple(window)
18,410
Python
.py
514
29.256809
79
0.595867
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
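The consumption-order caveat in ``before_and_after`` above is easy to trip over: the remainder iterator yields valid results only after the first iterator is exhausted. A short sketch of the intended usage:

from more_itertools import before_and_after

head, rest = before_and_after(str.isupper, iter('ABCdEfG'))
print(''.join(head))  # 'ABC' -- drain this fully first
print(''.join(rest))  # 'dEfG' -- keeps the 'd' that ended the run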
8,426
client.py
rembo10_headphones/lib/qbittorrentv2/client.py
import requests import json class LoginRequired(Exception): def __str__(self): return 'Please login first.' class Client(object): """Class to interact with qBittorrent WEB API""" def __init__(self, url, verify=True): """ Initialize the client :param url: Base URL of the qBittorrent WEB API :param verify: Boolean to specify if SSL verification should be done. Defaults to True. """ if not url.endswith('/'): url += '/' self.url = url + 'api/v2/' self.verify = verify session = requests.Session() prefs_url = self.url + 'app/preferences' check_prefs = session.get(prefs_url, verify=self.verify) if check_prefs.status_code == 200: self._is_authenticated = True self.session = session elif check_prefs.status_code == 404: self._is_authenticated = False raise RuntimeError(""" This wrapper only supports qBittorrent applications with version higher than 4.1.x. Please use the latest qBittorrent release. """) else: self._is_authenticated = False def _get(self, endpoint, **kwargs): """ Method to perform GET request on the API. :param endpoint: Endpoint of the API. :param kwargs: Other keyword arguments for requests. :return: Response of the GET request. """ return self._request(endpoint, 'get', **kwargs) def _post(self, endpoint, data, **kwargs): """ Method to perform POST request on the API. :param endpoint: Endpoint of the API. :param data: POST DATA for the request. :param kwargs: Other keyword arguments for requests. :return: Response of the POST request. """ return self._request(endpoint, 'post', data, **kwargs) def _request(self, endpoint, method, data=None, **kwargs): """ Method to handle both GET and POST requests. :param endpoint: Endpoint of the API. :param method: Method of HTTP request. :param data: POST DATA for the request. :param kwargs: Other keyword arguments. :return: Response for the request. """ final_url = self.url + endpoint if not self._is_authenticated: raise LoginRequired kwargs['verify'] = self.verify if method == 'get': request = self.session.get(final_url, **kwargs) else: request = self.session.post(final_url, data, **kwargs) request.raise_for_status() request.encoding = 'utf_8' if len(request.text) == 0: data = json.loads('{}') else: try: data = json.loads(request.text) except ValueError: data = request.text return data def login(self, username='admin', password='admin'): """ Method to authenticate the qBittorrent Client. Declares a class attribute named ``session`` which stores the authenticated session if the login is correct. Else, shows the login error. :param username: Username. :param password: Password. :return: Response to login request to the API. """ self.session = requests.Session() login = self.session.post(self.url + 'auth/login', data={'username': username, 'password': password}, verify=self.verify) if login.text == 'Ok.': self._is_authenticated = True else: return login.text def logout(self): """ Logout the current session. """ response = self._get('auth/logout') self._is_authenticated = False return response @property def qbittorrent_version(self): """ Get qBittorrent version. """ return self._get('app/version') @property def api_version(self): """ Get WEB API version. """ return self._get('app/webapiVersion') def shutdown(self): """ Shutdown qBittorrent. """ return self._get('app/shutdown') def get_default_save_path(self): """ Get default save path. """ return self._get('app/defaultSavePath') def get_log(self, **params): """ Returns a list of log entries matching the supplied params. :param normal: Include normal messages (default: true).
:param info: Include info messages (default: true). :param warning: Include warning messages (default: true). :param critical: Include critical messages (default: true). :param last_known_id: Exclude messages with "message id" <= last_known_id (default: -1). :return: list(). For example: qb.get_log(normal='true', info='true') """ return self._get('log/main', params=params) def torrents(self, **filters): """ Returns a list of torrents matching the supplied filters. :param filter: Current status of the torrents. :param category: Fetch all torrents with the supplied label. :param sort: Sort torrents by. :param reverse: Enable reverse sorting. :param limit: Limit the number of torrents returned. :param offset: Set offset (if less than 0, offset from end). :return: list() of torrent with matching filter. For example: qb.torrents(filter='downloading', sort='ratio'). """ params = {} for name, value in list(filters.items()): # make sure that old 'status' argument still works name = 'filter' if name == 'status' else name params[name] = value return self._get('torrents/info', params=params) def get_torrent(self, infohash): """ Get details of the torrent. :param infohash: INFO HASH of the torrent. """ return self._get('torrents/properties?hash=' + infohash.lower()) def get_torrent_trackers(self, infohash): """ Get trackers for the torrent. :param infohash: INFO HASH of the torrent. """ return self._get('torrents/trackers?hash=' + infohash.lower()) def get_torrent_webseeds(self, infohash): """ Get webseeds for the torrent. :param infohash: INFO HASH of the torrent. """ return self._get('torrents/webseeds?hash=' + infohash.lower()) def get_torrent_files(self, infohash): """ Get list of files for the torrent. :param infohash: INFO HASH of the torrent. """ return self._get('torrents/files?hash=' + infohash.lower()) def get_torrent_piece_states(self, infohash): """ Get list of all pieces (in order) of a specific torrent. :param infohash: INFO HASH of the torrent. :return: array of states (integers). """ return self._get('torrents/pieceStates?hash=' + infohash.lower()) def get_torrent_piece_hashes(self, infohash): """ Get list of all hashes (in order) of a specific torrent. :param infohash: INFO HASH of the torrent. :return: array of hashes (strings). """ return self._get('torrents/pieceHashes?hash=' + infohash.lower()) @property def global_transfer_info(self): """ :return: dict{} of the global transfer info of qBittorrent. """ return self._get('transfer/info') @property def preferences(self): """ Get the current qBittorrent preferences. Can also be used to assign individual preferences. For setting multiple preferences at once, see ``set_preferences`` method. Note: Even if this is a ``property``, to fetch the current preferences dict, you are required to call it like a bound method. Wrong:: qb.preferences Right:: qb.preferences() """ prefs = self._get('app/preferences') class Proxy(Client): """ Proxy class to allow assignment of individual preferences. This class overrides some methods to ease things.
Because of this, settings can be assigned like:: In [5]: prefs = qb.preferences() In [6]: prefs['autorun_enabled'] Out[6]: True In [7]: prefs['autorun_enabled'] = False In [8]: prefs['autorun_enabled'] Out[8]: False """ def __init__(self, url, prefs, auth, session): self.url = url self.prefs = prefs self._is_authenticated = auth self.session = session def __getitem__(self, key): return self.prefs[key] def __setitem__(self, key, value): kwargs = {key: value} return self.set_preferences(**kwargs) def __call__(self): return self.prefs return Proxy(self.url, prefs, self._is_authenticated, self.session) def sync_main_data(self, rid=0): """ Sync the torrents main data by supplied LAST RESPONSE ID. Read more @ https://git.io/fxgB8 :param rid: Response ID of last request. """ return self._get('sync/maindata', params={'rid': rid}) def sync_peers_data(self, infohash, rid=0): """ Sync the torrent peers data by supplied LAST RESPONSE ID. Read more @ https://git.io/fxgBg :param infohash: INFO HASH of torrent. :param rid: Response ID of last request. """ params = {'hash': infohash.lower(), 'rid': rid} return self._get('sync/torrentPeers', params=params) def download_from_link(self, link, **kwargs): """ Download torrent using a link. :param link: URL Link or list of. :param savepath: Path to download the torrent. :param category: Label or Category of the torrent(s). :return: Empty JSON data. """ # old:new format old_arg_map = {'save_path': 'savepath'} # , 'label': 'category'} # convert old option names to new option names options = kwargs.copy() for old_arg, new_arg in list(old_arg_map.items()): if options.get(old_arg) and not options.get(new_arg): options[new_arg] = options[old_arg] if isinstance(link, list): options['urls'] = "\n".join(link) else: options['urls'] = link # workaround to send multipart/formdata request # http://stackoverflow.com/a/23131823/4726598 dummy_file = {'_dummy': (None, '_dummy')} return self._post('torrents/add', data=options, files=dummy_file) def download_from_file(self, file_buffer, **kwargs): """ Download torrent using a file. :param file_buffer: Single file() buffer or list of. :param save_path: Path to download the torrent. :param label: Label of the torrent(s). :return: Empty JSON data. """ if isinstance(file_buffer, list): torrent_files = {} for i, f in enumerate(file_buffer): torrent_files.update({'torrents%s' % i: f}) else: torrent_files = {'torrents': file_buffer} data = kwargs.copy() if data.get('save_path'): data.update({'savepath': data['save_path']}) return self._post('torrents/add', data=data, files=torrent_files) def add_trackers(self, infohash, trackers): """ Add trackers to a torrent. :param infohash: INFO HASH of torrent. :param trackers: Trackers. :note %0A (aka LF newline) between trackers. Ampersand in tracker urls MUST be escaped. """ data = {'hash': infohash.lower(), 'urls': trackers} return self._post('torrents/addTrackers', data=data) def set_torrent_location(self, infohash_list, location): """ Set the location for the torrent :param infohash: INFO HASH of torrent. :param location: /mnt/nfs/media. """ data = self._process_infohash_list(infohash_list) data['location'] = location return self._post('torrents/setLocation', data=data) def set_torrent_name(self, infohash, name): """ Set the name for the torrent :param infohash: INFO HASH of torrent. :param name: Whatever_name_you_want. 
""" data = {'hash': infohash.lower(), 'name': name} return self._post('torrents/rename', data=data) @staticmethod def _process_infohash_list(infohash_list): """ Method to convert the infohash_list to qBittorrent API friendly values. :param infohash_list: List of infohash. """ if isinstance(infohash_list, list): data = {'hashes': '|'.join([h.lower() for h in infohash_list])} else: data = {'hashes': infohash_list.lower()} return data def pause(self, infohash): """ Pause a torrent. :param infohash: INFO HASH of torrent. """ return self._post('torrents/pause', data={'hashes': infohash.lower()}) def pause_all(self): """ Pause all torrents. """ return self._post('torrents/pause', data={'hashes': 'all'}) def pause_multiple(self, infohash_list): """ Pause multiple torrents. :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/pause', data=data) def set_category(self, infohash_list, category): """ Set the category on multiple torrents. The category must exist before using set_category. As of v2.1.0,the API returns a 409 Client Error for any valid category name that doesn't already exist. :param infohash_list: Single or list() of infohashes. :param category: If category is set to empty string '', the torrent(s) specified is/are removed from all categories. """ data = self._process_infohash_list(infohash_list) data['category'] = category return self._post('torrents/setCategory', data=data) def create_category(self, category): """ Create a new category :param category: category to create """ return self._post('torrents/createCategory', data={'category': category.lower()}) def remove_category(self, categories): """ Remove categories :param categories: can contain multiple cateogies separated by \n (%0A urlencoded). """ return self._post('torrents/removeCategories', data={'categories': categories}) def resume(self, infohash): """ Resume a paused torrent. :param infohash: INFO HASH of torrent. """ return self._post('torrents/resume', data={'hashes': infohash.lower()}) def resume_all(self): """ Resume all torrents. """ return self._post('torrents/resume', data={'hashes': 'all'}) def resume_multiple(self, infohash_list): """ Resume multiple paused torrents. :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/resume', data=data) def delete(self, infohash_list): """ Delete torrents. Does not remove files. :param infohash_list: Single or list() of infohashes. """ return self._delete(infohash_list) def delete_all(self): """ Delete all torrents. Does not remove files. """ return self._delete('all') def delete_permanently(self, infohash_list): """ Permanently delete torrents. Removes files. :param infohash_list: Single or list() of infohashes. """ return self._delete(infohash_list, True) def delete_all_permanently(self): """ Permanently delete torrents. """ return self._delete('all', True) def _delete(self, infohash_list, delete_files=False): """ Delete torrents. :param infohash_list: Single or list() of infohashes. :param delete_files: Whether to delete files along with torrent. """ data = self._process_infohash_list(infohash_list) data['deleteFiles'] = json.dumps(delete_files) return self._post('torrents/delete', data=data) def recheck(self, infohash_list): """ Recheck torrents. :param infohash_list: Single or list() of infohashes. 
""" data = self._process_infohash_list(infohash_list) return self._post('torrents/recheck', data=data) def recheck_all(self): """ Recheck all torrents. """ return self._post('torrents/recheck', data={'hashes': 'all'}) def reannounce(self, infohash_list): """ Recheck all torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/reannounce', data=data) def increase_priority(self, infohash_list): """ Increase priority of torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/increasePrio', data=data) def decrease_priority(self, infohash_list): """ Decrease priority of torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/decreasePrio', data=data) def set_max_priority(self, infohash_list): """ Set torrents to maximum priority level. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/topPrio', data=data) def set_min_priority(self, infohash_list): """ Set torrents to minimum priority level. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/bottomPrio', data=data) def set_file_priority(self, infohash, file_id, priority): """ Set file of a torrent to a supplied priority level. :param infohash: INFO HASH of torrent. :param file_id: ID of the file to set priority. :param priority: Priority level of the file. :note priority 4 is no priority set """ if priority not in [0, 1, 2, 4, 7]: raise ValueError("Invalid priority, refer WEB-UI docs for info.") elif not isinstance(file_id, int): raise TypeError("File ID must be an int") data = {'hash': infohash.lower(), 'id': file_id, 'priority': priority} return self._post('torrents/filePrio', data=data) def set_automatic_torrent_management(self, infohash_list, enable='false'): """ Set the category on multiple torrents. :param infohash_list: Single or list() of infohashes. :param enable: is a boolean, affects the torrents listed in infohash_list, default is 'false' """ data = self._process_infohash_list(infohash_list) data['enable'] = enable return self._post('torrents/setAutoManagement', data=data) # Get-set global download and upload speed limits. def get_global_download_limit(self): """ Get global download speed limit. """ return self._get('transfer/downloadLimit') def set_global_download_limit(self, limit): """ Set global download speed limit. :param limit: Speed limit in bytes. """ return self._post('transfer/setDownloadLimit', data={'limit': limit}) global_download_limit = property(get_global_download_limit, set_global_download_limit) def get_global_upload_limit(self): """ Get global upload speed limit. """ return self._get('transfer/uploadLimit') def set_global_upload_limit(self, limit): """ Set global upload speed limit. :param limit: Speed limit in bytes. """ return self._post('transfer/setUploadLimit', data={'limit': limit}) global_upload_limit = property(get_global_upload_limit, set_global_upload_limit) # Get-set download and upload speed limits of the torrents. def get_torrent_download_limit(self, infohash_list): """ Get download speed limit of the supplied torrents. 
:param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/downloadLimit', data=data) def set_torrent_download_limit(self, infohash_list, limit): """ Set download speed limit of the supplied torrents. :param infohash_list: Single or list() of infohashes. :param limit: Speed limit in bytes. """ data = self._process_infohash_list(infohash_list) data.update({'limit': limit}) return self._post('torrents/setDownloadLimit', data=data) def get_torrent_upload_limit(self, infohash_list): """ Get upload speed limit of the supplied torrents. :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/uploadLimit', data=data) def set_torrent_upload_limit(self, infohash_list, limit): """ Set upload speed limit of the supplied torrents. :param infohash_list: Single or list() of infohashes. :param limit: Speed limit in bytes. """ data = self._process_infohash_list(infohash_list) data.update({'limit': limit}) return self._post('torrents/setUploadLimit', data=data) # setting preferences def set_preferences(self, **kwargs): """ Set preferences of qBittorrent. Read all possible preferences @ https://git.io/fx2Y9 :param kwargs: set preferences in kwargs form. """ json_data = "json={}".format(json.dumps(kwargs)) headers = {'content-type': 'application/x-www-form-urlencoded'} return self._post('app/setPreferences', data=json_data, headers=headers) def get_alternative_speed_status(self): """ Get Alternative speed limits. (1/0) """ return self._get('transfer/speedLimitsMode') alternative_speed_status = property(get_alternative_speed_status) def toggle_alternative_speed(self): """ Toggle alternative speed limits. """ return self._get('transfer/toggleSpeedLimitsMode') def toggle_sequential_download(self, infohash_list): """ Toggle sequential download in supplied torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/toggleSequentialDownload', data=data) def toggle_first_last_piece_priority(self, infohash_list): """ Toggle first/last piece priority of supplied torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. """ data = self._process_infohash_list(infohash_list) return self._post('torrents/toggleFirstLastPiecePrio', data=data) def force_start(self, infohash_list, value): """ Force start selected torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. :param value: Force start value (bool) """ data = self._process_infohash_list(infohash_list) data.update({'value': json.dumps(value)}) return self._post('torrents/setForceStart', data=data) def set_super_seeding(self, infohash_list, value): """ Set super seeding for selected torrents. :param infohash_list: Single or list() of infohashes; pass 'all' for all torrents. :param value: Super seeding value (bool) """ data = self._process_infohash_list(infohash_list) data.update({'value': json.dumps(value)}) return self._post('torrents/setSuperSeeding', data=data) def set_share_ratio(self, infohash_list, ratio_limit=-2, seeding_time_limit=-2, inactive_seeding_time_limit=-2): """ Set the share ratio limit of the supplied torrents. :param infohash_list: Single or list() of infohashes. :param ratio_limit: Ratio limit (optional 2 decimals). -2 means the global limit, -1 means no limit. :param seeding_time_limit: Time limit in minutes.
-2 means the global limit, -1 means no limit. :param inactive_seeding_time_limit: Time limit in minutes. -2 means the global limit, -1 means no limit. """ data = self._process_infohash_list(infohash_list) data.update({'ratioLimit': ratio_limit, 'seedingTimeLimit': seeding_time_limit, 'inactiveSeedingTimeLimit': inactive_seeding_time_limit} ) return self._post('torrents/setShareLimits', data=data)
26,319
Python
.py
628
32.272293
116
0.604679
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
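A minimal usage sketch for the client above, assuming a locally running qBittorrent WebUI; the URL and credentials are placeholders, and the import path assumes the vendored ``lib`` directory is on ``sys.path``:

from qbittorrentv2.client import Client

qb = Client('http://localhost:8080/')  # placeholder WebUI address
qb.login('admin', 'adminadmin')        # returns None on success, the error text otherwise
for torrent in qb.torrents(filter='downloading'):
    print(torrent['hash'], torrent['name'])
qb.logout()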
8,427
error.py
rembo10_headphones/lib/yaml/error.py
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] class Mark: def __init__(self, name, index, line, column, buffer, pointer): self.name = name self.index = index self.line = line self.column = column self.buffer = buffer self.pointer = pointer def get_snippet(self, indent=4, max_length=75): if self.buffer is None: return None head = '' start = self.pointer while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': start -= 1 if self.pointer-start > max_length/2-1: head = ' ... ' start += 5 break tail = '' end = self.pointer while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': end += 1 if end-self.pointer > max_length/2-1: tail = ' ... ' end -= 5 break snippet = self.buffer[start:end] return ' '*indent + head + snippet + tail + '\n' \ + ' '*(indent+self.pointer-start+len(head)) + '^' def __str__(self): snippet = self.get_snippet() where = " in \"%s\", line %d, column %d" \ % (self.name, self.line+1, self.column+1) if snippet is not None: where += ":\n"+snippet return where class YAMLError(Exception): pass class MarkedYAMLError(YAMLError): def __init__(self, context=None, context_mark=None, problem=None, problem_mark=None, note=None): self.context = context self.context_mark = context_mark self.problem = problem self.problem_mark = problem_mark self.note = note def __str__(self): lines = [] if self.context is not None: lines.append(self.context) if self.context_mark is not None \ and (self.problem is None or self.problem_mark is None or self.context_mark.name != self.problem_mark.name or self.context_mark.line != self.problem_mark.line or self.context_mark.column != self.problem_mark.column): lines.append(str(self.context_mark)) if self.problem is not None: lines.append(self.problem) if self.problem_mark is not None: lines.append(str(self.problem_mark)) if self.note is not None: lines.append(self.note) return '\n'.join(lines)
2,533
Python
.py
65
28.338462
90
0.544752
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
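A small sketch of how ``Mark.get_snippet`` renders its caret pointer and how ``MarkedYAMLError`` assembles the final message; the buffer contents here are made up for illustration:

from yaml.error import Mark, MarkedYAMLError

buf = 'key: [1, 2\nother: 3'
mark = Mark('<example>', 10, 0, 10, buf, 10)  # name, index, line, column, buffer, pointer
err = MarkedYAMLError(context='while parsing a flow sequence',
                      problem="expected ',' or ']'",
                      problem_mark=mark)
print(err)  # context, problem, then 'in "<example>", line 1, column 11:' above a caret snippet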
8,428
emitter.py
rembo10_headphones/lib/yaml/emitter.py
# Emitter expects events obeying the following grammar: # stream ::= STREAM-START document* STREAM-END # document ::= DOCUMENT-START node DOCUMENT-END # node ::= SCALAR | sequence | mapping # sequence ::= SEQUENCE-START node* SEQUENCE-END # mapping ::= MAPPING-START (node node)* MAPPING-END __all__ = ['Emitter', 'EmitterError'] from .error import YAMLError from .events import * class EmitterError(YAMLError): pass class ScalarAnalysis: def __init__(self, scalar, empty, multiline, allow_flow_plain, allow_block_plain, allow_single_quoted, allow_double_quoted, allow_block): self.scalar = scalar self.empty = empty self.multiline = multiline self.allow_flow_plain = allow_flow_plain self.allow_block_plain = allow_block_plain self.allow_single_quoted = allow_single_quoted self.allow_double_quoted = allow_double_quoted self.allow_block = allow_block class Emitter: DEFAULT_TAG_PREFIXES = { '!' : '!', 'tag:yaml.org,2002:' : '!!', } def __init__(self, stream, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None): # The stream should have the methods `write` and possibly `flush`. self.stream = stream # Encoding can be overridden by STREAM-START. self.encoding = None # Emitter is a state machine with a stack of states to handle nested # structures. self.states = [] self.state = self.expect_stream_start # Current event and the event queue. self.events = [] self.event = None # The current indentation level and the stack of previous indents. self.indents = [] self.indent = None # Flow level. self.flow_level = 0 # Contexts. self.root_context = False self.sequence_context = False self.mapping_context = False self.simple_key_context = False # Characteristics of the last emitted character: # - current position. # - is it a whitespace? # - is it an indention character # (indentation space, '-', '?', or ':')? self.line = 0 self.column = 0 self.whitespace = True self.indention = True # Whether the document requires an explicit document indicator self.open_ended = False # Formatting details. self.canonical = canonical self.allow_unicode = allow_unicode self.best_indent = 2 if indent and 1 < indent < 10: self.best_indent = indent self.best_width = 80 if width and width > self.best_indent*2: self.best_width = width self.best_line_break = '\n' if line_break in ['\r', '\n', '\r\n']: self.best_line_break = line_break # Tag prefixes. self.tag_prefixes = None # Prepared anchor and tag. self.prepared_anchor = None self.prepared_tag = None # Scalar analysis and style. self.analysis = None self.style = None def dispose(self): # Reset the state attributes (to clear self-references) self.states = [] self.state = None def emit(self, event): self.events.append(event) while not self.need_more_events(): self.event = self.events.pop(0) self.state() self.event = None # In some cases, we wait for a few next events before emitting. 
def need_more_events(self): if not self.events: return True event = self.events[0] if isinstance(event, DocumentStartEvent): return self.need_events(1) elif isinstance(event, SequenceStartEvent): return self.need_events(2) elif isinstance(event, MappingStartEvent): return self.need_events(3) else: return False def need_events(self, count): level = 0 for event in self.events[1:]: if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): level += 1 elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): level -= 1 elif isinstance(event, StreamEndEvent): level = -1 if level < 0: return False return (len(self.events) < count+1) def increase_indent(self, flow=False, indentless=False): self.indents.append(self.indent) if self.indent is None: if flow: self.indent = self.best_indent else: self.indent = 0 elif not indentless: self.indent += self.best_indent # States. # Stream handlers. def expect_stream_start(self): if isinstance(self.event, StreamStartEvent): if self.event.encoding and not hasattr(self.stream, 'encoding'): self.encoding = self.event.encoding self.write_stream_start() self.state = self.expect_first_document_start else: raise EmitterError("expected StreamStartEvent, but got %s" % self.event) def expect_nothing(self): raise EmitterError("expected nothing, but got %s" % self.event) # Document handlers. def expect_first_document_start(self): return self.expect_document_start(first=True) def expect_document_start(self, first=False): if isinstance(self.event, DocumentStartEvent): if (self.event.version or self.event.tags) and self.open_ended: self.write_indicator('...', True) self.write_indent() if self.event.version: version_text = self.prepare_version(self.event.version) self.write_version_directive(version_text) self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() if self.event.tags: handles = sorted(self.event.tags.keys()) for handle in handles: prefix = self.event.tags[handle] self.tag_prefixes[prefix] = handle handle_text = self.prepare_tag_handle(handle) prefix_text = self.prepare_tag_prefix(prefix) self.write_tag_directive(handle_text, prefix_text) implicit = (first and not self.event.explicit and not self.canonical and not self.event.version and not self.event.tags and not self.check_empty_document()) if not implicit: self.write_indent() self.write_indicator('---', True) if self.canonical: self.write_indent() self.state = self.expect_document_root elif isinstance(self.event, StreamEndEvent): if self.open_ended: self.write_indicator('...', True) self.write_indent() self.write_stream_end() self.state = self.expect_nothing else: raise EmitterError("expected DocumentStartEvent, but got %s" % self.event) def expect_document_end(self): if isinstance(self.event, DocumentEndEvent): self.write_indent() if self.event.explicit: self.write_indicator('...', True) self.write_indent() self.flush_stream() self.state = self.expect_document_start else: raise EmitterError("expected DocumentEndEvent, but got %s" % self.event) def expect_document_root(self): self.states.append(self.expect_document_end) self.expect_node(root=True) # Node handlers. 
def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False): self.root_context = root self.sequence_context = sequence self.mapping_context = mapping self.simple_key_context = simple_key if isinstance(self.event, AliasEvent): self.expect_alias() elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): self.process_anchor('&') self.process_tag() if isinstance(self.event, ScalarEvent): self.expect_scalar() elif isinstance(self.event, SequenceStartEvent): if self.flow_level or self.canonical or self.event.flow_style \ or self.check_empty_sequence(): self.expect_flow_sequence() else: self.expect_block_sequence() elif isinstance(self.event, MappingStartEvent): if self.flow_level or self.canonical or self.event.flow_style \ or self.check_empty_mapping(): self.expect_flow_mapping() else: self.expect_block_mapping() else: raise EmitterError("expected NodeEvent, but got %s" % self.event) def expect_alias(self): if self.event.anchor is None: raise EmitterError("anchor is not specified for alias") self.process_anchor('*') self.state = self.states.pop() def expect_scalar(self): self.increase_indent(flow=True) self.process_scalar() self.indent = self.indents.pop() self.state = self.states.pop() # Flow sequence handlers. def expect_flow_sequence(self): self.write_indicator('[', True, whitespace=True) self.flow_level += 1 self.increase_indent(flow=True) self.state = self.expect_first_flow_sequence_item def expect_first_flow_sequence_item(self): if isinstance(self.event, SequenceEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 self.write_indicator(']', False) self.state = self.states.pop() else: if self.canonical or self.column > self.best_width: self.write_indent() self.states.append(self.expect_flow_sequence_item) self.expect_node(sequence=True) def expect_flow_sequence_item(self): if isinstance(self.event, SequenceEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 if self.canonical: self.write_indicator(',', False) self.write_indent() self.write_indicator(']', False) self.state = self.states.pop() else: self.write_indicator(',', False) if self.canonical or self.column > self.best_width: self.write_indent() self.states.append(self.expect_flow_sequence_item) self.expect_node(sequence=True) # Flow mapping handlers. 
def expect_flow_mapping(self): self.write_indicator('{', True, whitespace=True) self.flow_level += 1 self.increase_indent(flow=True) self.state = self.expect_first_flow_mapping_key def expect_first_flow_mapping_key(self): if isinstance(self.event, MappingEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 self.write_indicator('}', False) self.state = self.states.pop() else: if self.canonical or self.column > self.best_width: self.write_indent() if not self.canonical and self.check_simple_key(): self.states.append(self.expect_flow_mapping_simple_value) self.expect_node(mapping=True, simple_key=True) else: self.write_indicator('?', True) self.states.append(self.expect_flow_mapping_value) self.expect_node(mapping=True) def expect_flow_mapping_key(self): if isinstance(self.event, MappingEndEvent): self.indent = self.indents.pop() self.flow_level -= 1 if self.canonical: self.write_indicator(',', False) self.write_indent() self.write_indicator('}', False) self.state = self.states.pop() else: self.write_indicator(',', False) if self.canonical or self.column > self.best_width: self.write_indent() if not self.canonical and self.check_simple_key(): self.states.append(self.expect_flow_mapping_simple_value) self.expect_node(mapping=True, simple_key=True) else: self.write_indicator('?', True) self.states.append(self.expect_flow_mapping_value) self.expect_node(mapping=True) def expect_flow_mapping_simple_value(self): self.write_indicator(':', False) self.states.append(self.expect_flow_mapping_key) self.expect_node(mapping=True) def expect_flow_mapping_value(self): if self.canonical or self.column > self.best_width: self.write_indent() self.write_indicator(':', True) self.states.append(self.expect_flow_mapping_key) self.expect_node(mapping=True) # Block sequence handlers. def expect_block_sequence(self): indentless = (self.mapping_context and not self.indention) self.increase_indent(flow=False, indentless=indentless) self.state = self.expect_first_block_sequence_item def expect_first_block_sequence_item(self): return self.expect_block_sequence_item(first=True) def expect_block_sequence_item(self, first=False): if not first and isinstance(self.event, SequenceEndEvent): self.indent = self.indents.pop() self.state = self.states.pop() else: self.write_indent() self.write_indicator('-', True, indention=True) self.states.append(self.expect_block_sequence_item) self.expect_node(sequence=True) # Block mapping handlers. def expect_block_mapping(self): self.increase_indent(flow=False) self.state = self.expect_first_block_mapping_key def expect_first_block_mapping_key(self): return self.expect_block_mapping_key(first=True) def expect_block_mapping_key(self, first=False): if not first and isinstance(self.event, MappingEndEvent): self.indent = self.indents.pop() self.state = self.states.pop() else: self.write_indent() if self.check_simple_key(): self.states.append(self.expect_block_mapping_simple_value) self.expect_node(mapping=True, simple_key=True) else: self.write_indicator('?', True, indention=True) self.states.append(self.expect_block_mapping_value) self.expect_node(mapping=True) def expect_block_mapping_simple_value(self): self.write_indicator(':', False) self.states.append(self.expect_block_mapping_key) self.expect_node(mapping=True) def expect_block_mapping_value(self): self.write_indent() self.write_indicator(':', True, indention=True) self.states.append(self.expect_block_mapping_key) self.expect_node(mapping=True) # Checkers. 
def check_empty_sequence(self): return (isinstance(self.event, SequenceStartEvent) and self.events and isinstance(self.events[0], SequenceEndEvent)) def check_empty_mapping(self): return (isinstance(self.event, MappingStartEvent) and self.events and isinstance(self.events[0], MappingEndEvent)) def check_empty_document(self): if not isinstance(self.event, DocumentStartEvent) or not self.events: return False event = self.events[0] return (isinstance(event, ScalarEvent) and event.anchor is None and event.tag is None and event.implicit and event.value == '') def check_simple_key(self): length = 0 if isinstance(self.event, NodeEvent) and self.event.anchor is not None: if self.prepared_anchor is None: self.prepared_anchor = self.prepare_anchor(self.event.anchor) length += len(self.prepared_anchor) if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ and self.event.tag is not None: if self.prepared_tag is None: self.prepared_tag = self.prepare_tag(self.event.tag) length += len(self.prepared_tag) if isinstance(self.event, ScalarEvent): if self.analysis is None: self.analysis = self.analyze_scalar(self.event.value) length += len(self.analysis.scalar) return (length < 128 and (isinstance(self.event, AliasEvent) or (isinstance(self.event, ScalarEvent) and not self.analysis.empty and not self.analysis.multiline) or self.check_empty_sequence() or self.check_empty_mapping())) # Anchor, Tag, and Scalar processors. def process_anchor(self, indicator): if self.event.anchor is None: self.prepared_anchor = None return if self.prepared_anchor is None: self.prepared_anchor = self.prepare_anchor(self.event.anchor) if self.prepared_anchor: self.write_indicator(indicator+self.prepared_anchor, True) self.prepared_anchor = None def process_tag(self): tag = self.event.tag if isinstance(self.event, ScalarEvent): if self.style is None: self.style = self.choose_scalar_style() if ((not self.canonical or tag is None) and ((self.style == '' and self.event.implicit[0]) or (self.style != '' and self.event.implicit[1]))): self.prepared_tag = None return if self.event.implicit[0] and tag is None: tag = '!' 
self.prepared_tag = None else: if (not self.canonical or tag is None) and self.event.implicit: self.prepared_tag = None return if tag is None: raise EmitterError("tag is not specified") if self.prepared_tag is None: self.prepared_tag = self.prepare_tag(tag) if self.prepared_tag: self.write_indicator(self.prepared_tag, True) self.prepared_tag = None def choose_scalar_style(self): if self.analysis is None: self.analysis = self.analyze_scalar(self.event.value) if self.event.style == '"' or self.canonical: return '"' if not self.event.style and self.event.implicit[0]: if (not (self.simple_key_context and (self.analysis.empty or self.analysis.multiline)) and (self.flow_level and self.analysis.allow_flow_plain or (not self.flow_level and self.analysis.allow_block_plain))): return '' if self.event.style and self.event.style in '|>': if (not self.flow_level and not self.simple_key_context and self.analysis.allow_block): return self.event.style if not self.event.style or self.event.style == '\'': if (self.analysis.allow_single_quoted and not (self.simple_key_context and self.analysis.multiline)): return '\'' return '"' def process_scalar(self): if self.analysis is None: self.analysis = self.analyze_scalar(self.event.value) if self.style is None: self.style = self.choose_scalar_style() split = (not self.simple_key_context) #if self.analysis.multiline and split \ # and (not self.style or self.style in '\'\"'): # self.write_indent() if self.style == '"': self.write_double_quoted(self.analysis.scalar, split) elif self.style == '\'': self.write_single_quoted(self.analysis.scalar, split) elif self.style == '>': self.write_folded(self.analysis.scalar) elif self.style == '|': self.write_literal(self.analysis.scalar) else: self.write_plain(self.analysis.scalar, split) self.analysis = None self.style = None # Analyzers. def prepare_version(self, version): major, minor = version if major != 1: raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) return '%d.%d' % (major, minor) def prepare_tag_handle(self, handle): if not handle: raise EmitterError("tag handle must not be empty") if handle[0] != '!' or handle[-1] != '!': raise EmitterError("tag handle must start and end with '!': %r" % handle) for ch in handle[1:-1]: if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_'): raise EmitterError("invalid character %r in the tag handle: %r" % (ch, handle)) return handle def prepare_tag_prefix(self, prefix): if not prefix: raise EmitterError("tag prefix must not be empty") chunks = [] start = end = 0 if prefix[0] == '!': end = 1 while end < len(prefix): ch = prefix[end] if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-;/?!:@&=+$,_.~*\'()[]': end += 1 else: if start < end: chunks.append(prefix[start:end]) start = end = end+1 data = ch.encode('utf-8') for ch in data: chunks.append('%%%02X' % ch) if start < end: chunks.append(prefix[start:end]) return ''.join(chunks) def prepare_tag(self, tag): if not tag: raise EmitterError("tag must not be empty") if tag == '!': return tag handle = None suffix = tag prefixes = sorted(self.tag_prefixes.keys()) for prefix in prefixes: if tag.startswith(prefix) \ and (prefix == '!' or len(prefix) < len(tag)): handle = self.tag_prefixes[prefix] suffix = tag[len(prefix):] chunks = [] start = end = 0 while end < len(suffix): ch = suffix[end] if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-;/?:@&=+$,_.~*\'()[]' \ or (ch == '!'
and handle != '!'): end += 1 else: if start < end: chunks.append(suffix[start:end]) start = end = end+1 data = ch.encode('utf-8') for ch in data: chunks.append('%%%02X' % ch) if start < end: chunks.append(suffix[start:end]) suffix_text = ''.join(chunks) if handle: return '%s%s' % (handle, suffix_text) else: return '!<%s>' % suffix_text def prepare_anchor(self, anchor): if not anchor: raise EmitterError("anchor must not be empty") for ch in anchor: if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_'): raise EmitterError("invalid character %r in the anchor: %r" % (ch, anchor)) return anchor def analyze_scalar(self, scalar): # Empty scalar is a special case. if not scalar: return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, allow_flow_plain=False, allow_block_plain=True, allow_single_quoted=True, allow_double_quoted=True, allow_block=False) # Indicators and special characters. block_indicators = False flow_indicators = False line_breaks = False special_characters = False # Important whitespace combinations. leading_space = False leading_break = False trailing_space = False trailing_break = False break_space = False space_break = False # Check document indicators. if scalar.startswith('---') or scalar.startswith('...'): block_indicators = True flow_indicators = True # First character or preceded by a whitespace. preceded_by_whitespace = True # Last character or followed by a whitespace. followed_by_whitespace = (len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029') # The previous character is a space. previous_space = False # The previous character is a break. previous_break = False index = 0 while index < len(scalar): ch = scalar[index] # Check for indicators. if index == 0: # Leading indicators are special characters. if ch in '#,[]{}&*!|>\'\"%@`': flow_indicators = True block_indicators = True if ch in '?:': flow_indicators = True if followed_by_whitespace: block_indicators = True if ch == '-' and followed_by_whitespace: flow_indicators = True block_indicators = True else: # Some indicators cannot appear within a scalar as well. if ch in ',?[]{}': flow_indicators = True if ch == ':': flow_indicators = True if followed_by_whitespace: block_indicators = True if ch == '#' and preceded_by_whitespace: flow_indicators = True block_indicators = True # Check for line breaks, special, and unicode characters. if ch in '\n\x85\u2028\u2029': line_breaks = True if not (ch == '\n' or '\x20' <= ch <= '\x7E'): if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD' or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF': unicode_characters = True if not self.allow_unicode: special_characters = True else: special_characters = True # Detect important whitespace combinations. if ch == ' ': if index == 0: leading_space = True if index == len(scalar)-1: trailing_space = True if previous_break: break_space = True previous_space = True previous_break = False elif ch in '\n\x85\u2028\u2029': if index == 0: leading_break = True if index == len(scalar)-1: trailing_break = True if previous_space: space_break = True previous_space = False previous_break = True else: previous_space = False previous_break = False # Prepare for the next character. index += 1 preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') followed_by_whitespace = (index+1 >= len(scalar) or scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') # Let's decide what styles are allowed. 
allow_flow_plain = True allow_block_plain = True allow_single_quoted = True allow_double_quoted = True allow_block = True # Leading and trailing whitespaces are bad for plain scalars. if (leading_space or leading_break or trailing_space or trailing_break): allow_flow_plain = allow_block_plain = False # We do not permit trailing spaces for block scalars. if trailing_space: allow_block = False # Spaces at the beginning of a new line are only acceptable for block # scalars. if break_space: allow_flow_plain = allow_block_plain = allow_single_quoted = False # Spaces followed by breaks, as well as special character are only # allowed for double quoted scalars. if space_break or special_characters: allow_flow_plain = allow_block_plain = \ allow_single_quoted = allow_block = False # Although the plain scalar writer supports breaks, we never emit # multiline plain scalars. if line_breaks: allow_flow_plain = allow_block_plain = False # Flow indicators are forbidden for flow plain scalars. if flow_indicators: allow_flow_plain = False # Block indicators are forbidden for block plain scalars. if block_indicators: allow_block_plain = False return ScalarAnalysis(scalar=scalar, empty=False, multiline=line_breaks, allow_flow_plain=allow_flow_plain, allow_block_plain=allow_block_plain, allow_single_quoted=allow_single_quoted, allow_double_quoted=allow_double_quoted, allow_block=allow_block) # Writers. def flush_stream(self): if hasattr(self.stream, 'flush'): self.stream.flush() def write_stream_start(self): # Write BOM if needed. if self.encoding and self.encoding.startswith('utf-16'): self.stream.write('\uFEFF'.encode(self.encoding)) def write_stream_end(self): self.flush_stream() def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False): if self.whitespace or not need_whitespace: data = indicator else: data = ' '+indicator self.whitespace = whitespace self.indention = self.indention and indention self.column += len(data) self.open_ended = False if self.encoding: data = data.encode(self.encoding) self.stream.write(data) def write_indent(self): indent = self.indent or 0 if not self.indention or self.column > indent \ or (self.column == indent and not self.whitespace): self.write_line_break() if self.column < indent: self.whitespace = True data = ' '*(indent-self.column) self.column = indent if self.encoding: data = data.encode(self.encoding) self.stream.write(data) def write_line_break(self, data=None): if data is None: data = self.best_line_break self.whitespace = True self.indention = True self.line += 1 self.column = 0 if self.encoding: data = data.encode(self.encoding) self.stream.write(data) def write_version_directive(self, version_text): data = '%%YAML %s' % version_text if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.write_line_break() def write_tag_directive(self, handle_text, prefix_text): data = '%%TAG %s %s' % (handle_text, prefix_text) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.write_line_break() # Scalar streams. 
def write_single_quoted(self, text, split=True): self.write_indicator('\'', True) spaces = False breaks = False start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if spaces: if ch is None or ch != ' ': if start+1 == end and self.column > self.best_width and split \ and start != 0 and end != len(text): self.write_indent() else: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end elif breaks: if ch is None or ch not in '\n\x85\u2028\u2029': if text[start] == '\n': self.write_line_break() for br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) self.write_indent() start = end else: if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': if start < end: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end if ch == '\'': data = '\'\'' self.column += 2 if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end + 1 if ch is not None: spaces = (ch == ' ') breaks = (ch in '\n\x85\u2028\u2029') end += 1 self.write_indicator('\'', False) ESCAPE_REPLACEMENTS = { '\0': '0', '\x07': 'a', '\x08': 'b', '\x09': 't', '\x0A': 'n', '\x0B': 'v', '\x0C': 'f', '\x0D': 'r', '\x1B': 'e', '\"': '\"', '\\': '\\', '\x85': 'N', '\xA0': '_', '\u2028': 'L', '\u2029': 'P', } def write_double_quoted(self, text, split=True): self.write_indicator('"', True) start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ or not ('\x20' <= ch <= '\x7E' or (self.allow_unicode and ('\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD'))): if start < end: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end if ch is not None: if ch in self.ESCAPE_REPLACEMENTS: data = '\\'+self.ESCAPE_REPLACEMENTS[ch] elif ch <= '\xFF': data = '\\x%02X' % ord(ch) elif ch <= '\uFFFF': data = '\\u%04X' % ord(ch) else: data = '\\U%08X' % ord(ch) self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end+1 if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ and self.column+(end-start) > self.best_width and split: data = text[start:end]+'\\' if start < end: start = end self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.write_indent() self.whitespace = False self.indention = False if text[start] == ' ': data = '\\' self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) end += 1 self.write_indicator('"', False) def determine_block_hints(self, text): hints = '' if text: if text[0] in ' \n\x85\u2028\u2029': hints += str(self.best_indent) if text[-1] not in '\n\x85\u2028\u2029': hints += '-' elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': hints += '+' return hints def write_folded(self, text): hints = self.determine_block_hints(text) self.write_indicator('>'+hints, True) if hints[-1:] == '+': self.open_ended = True self.write_line_break() leading_space = True spaces = False breaks = True start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if breaks: if ch is None or ch not in '\n\x85\u2028\u2029': if not leading_space and ch is not None and ch != ' ' \ and text[start] == '\n': self.write_line_break() leading_space = (ch == ' ') for 
br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) if ch is not None: self.write_indent() start = end elif spaces: if ch != ' ': if start+1 == end and self.column > self.best_width: self.write_indent() else: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end else: if ch is None or ch in ' \n\x85\u2028\u2029': data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) if ch is None: self.write_line_break() start = end if ch is not None: breaks = (ch in '\n\x85\u2028\u2029') spaces = (ch == ' ') end += 1 def write_literal(self, text): hints = self.determine_block_hints(text) self.write_indicator('|'+hints, True) if hints[-1:] == '+': self.open_ended = True self.write_line_break() breaks = True start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if breaks: if ch is None or ch not in '\n\x85\u2028\u2029': for br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) if ch is not None: self.write_indent() start = end else: if ch is None or ch in '\n\x85\u2028\u2029': data = text[start:end] if self.encoding: data = data.encode(self.encoding) self.stream.write(data) if ch is None: self.write_line_break() start = end if ch is not None: breaks = (ch in '\n\x85\u2028\u2029') end += 1 def write_plain(self, text, split=True): if self.root_context: self.open_ended = True if not text: return if not self.whitespace: data = ' ' self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) self.whitespace = False self.indention = False spaces = False breaks = False start = end = 0 while end <= len(text): ch = None if end < len(text): ch = text[end] if spaces: if ch != ' ': if start+1 == end and self.column > self.best_width and split: self.write_indent() self.whitespace = False self.indention = False else: data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end elif breaks: if ch not in '\n\x85\u2028\u2029': if text[start] == '\n': self.write_line_break() for br in text[start:end]: if br == '\n': self.write_line_break() else: self.write_line_break(br) self.write_indent() self.whitespace = False self.indention = False start = end else: if ch is None or ch in ' \n\x85\u2028\u2029': data = text[start:end] self.column += len(data) if self.encoding: data = data.encode(self.encoding) self.stream.write(data) start = end if ch is not None: spaces = (ch == ' ') breaks = (ch in '\n\x85\u2028\u2029') end += 1
43,006
Python
.py
1,021
28.627816
85
0.523156
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
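The following minimal sketch is not taken from the file above; it assumes only a standard PyYAML install importable as `yaml`, and it drives the emitter's event state machine through the public `yaml.emit()` helper. With `flow_style=True` the events route through the `expect_flow_mapping_*` handlers shown in emitter.py; `flow_style=False` would route through the `expect_block_mapping_*` handlers instead.

import yaml
from yaml.events import (
    DocumentEndEvent, DocumentStartEvent, MappingEndEvent, MappingStartEvent,
    ScalarEvent, StreamEndEvent, StreamStartEvent)

events = [
    StreamStartEvent(),
    DocumentStartEvent(explicit=False),
    # flow_style=True exercises the expect_flow_mapping_* states above.
    MappingStartEvent(anchor=None, tag=None, implicit=True, flow_style=True),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='key'),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='value'),
    MappingEndEvent(),
    DocumentEndEvent(explicit=False),
    StreamEndEvent(),
]

print(yaml.emit(events))  # -> {key: value}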
8,429
serializer.py
rembo10_headphones/lib/yaml/serializer.py
__all__ = ['Serializer', 'SerializerError'] from .error import YAMLError from .events import * from .nodes import * class SerializerError(YAMLError): pass class Serializer: ANCHOR_TEMPLATE = 'id%03d' def __init__(self, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): self.use_encoding = encoding self.use_explicit_start = explicit_start self.use_explicit_end = explicit_end self.use_version = version self.use_tags = tags self.serialized_nodes = {} self.anchors = {} self.last_anchor_id = 0 self.closed = None def open(self): if self.closed is None: self.emit(StreamStartEvent(encoding=self.use_encoding)) self.closed = False elif self.closed: raise SerializerError("serializer is closed") else: raise SerializerError("serializer is already opened") def close(self): if self.closed is None: raise SerializerError("serializer is not opened") elif not self.closed: self.emit(StreamEndEvent()) self.closed = True #def __del__(self): # self.close() def serialize(self, node): if self.closed is None: raise SerializerError("serializer is not opened") elif self.closed: raise SerializerError("serializer is closed") self.emit(DocumentStartEvent(explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags)) self.anchor_node(node) self.serialize_node(node, None, None) self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) self.serialized_nodes = {} self.anchors = {} self.last_anchor_id = 0 def anchor_node(self, node): if node in self.anchors: if self.anchors[node] is None: self.anchors[node] = self.generate_anchor(node) else: self.anchors[node] = None if isinstance(node, SequenceNode): for item in node.value: self.anchor_node(item) elif isinstance(node, MappingNode): for key, value in node.value: self.anchor_node(key) self.anchor_node(value) def generate_anchor(self, node): self.last_anchor_id += 1 return self.ANCHOR_TEMPLATE % self.last_anchor_id def serialize_node(self, node, parent, index): alias = self.anchors[node] if node in self.serialized_nodes: self.emit(AliasEvent(alias)) else: self.serialized_nodes[node] = True self.descend_resolver(parent, index) if isinstance(node, ScalarNode): detected_tag = self.resolve(ScalarNode, node.value, (True, False)) default_tag = self.resolve(ScalarNode, node.value, (False, True)) implicit = (node.tag == detected_tag), (node.tag == default_tag) self.emit(ScalarEvent(alias, node.tag, implicit, node.value, style=node.style)) elif isinstance(node, SequenceNode): implicit = (node.tag == self.resolve(SequenceNode, node.value, True)) self.emit(SequenceStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) index = 0 for item in node.value: self.serialize_node(item, node, index) index += 1 self.emit(SequenceEndEvent()) elif isinstance(node, MappingNode): implicit = (node.tag == self.resolve(MappingNode, node.value, True)) self.emit(MappingStartEvent(alias, node.tag, implicit, flow_style=node.flow_style)) for key, value in node.value: self.serialize_node(key, node, None) self.serialize_node(value, node, key) self.emit(MappingEndEvent()) self.ascend_resolver()
4,165
Python
.py
97
30.865979
82
0.581401
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
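A hedged round-trip sketch (again assuming the package is importable as `yaml`): anchor_node() visits the shared SequenceNode twice, so generate_anchor() names it from ANCHOR_TEMPLATE and serialize_node() emits an AliasEvent on the second occurrence. The expected output in the comment is approximate.

import yaml

node = yaml.compose("shared: &a [1, 2]\nother: *a")
print(yaml.serialize(node))
# along the lines of:
#   shared: &id001 [1, 2]
#   other: *id001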
8,430
nodes.py
rembo10_headphones/lib/yaml/nodes.py
class Node(object): def __init__(self, tag, value, start_mark, end_mark): self.tag = tag self.value = value self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): value = self.value #if isinstance(value, list): # if len(value) == 0: # value = '<empty>' # elif len(value) == 1: # value = '<1 item>' # else: # value = '<%d items>' % len(value) #else: # if len(value) > 75: # value = repr(value[:70]+u' ... ') # else: # value = repr(value) value = repr(value) return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) class ScalarNode(Node): id = 'scalar' def __init__(self, tag, value, start_mark=None, end_mark=None, style=None): self.tag = tag self.value = value self.start_mark = start_mark self.end_mark = end_mark self.style = style class CollectionNode(Node): def __init__(self, tag, value, start_mark=None, end_mark=None, flow_style=None): self.tag = tag self.value = value self.start_mark = start_mark self.end_mark = end_mark self.flow_style = flow_style class SequenceNode(CollectionNode): id = 'sequence' class MappingNode(CollectionNode): id = 'mapping'
1,440
Python
.py
43
25.744186
82
0.529116
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
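A brief illustrative sketch of building the node graph from nodes.py by hand; the tag strings are the standard tag:yaml.org,2002 ones, and the printed forms follow Node.__repr__ and the class-level `id` attributes defined above.

from yaml.nodes import MappingNode, ScalarNode

key = ScalarNode('tag:yaml.org,2002:str', 'name')
val = ScalarNode('tag:yaml.org,2002:str', 'headphones')
doc = MappingNode('tag:yaml.org,2002:map', [(key, val)])

print(doc.id)  # 'mapping'
print(key)     # ScalarNode(tag='tag:yaml.org,2002:str', value='name')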
8,431
constructor.py
rembo10_headphones/lib/yaml/constructor.py
__all__ = [ 'BaseConstructor', 'SafeConstructor', 'FullConstructor', 'UnsafeConstructor', 'Constructor', 'ConstructorError' ] from .error import * from .nodes import * import collections.abc, datetime, base64, binascii, re, sys, types class ConstructorError(MarkedYAMLError): pass class BaseConstructor: yaml_constructors = {} yaml_multi_constructors = {} def __init__(self): self.constructed_objects = {} self.recursive_objects = {} self.state_generators = [] self.deep_construct = False def check_data(self): # If there are more documents available? return self.check_node() def check_state_key(self, key): """Block special attributes/methods from being set in a newly created object, to prevent user-controlled methods from being called during deserialization""" if self.get_state_keys_blacklist_regexp().match(key): raise ConstructorError(None, None, "blacklisted key '%s' in instance state found" % (key,), None) def get_data(self): # Construct and return the next document. if self.check_node(): return self.construct_document(self.get_node()) def get_single_data(self): # Ensure that the stream contains a single document and construct it. node = self.get_single_node() if node is not None: return self.construct_document(node) return None def construct_document(self, node): data = self.construct_object(node) while self.state_generators: state_generators = self.state_generators self.state_generators = [] for generator in state_generators: for dummy in generator: pass self.constructed_objects = {} self.recursive_objects = {} self.deep_construct = False return data def construct_object(self, node, deep=False): if node in self.constructed_objects: return self.constructed_objects[node] if deep: old_deep = self.deep_construct self.deep_construct = True if node in self.recursive_objects: raise ConstructorError(None, None, "found unconstructable recursive node", node.start_mark) self.recursive_objects[node] = None constructor = None tag_suffix = None if node.tag in self.yaml_constructors: constructor = self.yaml_constructors[node.tag] else: for tag_prefix in self.yaml_multi_constructors: if tag_prefix is not None and node.tag.startswith(tag_prefix): tag_suffix = node.tag[len(tag_prefix):] constructor = self.yaml_multi_constructors[tag_prefix] break else: if None in self.yaml_multi_constructors: tag_suffix = node.tag constructor = self.yaml_multi_constructors[None] elif None in self.yaml_constructors: constructor = self.yaml_constructors[None] elif isinstance(node, ScalarNode): constructor = self.__class__.construct_scalar elif isinstance(node, SequenceNode): constructor = self.__class__.construct_sequence elif isinstance(node, MappingNode): constructor = self.__class__.construct_mapping if tag_suffix is None: data = constructor(self, node) else: data = constructor(self, tag_suffix, node) if isinstance(data, types.GeneratorType): generator = data data = next(generator) if self.deep_construct: for dummy in generator: pass else: self.state_generators.append(generator) self.constructed_objects[node] = data del self.recursive_objects[node] if deep: self.deep_construct = old_deep return data def construct_scalar(self, node): if not isinstance(node, ScalarNode): raise ConstructorError(None, None, "expected a scalar node, but found %s" % node.id, node.start_mark) return node.value def construct_sequence(self, node, deep=False): if not isinstance(node, SequenceNode): raise ConstructorError(None, None, "expected a sequence node, but found %s" % node.id, node.start_mark) return [self.construct_object(child, deep=deep) 
for child in node.value] def construct_mapping(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) mapping = {} for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) if not isinstance(key, collections.abc.Hashable): raise ConstructorError("while constructing a mapping", node.start_mark, "found unhashable key", key_node.start_mark) value = self.construct_object(value_node, deep=deep) mapping[key] = value return mapping def construct_pairs(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError(None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) pairs = [] for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) value = self.construct_object(value_node, deep=deep) pairs.append((key, value)) return pairs @classmethod def add_constructor(cls, tag, constructor): if not 'yaml_constructors' in cls.__dict__: cls.yaml_constructors = cls.yaml_constructors.copy() cls.yaml_constructors[tag] = constructor @classmethod def add_multi_constructor(cls, tag_prefix, multi_constructor): if not 'yaml_multi_constructors' in cls.__dict__: cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() cls.yaml_multi_constructors[tag_prefix] = multi_constructor class SafeConstructor(BaseConstructor): def construct_scalar(self, node): if isinstance(node, MappingNode): for key_node, value_node in node.value: if key_node.tag == 'tag:yaml.org,2002:value': return self.construct_scalar(value_node) return super().construct_scalar(node) def flatten_mapping(self, node): merge = [] index = 0 while index < len(node.value): key_node, value_node = node.value[index] if key_node.tag == 'tag:yaml.org,2002:merge': del node.value[index] if isinstance(value_node, MappingNode): self.flatten_mapping(value_node) merge.extend(value_node.value) elif isinstance(value_node, SequenceNode): submerge = [] for subnode in value_node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing a mapping", node.start_mark, "expected a mapping for merging, but found %s" % subnode.id, subnode.start_mark) self.flatten_mapping(subnode) submerge.append(subnode.value) submerge.reverse() for value in submerge: merge.extend(value) else: raise ConstructorError("while constructing a mapping", node.start_mark, "expected a mapping or list of mappings for merging, but found %s" % value_node.id, value_node.start_mark) elif key_node.tag == 'tag:yaml.org,2002:value': key_node.tag = 'tag:yaml.org,2002:str' index += 1 else: index += 1 if merge: node.value = merge + node.value def construct_mapping(self, node, deep=False): if isinstance(node, MappingNode): self.flatten_mapping(node) return super().construct_mapping(node, deep=deep) def construct_yaml_null(self, node): self.construct_scalar(node) return None bool_values = { 'yes': True, 'no': False, 'true': True, 'false': False, 'on': True, 'off': False, } def construct_yaml_bool(self, node): value = self.construct_scalar(node) return self.bool_values[value.lower()] def construct_yaml_int(self, node): value = self.construct_scalar(node) value = value.replace('_', '') sign = +1 if value[0] == '-': sign = -1 if value[0] in '+-': value = value[1:] if value == '0': return 0 elif value.startswith('0b'): return sign*int(value[2:], 2) elif value.startswith('0x'): return sign*int(value[2:], 16) elif value[0] == '0': return sign*int(value, 8) elif ':' in 
value: digits = [int(part) for part in value.split(':')] digits.reverse() base = 1 value = 0 for digit in digits: value += digit*base base *= 60 return sign*value else: return sign*int(value) inf_value = 1e300 while inf_value != inf_value*inf_value: inf_value *= inf_value nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99). def construct_yaml_float(self, node): value = self.construct_scalar(node) value = value.replace('_', '').lower() sign = +1 if value[0] == '-': sign = -1 if value[0] in '+-': value = value[1:] if value == '.inf': return sign*self.inf_value elif value == '.nan': return self.nan_value elif ':' in value: digits = [float(part) for part in value.split(':')] digits.reverse() base = 1 value = 0.0 for digit in digits: value += digit*base base *= 60 return sign*value else: return sign*float(value) def construct_yaml_binary(self, node): try: value = self.construct_scalar(node).encode('ascii') except UnicodeEncodeError as exc: raise ConstructorError(None, None, "failed to convert base64 data into ascii: %s" % exc, node.start_mark) try: if hasattr(base64, 'decodebytes'): return base64.decodebytes(value) else: return base64.decodestring(value) except binascii.Error as exc: raise ConstructorError(None, None, "failed to decode base64 data: %s" % exc, node.start_mark) timestamp_regexp = re.compile( r'''^(?P<year>[0-9][0-9][0-9][0-9]) -(?P<month>[0-9][0-9]?) -(?P<day>[0-9][0-9]?) (?:(?:[Tt]|[ \t]+) (?P<hour>[0-9][0-9]?) :(?P<minute>[0-9][0-9]) :(?P<second>[0-9][0-9]) (?:\.(?P<fraction>[0-9]*))? (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?) (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X) def construct_yaml_timestamp(self, node): value = self.construct_scalar(node) match = self.timestamp_regexp.match(node.value) values = match.groupdict() year = int(values['year']) month = int(values['month']) day = int(values['day']) if not values['hour']: return datetime.date(year, month, day) hour = int(values['hour']) minute = int(values['minute']) second = int(values['second']) fraction = 0 tzinfo = None if values['fraction']: fraction = values['fraction'][:6] while len(fraction) < 6: fraction += '0' fraction = int(fraction) if values['tz_sign']: tz_hour = int(values['tz_hour']) tz_minute = int(values['tz_minute'] or 0) delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) if values['tz_sign'] == '-': delta = -delta tzinfo = datetime.timezone(delta) elif values['tz']: tzinfo = datetime.timezone.utc return datetime.datetime(year, month, day, hour, minute, second, fraction, tzinfo=tzinfo) def construct_yaml_omap(self, node): # Note: we do not check for duplicate keys, because it's too # CPU-expensive. omap = [] yield omap if not isinstance(node, SequenceNode): raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a sequence, but found %s" % node.id, node.start_mark) for subnode in node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a mapping of length 1, but found %s" % subnode.id, subnode.start_mark) if len(subnode.value) != 1: raise ConstructorError("while constructing an ordered map", node.start_mark, "expected a single mapping item, but found %d items" % len(subnode.value), subnode.start_mark) key_node, value_node = subnode.value[0] key = self.construct_object(key_node) value = self.construct_object(value_node) omap.append((key, value)) def construct_yaml_pairs(self, node): # Note: the same code as `construct_yaml_omap`. 
pairs = [] yield pairs if not isinstance(node, SequenceNode): raise ConstructorError("while constructing pairs", node.start_mark, "expected a sequence, but found %s" % node.id, node.start_mark) for subnode in node.value: if not isinstance(subnode, MappingNode): raise ConstructorError("while constructing pairs", node.start_mark, "expected a mapping of length 1, but found %s" % subnode.id, subnode.start_mark) if len(subnode.value) != 1: raise ConstructorError("while constructing pairs", node.start_mark, "expected a single mapping item, but found %d items" % len(subnode.value), subnode.start_mark) key_node, value_node = subnode.value[0] key = self.construct_object(key_node) value = self.construct_object(value_node) pairs.append((key, value)) def construct_yaml_set(self, node): data = set() yield data value = self.construct_mapping(node) data.update(value) def construct_yaml_str(self, node): return self.construct_scalar(node) def construct_yaml_seq(self, node): data = [] yield data data.extend(self.construct_sequence(node)) def construct_yaml_map(self, node): data = {} yield data value = self.construct_mapping(node) data.update(value) def construct_yaml_object(self, node, cls): data = cls.__new__(cls) yield data if hasattr(data, '__setstate__'): state = self.construct_mapping(node, deep=True) data.__setstate__(state) else: state = self.construct_mapping(node) data.__dict__.update(state) def construct_undefined(self, node): raise ConstructorError(None, None, "could not determine a constructor for the tag %r" % node.tag, node.start_mark) SafeConstructor.add_constructor( 'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null) SafeConstructor.add_constructor( 'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool) SafeConstructor.add_constructor( 'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int) SafeConstructor.add_constructor( 'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float) SafeConstructor.add_constructor( 'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary) SafeConstructor.add_constructor( 'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp) SafeConstructor.add_constructor( 'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap) SafeConstructor.add_constructor( 'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs) SafeConstructor.add_constructor( 'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set) SafeConstructor.add_constructor( 'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str) SafeConstructor.add_constructor( 'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq) SafeConstructor.add_constructor( 'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map) SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined) class FullConstructor(SafeConstructor): # 'extend' is blacklisted because it is used by # construct_python_object_apply to add `listitems` to a newly generate # python instance def get_state_keys_blacklist(self): return ['^extend$', '^__.*__$'] def get_state_keys_blacklist_regexp(self): if not hasattr(self, 'state_keys_blacklist_regexp'): self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')') return self.state_keys_blacklist_regexp def construct_python_str(self, node): return self.construct_scalar(node) def construct_python_unicode(self, node): return self.construct_scalar(node) def construct_python_bytes(self, node): try: value = self.construct_scalar(node).encode('ascii') except UnicodeEncodeError as 
exc: raise ConstructorError(None, None, "failed to convert base64 data into ascii: %s" % exc, node.start_mark) try: if hasattr(base64, 'decodebytes'): return base64.decodebytes(value) else: return base64.decodestring(value) except binascii.Error as exc: raise ConstructorError(None, None, "failed to decode base64 data: %s" % exc, node.start_mark) def construct_python_long(self, node): return self.construct_yaml_int(node) def construct_python_complex(self, node): return complex(self.construct_scalar(node)) def construct_python_tuple(self, node): return tuple(self.construct_sequence(node)) def find_python_module(self, name, mark, unsafe=False): if not name: raise ConstructorError("while constructing a Python module", mark, "expected non-empty name appended to the tag", mark) if unsafe: try: __import__(name) except ImportError as exc: raise ConstructorError("while constructing a Python module", mark, "cannot find module %r (%s)" % (name, exc), mark) if name not in sys.modules: raise ConstructorError("while constructing a Python module", mark, "module %r is not imported" % name, mark) return sys.modules[name] def find_python_name(self, name, mark, unsafe=False): if not name: raise ConstructorError("while constructing a Python object", mark, "expected non-empty name appended to the tag", mark) if '.' in name: module_name, object_name = name.rsplit('.', 1) else: module_name = 'builtins' object_name = name if unsafe: try: __import__(module_name) except ImportError as exc: raise ConstructorError("while constructing a Python object", mark, "cannot find module %r (%s)" % (module_name, exc), mark) if module_name not in sys.modules: raise ConstructorError("while constructing a Python object", mark, "module %r is not imported" % module_name, mark) module = sys.modules[module_name] if not hasattr(module, object_name): raise ConstructorError("while constructing a Python object", mark, "cannot find %r in the module %r" % (object_name, module.__name__), mark) return getattr(module, object_name) def construct_python_name(self, suffix, node): value = self.construct_scalar(node) if value: raise ConstructorError("while constructing a Python name", node.start_mark, "expected the empty value, but found %r" % value, node.start_mark) return self.find_python_name(suffix, node.start_mark) def construct_python_module(self, suffix, node): value = self.construct_scalar(node) if value: raise ConstructorError("while constructing a Python module", node.start_mark, "expected the empty value, but found %r" % value, node.start_mark) return self.find_python_module(suffix, node.start_mark) def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False, unsafe=False): if not args: args = [] if not kwds: kwds = {} cls = self.find_python_name(suffix, node.start_mark) if not (unsafe or isinstance(cls, type)): raise ConstructorError("while constructing a Python instance", node.start_mark, "expected a class, but found %r" % type(cls), node.start_mark) if newobj and isinstance(cls, type): return cls.__new__(cls, *args, **kwds) else: return cls(*args, **kwds) def set_python_instance_state(self, instance, state, unsafe=False): if hasattr(instance, '__setstate__'): instance.__setstate__(state) else: slotstate = {} if isinstance(state, tuple) and len(state) == 2: state, slotstate = state if hasattr(instance, '__dict__'): if not unsafe and state: for key in state.keys(): self.check_state_key(key) instance.__dict__.update(state) elif state: slotstate.update(state) for key, value in slotstate.items(): if not unsafe: 
self.check_state_key(key) setattr(instance, key, value) def construct_python_object(self, suffix, node): # Format: # !!python/object:module.name { ... state ... } instance = self.make_python_instance(suffix, node, newobj=True) yield instance deep = hasattr(instance, '__setstate__') state = self.construct_mapping(node, deep=deep) self.set_python_instance_state(instance, state) def construct_python_object_apply(self, suffix, node, newobj=False): # Format: # !!python/object/apply # (or !!python/object/new) # args: [ ... arguments ... ] # kwds: { ... keywords ... } # state: ... state ... # listitems: [ ... listitems ... ] # dictitems: { ... dictitems ... } # or short format: # !!python/object/apply [ ... arguments ... ] # The difference between !!python/object/apply and !!python/object/new # is how an object is created, check make_python_instance for details. if isinstance(node, SequenceNode): args = self.construct_sequence(node, deep=True) kwds = {} state = {} listitems = [] dictitems = {} else: value = self.construct_mapping(node, deep=True) args = value.get('args', []) kwds = value.get('kwds', {}) state = value.get('state', {}) listitems = value.get('listitems', []) dictitems = value.get('dictitems', {}) instance = self.make_python_instance(suffix, node, args, kwds, newobj) if state: self.set_python_instance_state(instance, state) if listitems: instance.extend(listitems) if dictitems: for key in dictitems: instance[key] = dictitems[key] return instance def construct_python_object_new(self, suffix, node): return self.construct_python_object_apply(suffix, node, newobj=True) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/none', FullConstructor.construct_yaml_null) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/bool', FullConstructor.construct_yaml_bool) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/str', FullConstructor.construct_python_str) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/unicode', FullConstructor.construct_python_unicode) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/bytes', FullConstructor.construct_python_bytes) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/int', FullConstructor.construct_yaml_int) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/long', FullConstructor.construct_python_long) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/float', FullConstructor.construct_yaml_float) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/complex', FullConstructor.construct_python_complex) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/list', FullConstructor.construct_yaml_seq) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/tuple', FullConstructor.construct_python_tuple) FullConstructor.add_constructor( 'tag:yaml.org,2002:python/dict', FullConstructor.construct_yaml_map) FullConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/name:', FullConstructor.construct_python_name) class UnsafeConstructor(FullConstructor): def find_python_module(self, name, mark): return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True) def find_python_name(self, name, mark): return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True) def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): return super(UnsafeConstructor, self).make_python_instance( suffix, node, args, kwds, newobj, unsafe=True) def set_python_instance_state(self, instance, state): return super(UnsafeConstructor, 
self).set_python_instance_state( instance, state, unsafe=True) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/module:', UnsafeConstructor.construct_python_module) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/object:', UnsafeConstructor.construct_python_object) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/object/new:', UnsafeConstructor.construct_python_object_new) UnsafeConstructor.add_multi_constructor( 'tag:yaml.org,2002:python/object/apply:', UnsafeConstructor.construct_python_object_apply) # Constructor is the same as UnsafeConstructor. Need to leave this in place in case # people have extended it directly. class Constructor(UnsafeConstructor): pass

28,639
Python
.py
655
32.651908
112
0.594242
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
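An illustrative sketch of a few SafeConstructor behaviours defined above, exercised through the public `yaml.safe_load()` entry point; the input documents are invented for the example.

import yaml

print(yaml.safe_load('1:30:00'))           # 5400 -- the sexagesimal branch of construct_yaml_int
print(yaml.safe_load('when: 2024-09-05'))  # {'when': datetime.date(2024, 9, 5)}

# Merge keys are resolved by flatten_mapping() before mapping construction:
doc = "base: &b {a: 1}\nchild:\n  <<: *b\n  c: 3\n"
print(yaml.safe_load(doc)['child'])        # {'a': 1, 'c': 3}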
8,432
tokens.py
rembo10_headphones/lib/yaml/tokens.py
class Token(object): def __init__(self, start_mark, end_mark): self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): attributes = [key for key in self.__dict__ if not key.endswith('_mark')] attributes.sort() arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) return '%s(%s)' % (self.__class__.__name__, arguments) #class BOMToken(Token): # id = '<byte order mark>' class DirectiveToken(Token): id = '<directive>' def __init__(self, name, value, start_mark, end_mark): self.name = name self.value = value self.start_mark = start_mark self.end_mark = end_mark class DocumentStartToken(Token): id = '<document start>' class DocumentEndToken(Token): id = '<document end>' class StreamStartToken(Token): id = '<stream start>' def __init__(self, start_mark=None, end_mark=None, encoding=None): self.start_mark = start_mark self.end_mark = end_mark self.encoding = encoding class StreamEndToken(Token): id = '<stream end>' class BlockSequenceStartToken(Token): id = '<block sequence start>' class BlockMappingStartToken(Token): id = '<block mapping start>' class BlockEndToken(Token): id = '<block end>' class FlowSequenceStartToken(Token): id = '[' class FlowMappingStartToken(Token): id = '{' class FlowSequenceEndToken(Token): id = ']' class FlowMappingEndToken(Token): id = '}' class KeyToken(Token): id = '?' class ValueToken(Token): id = ':' class BlockEntryToken(Token): id = '-' class FlowEntryToken(Token): id = ',' class AliasToken(Token): id = '<alias>' def __init__(self, value, start_mark, end_mark): self.value = value self.start_mark = start_mark self.end_mark = end_mark class AnchorToken(Token): id = '<anchor>' def __init__(self, value, start_mark, end_mark): self.value = value self.start_mark = start_mark self.end_mark = end_mark class TagToken(Token): id = '<tag>' def __init__(self, value, start_mark, end_mark): self.value = value self.start_mark = start_mark self.end_mark = end_mark class ScalarToken(Token): id = '<scalar>' def __init__(self, value, plain, start_mark, end_mark, style=None): self.value = value self.plain = plain self.start_mark = start_mark self.end_mark = end_mark self.style = style
2,573
Python
.py
81
25.888889
71
0.618064
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
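These token classes are exactly what the public `yaml.scan()` generator yields; a quick sketch (the commented trace is abbreviated):

import yaml

for token in yaml.scan('key: [1, 2]'):
    print(type(token).__name__, getattr(token, 'value', ''))
# StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken 'key',
# ValueToken, FlowSequenceStartToken, ScalarToken '1', FlowEntryToken,
# ScalarToken '2', FlowSequenceEndToken, BlockEndToken, StreamEndToken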
8,433
representer.py
rembo10_headphones/lib/yaml/representer.py
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', 'RepresenterError'] from .error import * from .nodes import * import datetime, copyreg, types, base64, collections class RepresenterError(YAMLError): pass class BaseRepresenter: yaml_representers = {} yaml_multi_representers = {} def __init__(self, default_style=None, default_flow_style=False, sort_keys=True): self.default_style = default_style self.sort_keys = sort_keys self.default_flow_style = default_flow_style self.represented_objects = {} self.object_keeper = [] self.alias_key = None def represent(self, data): node = self.represent_data(data) self.serialize(node) self.represented_objects = {} self.object_keeper = [] self.alias_key = None def represent_data(self, data): if self.ignore_aliases(data): self.alias_key = None else: self.alias_key = id(data) if self.alias_key is not None: if self.alias_key in self.represented_objects: node = self.represented_objects[self.alias_key] #if node is None: # raise RepresenterError("recursive objects are not allowed: %r" % data) return node #self.represented_objects[alias_key] = None self.object_keeper.append(data) data_types = type(data).__mro__ if data_types[0] in self.yaml_representers: node = self.yaml_representers[data_types[0]](self, data) else: for data_type in data_types: if data_type in self.yaml_multi_representers: node = self.yaml_multi_representers[data_type](self, data) break else: if None in self.yaml_multi_representers: node = self.yaml_multi_representers[None](self, data) elif None in self.yaml_representers: node = self.yaml_representers[None](self, data) else: node = ScalarNode(None, str(data)) #if alias_key is not None: # self.represented_objects[alias_key] = node return node @classmethod def add_representer(cls, data_type, representer): if not 'yaml_representers' in cls.__dict__: cls.yaml_representers = cls.yaml_representers.copy() cls.yaml_representers[data_type] = representer @classmethod def add_multi_representer(cls, data_type, representer): if not 'yaml_multi_representers' in cls.__dict__: cls.yaml_multi_representers = cls.yaml_multi_representers.copy() cls.yaml_multi_representers[data_type] = representer def represent_scalar(self, tag, value, style=None): if style is None: style = self.default_style node = ScalarNode(tag, value, style=style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node return node def represent_sequence(self, tag, sequence, flow_style=None): value = [] node = SequenceNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True for item in sequence: node_item = self.represent_data(item) if not (isinstance(node_item, ScalarNode) and not node_item.style): best_style = False value.append(node_item) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node def represent_mapping(self, tag, mapping, flow_style=None): value = [] node = MappingNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True if hasattr(mapping, 'items'): mapping = list(mapping.items()) if self.sort_keys: try: mapping = sorted(mapping) except TypeError: pass for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) if not (isinstance(node_key, ScalarNode) and not node_key.style): best_style = False if not (isinstance(node_value, 
ScalarNode) and not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node def ignore_aliases(self, data): return False class SafeRepresenter(BaseRepresenter): def ignore_aliases(self, data): if data is None: return True if isinstance(data, tuple) and data == (): return True if isinstance(data, (str, bytes, bool, int, float)): return True def represent_none(self, data): return self.represent_scalar('tag:yaml.org,2002:null', 'null') def represent_str(self, data): return self.represent_scalar('tag:yaml.org,2002:str', data) def represent_binary(self, data): if hasattr(base64, 'encodebytes'): data = base64.encodebytes(data).decode('ascii') else: data = base64.encodestring(data).decode('ascii') return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') def represent_bool(self, data): if data: value = 'true' else: value = 'false' return self.represent_scalar('tag:yaml.org,2002:bool', value) def represent_int(self, data): return self.represent_scalar('tag:yaml.org,2002:int', str(data)) inf_value = 1e300 while repr(inf_value) != repr(inf_value*inf_value): inf_value *= inf_value def represent_float(self, data): if data != data or (data == 0.0 and data == 1.0): value = '.nan' elif data == self.inf_value: value = '.inf' elif data == -self.inf_value: value = '-.inf' else: value = repr(data).lower() # Note that in some cases `repr(data)` represents a float number # without the decimal parts. For instance: # >>> repr(1e17) # '1e17' # Unfortunately, this is not a valid float representation according # to the definition of the `!!float` tag. We fix this by adding # '.0' before the 'e' symbol. if '.' 
not in value and 'e' in value: value = value.replace('e', '.0e', 1) return self.represent_scalar('tag:yaml.org,2002:float', value) def represent_list(self, data): #pairs = (len(data) > 0 and isinstance(data, list)) #if pairs: # for item in data: # if not isinstance(item, tuple) or len(item) != 2: # pairs = False # break #if not pairs: return self.represent_sequence('tag:yaml.org,2002:seq', data) #value = [] #for item_key, item_value in data: # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', # [(item_key, item_value)])) #return SequenceNode(u'tag:yaml.org,2002:pairs', value) def represent_dict(self, data): return self.represent_mapping('tag:yaml.org,2002:map', data) def represent_set(self, data): value = {} for key in data: value[key] = None return self.represent_mapping('tag:yaml.org,2002:set', value) def represent_date(self, data): value = data.isoformat() return self.represent_scalar('tag:yaml.org,2002:timestamp', value) def represent_datetime(self, data): value = data.isoformat(' ') return self.represent_scalar('tag:yaml.org,2002:timestamp', value) def represent_yaml_object(self, tag, data, cls, flow_style=None): if hasattr(data, '__getstate__'): state = data.__getstate__() else: state = data.__dict__.copy() return self.represent_mapping(tag, state, flow_style=flow_style) def represent_undefined(self, data): raise RepresenterError("cannot represent an object", data) SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none) SafeRepresenter.add_representer(str, SafeRepresenter.represent_str) SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary) SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool) SafeRepresenter.add_representer(int, SafeRepresenter.represent_int) SafeRepresenter.add_representer(float, SafeRepresenter.represent_float) SafeRepresenter.add_representer(list, SafeRepresenter.represent_list) SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list) SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict) SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date) SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime) SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) class Representer(SafeRepresenter): def represent_complex(self, data): if data.imag == 0.0: data = '%r' % data.real elif data.real == 0.0: data = '%rj' % data.imag elif data.imag > 0: data = '%r+%rj' % (data.real, data.imag) else: data = '%r%rj' % (data.real, data.imag) return self.represent_scalar('tag:yaml.org,2002:python/complex', data) def represent_tuple(self, data): return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) def represent_name(self, data): name = '%s.%s' % (data.__module__, data.__name__) return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') def represent_module(self, data): return self.represent_scalar( 'tag:yaml.org,2002:python/module:'+data.__name__, '') def represent_object(self, data): # We use __reduce__ API to save the data. data.__reduce__ returns # a tuple of length 2-5: # (function, args, state, listitems, dictitems) # For reconstructing, we calls function(*args), then set its state, # listitems, and dictitems if they are not None. # A special case is when function.__name__ == '__newobj__'. In this # case we create the object with args[0].__new__(*args). 
# Another special case is when __reduce__ returns a string - we don't # support it. # We produce a !!python/object, !!python/object/new or # !!python/object/apply node. cls = type(data) if cls in copyreg.dispatch_table: reduce = copyreg.dispatch_table[cls](data) elif hasattr(data, '__reduce_ex__'): reduce = data.__reduce_ex__(2) elif hasattr(data, '__reduce__'): reduce = data.__reduce__() else: raise RepresenterError("cannot represent an object", data) reduce = (list(reduce)+[None]*5)[:5] function, args, state, listitems, dictitems = reduce args = list(args) if state is None: state = {} if listitems is not None: listitems = list(listitems) if dictitems is not None: dictitems = dict(dictitems) if function.__name__ == '__newobj__': function = args[0] args = args[1:] tag = 'tag:yaml.org,2002:python/object/new:' newobj = True else: tag = 'tag:yaml.org,2002:python/object/apply:' newobj = False function_name = '%s.%s' % (function.__module__, function.__name__) if not args and not listitems and not dictitems \ and isinstance(state, dict) and newobj: return self.represent_mapping( 'tag:yaml.org,2002:python/object:'+function_name, state) if not listitems and not dictitems \ and isinstance(state, dict) and not state: return self.represent_sequence(tag+function_name, args) value = {} if args: value['args'] = args if state or not isinstance(state, dict): value['state'] = state if listitems: value['listitems'] = listitems if dictitems: value['dictitems'] = dictitems return self.represent_mapping(tag+function_name, value) def represent_ordered_dict(self, data): # Provide uniform representation across different Python versions. data_type = type(data) tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \ % (data_type.__module__, data_type.__name__) items = [[key, value] for key, value in data.items()] return self.represent_sequence(tag, [items]) Representer.add_representer(complex, Representer.represent_complex) Representer.add_representer(tuple, Representer.represent_tuple) Representer.add_multi_representer(type, Representer.represent_name) Representer.add_representer(collections.OrderedDict, Representer.represent_ordered_dict) Representer.add_representer(types.FunctionType, Representer.represent_name) Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) Representer.add_representer(types.ModuleType, Representer.represent_module) Representer.add_multi_representer(object, Representer.represent_object)
14,190
Python
.py
324
34.015432
91
0.614013
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,434
loader.py
rembo10_headphones/lib/yaml/loader.py
__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader'] from .reader import * from .scanner import * from .parser import * from .composer import * from .constructor import * from .resolver import * class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) BaseConstructor.__init__(self) BaseResolver.__init__(self) class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) FullConstructor.__init__(self) Resolver.__init__(self) class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) SafeConstructor.__init__(self) Resolver.__init__(self) class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) Constructor.__init__(self) Resolver.__init__(self) # UnsafeLoader is the same as Loader (which is and was always unsafe on # untrusted input). Use of either Loader or UnsafeLoader should be rare, since # FullLoader should be able to load almost all YAML safely. Loader is left intact # to ensure backwards compatibility. class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver): def __init__(self, stream): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) Constructor.__init__(self) Resolver.__init__(self)
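The loader classes above only wire the pipeline stages together (Reader feeds Scanner feeds Parser feeds Composer feeds a Constructor, with a Resolver deciding implicit tags). A minimal usage sketch, not part of the file above; the YAML text is illustrative:

# Sketch: pick the loader class that matches the trust level of the input.
import yaml

doc = "name: headphones\nport: 8181\n"
data = yaml.load(doc, Loader=yaml.SafeLoader)  # safe choice for untrusted input
print(data)  # {'name': 'headphones', 'port': 8181}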
2,061
Python
.py
51
34.078431
83
0.644645
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,435
dumper.py
rembo10_headphones/lib/yaml/dumper.py
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] from .emitter import * from .serializer import * from .representer import * from .resolver import * class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) SafeRepresenter.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class Dumper(Emitter, Serializer, Representer, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self)
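The three dumpers differ only in which representer they mix in (BaseRepresenter, SafeRepresenter, or Representer). A short usage sketch, not part of the file above:

# Sketch: SafeDumper emits only basic YAML tags; Dumper may also emit
# python-specific tags (!!python/...) for richer objects.
import yaml

data = {'name': 'headphones', 'ports': [8181, 8182]}
print(yaml.dump(data, Dumper=yaml.SafeDumper))
# name: headphones
# ports:
# - 8181
# - 8182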
2,837
Python
.py
53
42.396226
75
0.642162
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,436
__init__.py
rembo10_headphones/lib/yaml/__init__.py
from .error import * from .tokens import * from .events import * from .nodes import * from .loader import * from .dumper import * __version__ = '6.0' try: from .cyaml import * __with_libyaml__ = True except ImportError: __with_libyaml__ = False import io #------------------------------------------------------------------------------ # XXX "Warnings control" is now deprecated. Leaving in the API function to not # break code that uses it. #------------------------------------------------------------------------------ def warnings(settings=None): if settings is None: return {} #------------------------------------------------------------------------------ def scan(stream, Loader=Loader): """ Scan a YAML stream and produce scanning tokens. """ loader = Loader(stream) try: while loader.check_token(): yield loader.get_token() finally: loader.dispose() def parse(stream, Loader=Loader): """ Parse a YAML stream and produce parsing events. """ loader = Loader(stream) try: while loader.check_event(): yield loader.get_event() finally: loader.dispose() def compose(stream, Loader=Loader): """ Parse the first YAML document in a stream and produce the corresponding representation tree. """ loader = Loader(stream) try: return loader.get_single_node() finally: loader.dispose() def compose_all(stream, Loader=Loader): """ Parse all YAML documents in a stream and produce corresponding representation trees. """ loader = Loader(stream) try: while loader.check_node(): yield loader.get_node() finally: loader.dispose() def load(stream, Loader): """ Parse the first YAML document in a stream and produce the corresponding Python object. """ loader = Loader(stream) try: return loader.get_single_data() finally: loader.dispose() def load_all(stream, Loader): """ Parse all YAML documents in a stream and produce corresponding Python objects. """ loader = Loader(stream) try: while loader.check_data(): yield loader.get_data() finally: loader.dispose() def full_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve all tags except those known to be unsafe on untrusted input. """ return load(stream, FullLoader) def full_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve all tags except those known to be unsafe on untrusted input. """ return load_all(stream, FullLoader) def safe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve only basic YAML tags. This is known to be safe for untrusted input. """ return load(stream, SafeLoader) def safe_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve only basic YAML tags. This is known to be safe for untrusted input. """ return load_all(stream, SafeLoader) def unsafe_load(stream): """ Parse the first YAML document in a stream and produce the corresponding Python object. Resolve all tags, even those known to be unsafe on untrusted input. """ return load(stream, UnsafeLoader) def unsafe_load_all(stream): """ Parse all YAML documents in a stream and produce corresponding Python objects. Resolve all tags, even those known to be unsafe on untrusted input. """ return load_all(stream, UnsafeLoader) def emit(events, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None): """ Emit YAML parsing events into a stream. If stream is None, return the produced string instead. 
""" getvalue = None if stream is None: stream = io.StringIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break) try: for event in events: dumper.emit(event) finally: dumper.dispose() if getvalue: return getvalue() def serialize_all(nodes, stream=None, Dumper=Dumper, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None): """ Serialize a sequence of representation trees into a YAML stream. If stream is None, return the produced string instead. """ getvalue = None if stream is None: if encoding is None: stream = io.StringIO() else: stream = io.BytesIO() getvalue = stream.getvalue dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end) try: dumper.open() for node in nodes: dumper.serialize(node) dumper.close() finally: dumper.dispose() if getvalue: return getvalue() def serialize(node, stream=None, Dumper=Dumper, **kwds): """ Serialize a representation tree into a YAML stream. If stream is None, return the produced string instead. """ return serialize_all([node], stream, Dumper=Dumper, **kwds) def dump_all(documents, stream=None, Dumper=Dumper, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): """ Serialize a sequence of Python objects into a YAML stream. If stream is None, return the produced string instead. """ getvalue = None if stream is None: if encoding is None: stream = io.StringIO() else: stream = io.BytesIO() getvalue = stream.getvalue dumper = Dumper(stream, default_style=default_style, default_flow_style=default_flow_style, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break, encoding=encoding, version=version, tags=tags, explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys) try: dumper.open() for data in documents: dumper.represent(data) dumper.close() finally: dumper.dispose() if getvalue: return getvalue() def dump(data, stream=None, Dumper=Dumper, **kwds): """ Serialize a Python object into a YAML stream. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=Dumper, **kwds) def safe_dump_all(documents, stream=None, **kwds): """ Serialize a sequence of Python objects into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all(documents, stream, Dumper=SafeDumper, **kwds) def safe_dump(data, stream=None, **kwds): """ Serialize a Python object into a YAML stream. Produce only basic YAML tags. If stream is None, return the produced string instead. """ return dump_all([data], stream, Dumper=SafeDumper, **kwds) def add_implicit_resolver(tag, regexp, first=None, Loader=None, Dumper=Dumper): """ Add an implicit scalar detector. If an implicit scalar value matches the given regexp, the corresponding tag is assigned to the scalar. first is a sequence of possible initial characters or None. 
""" if Loader is None: loader.Loader.add_implicit_resolver(tag, regexp, first) loader.FullLoader.add_implicit_resolver(tag, regexp, first) loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first) else: Loader.add_implicit_resolver(tag, regexp, first) Dumper.add_implicit_resolver(tag, regexp, first) def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper): """ Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None. """ if Loader is None: loader.Loader.add_path_resolver(tag, path, kind) loader.FullLoader.add_path_resolver(tag, path, kind) loader.UnsafeLoader.add_path_resolver(tag, path, kind) else: Loader.add_path_resolver(tag, path, kind) Dumper.add_path_resolver(tag, path, kind) def add_constructor(tag, constructor, Loader=None): """ Add a constructor for the given tag. Constructor is a function that accepts a Loader instance and a node object and produces the corresponding Python object. """ if Loader is None: loader.Loader.add_constructor(tag, constructor) loader.FullLoader.add_constructor(tag, constructor) loader.UnsafeLoader.add_constructor(tag, constructor) else: Loader.add_constructor(tag, constructor) def add_multi_constructor(tag_prefix, multi_constructor, Loader=None): """ Add a multi-constructor for the given tag prefix. Multi-constructor is called for a node if its tag starts with tag_prefix. Multi-constructor accepts a Loader instance, a tag suffix, and a node object and produces the corresponding Python object. """ if Loader is None: loader.Loader.add_multi_constructor(tag_prefix, multi_constructor) loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor) loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor) else: Loader.add_multi_constructor(tag_prefix, multi_constructor) def add_representer(data_type, representer, Dumper=Dumper): """ Add a representer for the given type. Representer is a function accepting a Dumper instance and an instance of the given data type and producing the corresponding representation node. """ Dumper.add_representer(data_type, representer) def add_multi_representer(data_type, multi_representer, Dumper=Dumper): """ Add a representer for the given type. Multi-representer is a function accepting a Dumper instance and an instance of the given data type or subtype and producing the corresponding representation node. """ Dumper.add_multi_representer(data_type, multi_representer) class YAMLObjectMetaclass(type): """ The metaclass for YAMLObject. """ def __init__(cls, name, bases, kwds): super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: if isinstance(cls.yaml_loader, list): for loader in cls.yaml_loader: loader.add_constructor(cls.yaml_tag, cls.from_yaml) else: cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) cls.yaml_dumper.add_representer(cls, cls.to_yaml) class YAMLObject(metaclass=YAMLObjectMetaclass): """ An object that can dump itself to a YAML stream and load itself from a YAML stream. """ __slots__ = () # no direct instantiation, so allow immutable subclasses yaml_loader = [Loader, FullLoader, UnsafeLoader] yaml_dumper = Dumper yaml_tag = None yaml_flow_style = None @classmethod def from_yaml(cls, loader, node): """ Convert a representation node to a Python object. 
""" return loader.construct_yaml_object(node, cls) @classmethod def to_yaml(cls, dumper, data): """ Convert a Python object to a representation node. """ return dumper.represent_yaml_object(cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style)
12,309
Python
.py
344
29.625
90
0.664653
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,437
events.py
rembo10_headphones/lib/yaml/events.py
# Abstract classes. class Event(object): def __init__(self, start_mark=None, end_mark=None): self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] if hasattr(self, key)] arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) return '%s(%s)' % (self.__class__.__name__, arguments) class NodeEvent(Event): def __init__(self, anchor, start_mark=None, end_mark=None): self.anchor = anchor self.start_mark = start_mark self.end_mark = end_mark class CollectionStartEvent(NodeEvent): def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, flow_style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.start_mark = start_mark self.end_mark = end_mark self.flow_style = flow_style class CollectionEndEvent(Event): pass # Implementations. class StreamStartEvent(Event): def __init__(self, start_mark=None, end_mark=None, encoding=None): self.start_mark = start_mark self.end_mark = end_mark self.encoding = encoding class StreamEndEvent(Event): pass class DocumentStartEvent(Event): def __init__(self, start_mark=None, end_mark=None, explicit=None, version=None, tags=None): self.start_mark = start_mark self.end_mark = end_mark self.explicit = explicit self.version = version self.tags = tags class DocumentEndEvent(Event): def __init__(self, start_mark=None, end_mark=None, explicit=None): self.start_mark = start_mark self.end_mark = end_mark self.explicit = explicit class AliasEvent(NodeEvent): pass class ScalarEvent(NodeEvent): def __init__(self, anchor, tag, implicit, value, start_mark=None, end_mark=None, style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.value = value self.start_mark = start_mark self.end_mark = end_mark self.style = style class SequenceStartEvent(CollectionStartEvent): pass class SequenceEndEvent(CollectionEndEvent): pass class MappingStartEvent(CollectionStartEvent): pass class MappingEndEvent(CollectionEndEvent): pass
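These event classes are exactly what the parse() generator in __init__.py yields; a quick way to see them for a small document:

# Sketch: print the event stream produced for a tiny mapping.
import yaml

for event in yaml.parse("a: [1, 2]\n"):
    print(type(event).__name__)
# StreamStartEvent, DocumentStartEvent, MappingStartEvent, ScalarEvent,
# SequenceStartEvent, ScalarEvent, ScalarEvent, SequenceEndEvent,
# MappingEndEvent, DocumentEndEvent, StreamEndEvent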
2,445
Python
.py
69
28.449275
77
0.642221
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,438
composer.py
rembo10_headphones/lib/yaml/composer.py
__all__ = ['Composer', 'ComposerError'] from .error import MarkedYAMLError from .events import * from .nodes import * class ComposerError(MarkedYAMLError): pass class Composer: def __init__(self): self.anchors = {} def check_node(self): # Drop the STREAM-START event. if self.check_event(StreamStartEvent): self.get_event() # If there are more documents available? return not self.check_event(StreamEndEvent) def get_node(self): # Get the root node of the next document. if not self.check_event(StreamEndEvent): return self.compose_document() def get_single_node(self): # Drop the STREAM-START event. self.get_event() # Compose a document if the stream is not empty. document = None if not self.check_event(StreamEndEvent): document = self.compose_document() # Ensure that the stream contains no more documents. if not self.check_event(StreamEndEvent): event = self.get_event() raise ComposerError("expected a single document in the stream", document.start_mark, "but found another document", event.start_mark) # Drop the STREAM-END event. self.get_event() return document def compose_document(self): # Drop the DOCUMENT-START event. self.get_event() # Compose the root node. node = self.compose_node(None, None) # Drop the DOCUMENT-END event. self.get_event() self.anchors = {} return node def compose_node(self, parent, index): if self.check_event(AliasEvent): event = self.get_event() anchor = event.anchor if anchor not in self.anchors: raise ComposerError(None, None, "found undefined alias %r" % anchor, event.start_mark) return self.anchors[anchor] event = self.peek_event() anchor = event.anchor if anchor is not None: if anchor in self.anchors: raise ComposerError("found duplicate anchor %r; first occurrence" % anchor, self.anchors[anchor].start_mark, "second occurrence", event.start_mark) self.descend_resolver(parent, index) if self.check_event(ScalarEvent): node = self.compose_scalar_node(anchor) elif self.check_event(SequenceStartEvent): node = self.compose_sequence_node(anchor) elif self.check_event(MappingStartEvent): node = self.compose_mapping_node(anchor) self.ascend_resolver() return node def compose_scalar_node(self, anchor): event = self.get_event() tag = event.tag if tag is None or tag == '!': tag = self.resolve(ScalarNode, event.value, event.implicit) node = ScalarNode(tag, event.value, event.start_mark, event.end_mark, style=event.style) if anchor is not None: self.anchors[anchor] = node return node def compose_sequence_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == '!': tag = self.resolve(SequenceNode, None, start_event.implicit) node = SequenceNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node index = 0 while not self.check_event(SequenceEndEvent): node.value.append(self.compose_node(node, index)) index += 1 end_event = self.get_event() node.end_mark = end_event.end_mark return node def compose_mapping_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == '!': tag = self.resolve(MappingNode, None, start_event.implicit) node = MappingNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node while not self.check_event(MappingEndEvent): #key_event = self.peek_event() item_key = self.compose_node(node, None) #if item_key in node.value: # raise ComposerError("while composing a mapping", start_event.start_mark, # "found duplicate key", key_event.start_mark) 
item_value = self.compose_node(node, item_key) #node.value[item_key] = item_value node.value.append((item_key, item_value)) end_event = self.get_event() node.end_mark = end_event.end_mark return node
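A detail worth seeing in action: because compose_node returns the anchored node object itself for an AliasEvent, aliases are shared references in the composed tree, not copies. A small sketch using the compose() entry point:

# Sketch: an alias (*a) resolves to the very same node as its anchor (&a).
import yaml

root = yaml.compose("first: &a [1, 2]\nsecond: *a\n")
(k1, v1), (k2, v2) = root.value  # MappingNode.value is a list of key/value pairs
print(v1 is v2)  # True: shared node object, not a copy
print(v1.tag)    # tag:yaml.org,2002:seq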
4,883
Python
.py
117
31.213675
89
0.597597
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,439
reader.py
rembo10_headphones/lib/yaml/reader.py
# This module contains abstractions for the input stream. You don't have to # look further; there is no pretty code. # # We define two classes here. # # Mark(source, line, column) # It's just a record and its only use is producing nice error messages. # Parser does not use it for any other purposes. # # Reader(source, data) # Reader determines the encoding of `data` and converts it to unicode. # Reader provides the following methods and attributes: # reader.peek(length=1) - return the next `length` characters # reader.forward(length=1) - move the current position `length` characters ahead. # reader.index - the number of the current character. # reader.line, reader.column - the line and the column of the current character. __all__ = ['Reader', 'ReaderError'] from .error import YAMLError, Mark import codecs, re class ReaderError(YAMLError): def __init__(self, name, position, character, encoding, reason): self.name = name self.character = character self.position = position self.encoding = encoding self.reason = reason def __str__(self): if isinstance(self.character, bytes): return "'%s' codec can't decode byte #x%02x: %s\n" \ " in \"%s\", position %d" \ % (self.encoding, ord(self.character), self.reason, self.name, self.position) else: return "unacceptable character #x%04x: %s\n" \ " in \"%s\", position %d" \ % (self.character, self.reason, self.name, self.position) class Reader(object): # Reader: # - determines the data encoding and converts it to a unicode string, # - checks if characters are in allowed range, # - adds '\0' to the end. # Reader accepts # - a `bytes` object, # - a `str` object, # - a file-like object with its `read` method returning `str`, # - a file-like object with its `read` method returning `unicode`. # Yeah, it's ugly and slow. def __init__(self, stream): self.name = None self.stream = None self.stream_pointer = 0 self.eof = True self.buffer = '' self.pointer = 0 self.raw_buffer = None self.raw_decode = None self.encoding = None self.index = 0 self.line = 0 self.column = 0 if isinstance(stream, str): self.name = "<unicode string>" self.check_printable(stream) self.buffer = stream+'\0' elif isinstance(stream, bytes): self.name = "<byte string>" self.raw_buffer = stream self.determine_encoding() else: self.stream = stream self.name = getattr(stream, 'name', "<file>") self.eof = False self.raw_buffer = None self.determine_encoding() def peek(self, index=0): try: return self.buffer[self.pointer+index] except IndexError: self.update(index+1) return self.buffer[self.pointer+index] def prefix(self, length=1): if self.pointer+length >= len(self.buffer): self.update(length) return self.buffer[self.pointer:self.pointer+length] def forward(self, length=1): if self.pointer+length+1 >= len(self.buffer): self.update(length+1) while length: ch = self.buffer[self.pointer] self.pointer += 1 self.index += 1 if ch in '\n\x85\u2028\u2029' \ or (ch == '\r' and self.buffer[self.pointer] != '\n'): self.line += 1 self.column = 0 elif ch != '\uFEFF': self.column += 1 length -= 1 def get_mark(self): if self.stream is None: return Mark(self.name, self.index, self.line, self.column, self.buffer, self.pointer) else: return Mark(self.name, self.index, self.line, self.column, None, None) def determine_encoding(self): while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): self.update_raw() if isinstance(self.raw_buffer, bytes): if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): self.raw_decode = codecs.utf_16_le_decode self.encoding = 'utf-16-le' elif
self.raw_buffer.startswith(codecs.BOM_UTF16_BE): self.raw_decode = codecs.utf_16_be_decode self.encoding = 'utf-16-be' else: self.raw_decode = codecs.utf_8_decode self.encoding = 'utf-8' self.update(1) NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]') def check_printable(self, data): match = self.NON_PRINTABLE.search(data) if match: character = match.group() position = self.index+(len(self.buffer)-self.pointer)+match.start() raise ReaderError(self.name, position, ord(character), 'unicode', "special characters are not allowed") def update(self, length): if self.raw_buffer is None: return self.buffer = self.buffer[self.pointer:] self.pointer = 0 while len(self.buffer) < length: if not self.eof: self.update_raw() if self.raw_decode is not None: try: data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof) except UnicodeDecodeError as exc: character = self.raw_buffer[exc.start] if self.stream is not None: position = self.stream_pointer-len(self.raw_buffer)+exc.start else: position = exc.start raise ReaderError(self.name, position, character, exc.encoding, exc.reason) else: data = self.raw_buffer converted = len(data) self.check_printable(data) self.buffer += data self.raw_buffer = self.raw_buffer[converted:] if self.eof: self.buffer += '\0' self.raw_buffer = None break def update_raw(self, size=4096): data = self.stream.read(size) if self.raw_buffer is None: self.raw_buffer = data else: self.raw_buffer += data self.stream_pointer += len(data) if not data: self.eof = True
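Reader's public surface is small: peek/prefix look ahead without consuming, forward consumes while maintaining index/line/column for error marks, and the encoding of byte input is detected from a BOM (falling back to UTF-8). A sketch against the class above:

# Sketch: byte input is decoded (utf-8 here, no BOM present) and exposed
# as a unicode buffer with position bookkeeping.
from yaml.reader import Reader

r = Reader(b"key: value\n")
print(r.encoding)   # 'utf-8'
print(r.peek())     # 'k'   (look ahead, nothing consumed)
print(r.prefix(3))  # 'key'
r.forward(5)        # consume 'key: '
print(r.peek(), r.line, r.column)  # 'v' 0 5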
6,794
Python
.py
167
29.634731
107
0.561507
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,440
parser.py
rembo10_headphones/lib/yaml/parser.py
# The following YAML grammar is LL(1) and is parsed by a recursive descent # parser. # # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END # implicit_document ::= block_node DOCUMENT-END* # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* # block_node_or_indentless_sequence ::= # ALIAS # | properties (block_content | indentless_block_sequence)? # | block_content # | indentless_block_sequence # block_node ::= ALIAS # | properties block_content? # | block_content # flow_node ::= ALIAS # | properties flow_content? # | flow_content # properties ::= TAG ANCHOR? | ANCHOR TAG? # block_content ::= block_collection | flow_collection | SCALAR # flow_content ::= flow_collection | SCALAR # block_collection ::= block_sequence | block_mapping # flow_collection ::= flow_sequence | flow_mapping # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ # block_mapping ::= BLOCK-MAPPING_START # ((KEY block_node_or_indentless_sequence?)? # (VALUE block_node_or_indentless_sequence?)?)* # BLOCK-END # flow_sequence ::= FLOW-SEQUENCE-START # (flow_sequence_entry FLOW-ENTRY)* # flow_sequence_entry? # FLOW-SEQUENCE-END # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? # flow_mapping ::= FLOW-MAPPING-START # (flow_mapping_entry FLOW-ENTRY)* # flow_mapping_entry? # FLOW-MAPPING-END # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? # # FIRST sets: # # stream: { STREAM-START } # explicit_document: { DIRECTIVE DOCUMENT-START } # implicit_document: FIRST(block_node) # block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } # flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } # block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } # flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } # block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } # block_sequence: { BLOCK-SEQUENCE-START } # block_mapping: { BLOCK-MAPPING-START } # block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } # indentless_sequence: { ENTRY } # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } # flow_sequence: { FLOW-SEQUENCE-START } # flow_mapping: { FLOW-MAPPING-START } # flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } # flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } __all__ = ['Parser', 'ParserError'] from .error import MarkedYAMLError from .tokens import * from .events import * from .scanner import * class ParserError(MarkedYAMLError): pass class Parser: # Since writing a recursive-descendant parser is a straightforward task, we # do not give many comments here. DEFAULT_TAGS = { '!': '!', '!!': 'tag:yaml.org,2002:', } def __init__(self): self.current_event = None self.yaml_version = None self.tag_handles = {} self.states = [] self.marks = [] self.state = self.parse_stream_start def dispose(self): # Reset the state attributes (to clear self-references) self.states = [] self.state = None def check_event(self, *choices): # Check the type of the next event. 
if self.current_event is None: if self.state: self.current_event = self.state() if self.current_event is not None: if not choices: return True for choice in choices: if isinstance(self.current_event, choice): return True return False def peek_event(self): # Get the next event. if self.current_event is None: if self.state: self.current_event = self.state() return self.current_event def get_event(self): # Get the next event and proceed further. if self.current_event is None: if self.state: self.current_event = self.state() value = self.current_event self.current_event = None return value # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END # implicit_document ::= block_node DOCUMENT-END* # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* def parse_stream_start(self): # Parse the stream start. token = self.get_token() event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding) # Prepare the next state. self.state = self.parse_implicit_document_start return event def parse_implicit_document_start(self): # Parse an implicit document. if not self.check_token(DirectiveToken, DocumentStartToken, StreamEndToken): self.tag_handles = self.DEFAULT_TAGS token = self.peek_token() start_mark = end_mark = token.start_mark event = DocumentStartEvent(start_mark, end_mark, explicit=False) # Prepare the next state. self.states.append(self.parse_document_end) self.state = self.parse_block_node return event else: return self.parse_document_start() def parse_document_start(self): # Parse any extra document end indicators. while self.check_token(DocumentEndToken): self.get_token() # Parse an explicit document. if not self.check_token(StreamEndToken): token = self.peek_token() start_mark = token.start_mark version, tags = self.process_directives() if not self.check_token(DocumentStartToken): raise ParserError(None, None, "expected '<document start>', but found %r" % self.peek_token().id, self.peek_token().start_mark) token = self.get_token() end_mark = token.end_mark event = DocumentStartEvent(start_mark, end_mark, explicit=True, version=version, tags=tags) self.states.append(self.parse_document_end) self.state = self.parse_document_content else: # Parse the end of the stream. token = self.get_token() event = StreamEndEvent(token.start_mark, token.end_mark) assert not self.states assert not self.marks self.state = None return event def parse_document_end(self): # Parse the document end. token = self.peek_token() start_mark = end_mark = token.start_mark explicit = False if self.check_token(DocumentEndToken): token = self.get_token() end_mark = token.end_mark explicit = True event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) # Prepare the next state. 
self.state = self.parse_document_start return event def parse_document_content(self): if self.check_token(DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken): event = self.process_empty_scalar(self.peek_token().start_mark) self.state = self.states.pop() return event else: return self.parse_block_node() def process_directives(self): self.yaml_version = None self.tag_handles = {} while self.check_token(DirectiveToken): token = self.get_token() if token.name == 'YAML': if self.yaml_version is not None: raise ParserError(None, None, "found duplicate YAML directive", token.start_mark) major, minor = token.value if major != 1: raise ParserError(None, None, "found incompatible YAML document (version 1.* is required)", token.start_mark) self.yaml_version = token.value elif token.name == 'TAG': handle, prefix = token.value if handle in self.tag_handles: raise ParserError(None, None, "duplicate tag handle %r" % handle, token.start_mark) self.tag_handles[handle] = prefix if self.tag_handles: value = self.yaml_version, self.tag_handles.copy() else: value = self.yaml_version, None for key in self.DEFAULT_TAGS: if key not in self.tag_handles: self.tag_handles[key] = self.DEFAULT_TAGS[key] return value # block_node_or_indentless_sequence ::= ALIAS # | properties (block_content | indentless_block_sequence)? # | block_content # | indentless_block_sequence # block_node ::= ALIAS # | properties block_content? # | block_content # flow_node ::= ALIAS # | properties flow_content? # | flow_content # properties ::= TAG ANCHOR? | ANCHOR TAG? # block_content ::= block_collection | flow_collection | SCALAR # flow_content ::= flow_collection | SCALAR # block_collection ::= block_sequence | block_mapping # flow_collection ::= flow_sequence | flow_mapping def parse_block_node(self): return self.parse_node(block=True) def parse_flow_node(self): return self.parse_node() def parse_block_node_or_indentless_sequence(self): return self.parse_node(block=True, indentless_sequence=True) def parse_node(self, block=False, indentless_sequence=False): if self.check_token(AliasToken): token = self.get_token() event = AliasEvent(token.value, token.start_mark, token.end_mark) self.state = self.states.pop() else: anchor = None tag = None start_mark = end_mark = tag_mark = None if self.check_token(AnchorToken): token = self.get_token() start_mark = token.start_mark end_mark = token.end_mark anchor = token.value if self.check_token(TagToken): token = self.get_token() tag_mark = token.start_mark end_mark = token.end_mark tag = token.value elif self.check_token(TagToken): token = self.get_token() start_mark = tag_mark = token.start_mark end_mark = token.end_mark tag = token.value if self.check_token(AnchorToken): token = self.get_token() end_mark = token.end_mark anchor = token.value if tag is not None: handle, suffix = tag if handle is not None: if handle not in self.tag_handles: raise ParserError("while parsing a node", start_mark, "found undefined tag handle %r" % handle, tag_mark) tag = self.tag_handles[handle]+suffix else: tag = suffix #if tag == '!': # raise ParserError("while parsing a node", start_mark, # "found non-specific tag '!'", tag_mark, # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") if start_mark is None: start_mark = end_mark = self.peek_token().start_mark event = None implicit = (tag is None or tag == '!') if indentless_sequence and self.check_token(BlockEntryToken): end_mark = self.peek_token().end_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, 
end_mark) self.state = self.parse_indentless_sequence_entry else: if self.check_token(ScalarToken): token = self.get_token() end_mark = token.end_mark if (token.plain and tag is None) or tag == '!': implicit = (True, False) elif tag is None: implicit = (False, True) else: implicit = (False, False) event = ScalarEvent(anchor, tag, implicit, token.value, start_mark, end_mark, style=token.style) self.state = self.states.pop() elif self.check_token(FlowSequenceStartToken): end_mark = self.peek_token().end_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=True) self.state = self.parse_flow_sequence_first_entry elif self.check_token(FlowMappingStartToken): end_mark = self.peek_token().end_mark event = MappingStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=True) self.state = self.parse_flow_mapping_first_key elif block and self.check_token(BlockSequenceStartToken): end_mark = self.peek_token().start_mark event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=False) self.state = self.parse_block_sequence_first_entry elif block and self.check_token(BlockMappingStartToken): end_mark = self.peek_token().start_mark event = MappingStartEvent(anchor, tag, implicit, start_mark, end_mark, flow_style=False) self.state = self.parse_block_mapping_first_key elif anchor is not None or tag is not None: # Empty scalars are allowed even if a tag or an anchor is # specified. event = ScalarEvent(anchor, tag, (implicit, False), '', start_mark, end_mark) self.state = self.states.pop() else: if block: node = 'block' else: node = 'flow' token = self.peek_token() raise ParserError("while parsing a %s node" % node, start_mark, "expected the node content, but found %r" % token.id, token.start_mark) return event # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END def parse_block_sequence_first_entry(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_block_sequence_entry() def parse_block_sequence_entry(self): if self.check_token(BlockEntryToken): token = self.get_token() if not self.check_token(BlockEntryToken, BlockEndToken): self.states.append(self.parse_block_sequence_entry) return self.parse_block_node() else: self.state = self.parse_block_sequence_entry return self.process_empty_scalar(token.end_mark) if not self.check_token(BlockEndToken): token = self.peek_token() raise ParserError("while parsing a block collection", self.marks[-1], "expected <block end>, but found %r" % token.id, token.start_mark) token = self.get_token() event = SequenceEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ def parse_indentless_sequence_entry(self): if self.check_token(BlockEntryToken): token = self.get_token() if not self.check_token(BlockEntryToken, KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_indentless_sequence_entry) return self.parse_block_node() else: self.state = self.parse_indentless_sequence_entry return self.process_empty_scalar(token.end_mark) token = self.peek_token() event = SequenceEndEvent(token.start_mark, token.start_mark) self.state = self.states.pop() return event # block_mapping ::= BLOCK-MAPPING_START # ((KEY block_node_or_indentless_sequence?)? 
# (VALUE block_node_or_indentless_sequence?)?)* # BLOCK-END def parse_block_mapping_first_key(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_block_mapping_key() def parse_block_mapping_key(self): if self.check_token(KeyToken): token = self.get_token() if not self.check_token(KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_block_mapping_value) return self.parse_block_node_or_indentless_sequence() else: self.state = self.parse_block_mapping_value return self.process_empty_scalar(token.end_mark) if not self.check_token(BlockEndToken): token = self.peek_token() raise ParserError("while parsing a block mapping", self.marks[-1], "expected <block end>, but found %r" % token.id, token.start_mark) token = self.get_token() event = MappingEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_block_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(KeyToken, ValueToken, BlockEndToken): self.states.append(self.parse_block_mapping_key) return self.parse_block_node_or_indentless_sequence() else: self.state = self.parse_block_mapping_key return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_block_mapping_key token = self.peek_token() return self.process_empty_scalar(token.start_mark) # flow_sequence ::= FLOW-SEQUENCE-START # (flow_sequence_entry FLOW-ENTRY)* # flow_sequence_entry? # FLOW-SEQUENCE-END # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? # # Note that while production rules for both flow_sequence_entry and # flow_mapping_entry are equal, their interpretations are different. # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` # generates an inline mapping (set syntax).
def parse_flow_sequence_first_entry(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_flow_sequence_entry(first=True) def parse_flow_sequence_entry(self, first=False): if not self.check_token(FlowSequenceEndToken): if not first: if self.check_token(FlowEntryToken): self.get_token() else: token = self.peek_token() raise ParserError("while parsing a flow sequence", self.marks[-1], "expected ',' or ']', but got %r" % token.id, token.start_mark) if self.check_token(KeyToken): token = self.peek_token() event = MappingStartEvent(None, None, True, token.start_mark, token.end_mark, flow_style=True) self.state = self.parse_flow_sequence_entry_mapping_key return event elif not self.check_token(FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry) return self.parse_flow_node() token = self.get_token() event = SequenceEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_flow_sequence_entry_mapping_key(self): token = self.get_token() if not self.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry_mapping_value) return self.parse_flow_node() else: self.state = self.parse_flow_sequence_entry_mapping_value return self.process_empty_scalar(token.end_mark) def parse_flow_sequence_entry_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(FlowEntryToken, FlowSequenceEndToken): self.states.append(self.parse_flow_sequence_entry_mapping_end) return self.parse_flow_node() else: self.state = self.parse_flow_sequence_entry_mapping_end return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_flow_sequence_entry_mapping_end token = self.peek_token() return self.process_empty_scalar(token.start_mark) def parse_flow_sequence_entry_mapping_end(self): self.state = self.parse_flow_sequence_entry token = self.peek_token() return MappingEndEvent(token.start_mark, token.start_mark) # flow_mapping ::= FLOW-MAPPING-START # (flow_mapping_entry FLOW-ENTRY)* # flow_mapping_entry? # FLOW-MAPPING-END # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
def parse_flow_mapping_first_key(self): token = self.get_token() self.marks.append(token.start_mark) return self.parse_flow_mapping_key(first=True) def parse_flow_mapping_key(self, first=False): if not self.check_token(FlowMappingEndToken): if not first: if self.check_token(FlowEntryToken): self.get_token() else: token = self.peek_token() raise ParserError("while parsing a flow mapping", self.marks[-1], "expected ',' or '}', but got %r" % token.id, token.start_mark) if self.check_token(KeyToken): token = self.get_token() if not self.check_token(ValueToken, FlowEntryToken, FlowMappingEndToken): self.states.append(self.parse_flow_mapping_value) return self.parse_flow_node() else: self.state = self.parse_flow_mapping_value return self.process_empty_scalar(token.end_mark) elif not self.check_token(FlowMappingEndToken): self.states.append(self.parse_flow_mapping_empty_value) return self.parse_flow_node() token = self.get_token() event = MappingEndEvent(token.start_mark, token.end_mark) self.state = self.states.pop() self.marks.pop() return event def parse_flow_mapping_value(self): if self.check_token(ValueToken): token = self.get_token() if not self.check_token(FlowEntryToken, FlowMappingEndToken): self.states.append(self.parse_flow_mapping_key) return self.parse_flow_node() else: self.state = self.parse_flow_mapping_key return self.process_empty_scalar(token.end_mark) else: self.state = self.parse_flow_mapping_key token = self.peek_token() return self.process_empty_scalar(token.start_mark) def parse_flow_mapping_empty_value(self): self.state = self.parse_flow_mapping_key return self.process_empty_scalar(self.peek_token().start_mark) def process_empty_scalar(self, mark): return ScalarEvent(None, None, (True, False), '', mark, mark)
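The parser is a hand-written state machine: each parse_* method emits one event and stores the next production in self.state, so events are produced lazily as check_event/peek_event/get_event pull on it. One observable consequence, sketched below: DocumentStartEvent.explicit records whether the '---' marker was present.

# Sketch: implicit vs explicit document start, seen through yaml.parse.
import yaml

def doc_start(text):
    return next(e for e in yaml.parse(text)
                if isinstance(e, yaml.events.DocumentStartEvent))

print(doc_start("a: 1\n").explicit)            # False
print(doc_start("---\na: 1\n...\n").explicit)  # True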
25,495
Python
.py
531
36.124294
156
0.574596
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,441
scanner.py
rembo10_headphones/lib/yaml/scanner.py
# Scanner produces tokens of the following types: # STREAM-START # STREAM-END # DIRECTIVE(name, value) # DOCUMENT-START # DOCUMENT-END # BLOCK-SEQUENCE-START # BLOCK-MAPPING-START # BLOCK-END # FLOW-SEQUENCE-START # FLOW-MAPPING-START # FLOW-SEQUENCE-END # FLOW-MAPPING-END # BLOCK-ENTRY # FLOW-ENTRY # KEY # VALUE # ALIAS(value) # ANCHOR(value) # TAG(value) # SCALAR(value, plain, style) # # Read comments in the Scanner code for more details. # __all__ = ['Scanner', 'ScannerError'] from .error import MarkedYAMLError from .tokens import * class ScannerError(MarkedYAMLError): pass class SimpleKey: # See below simple keys treatment. def __init__(self, token_number, required, index, line, column, mark): self.token_number = token_number self.required = required self.index = index self.line = line self.column = column self.mark = mark class Scanner: def __init__(self): """Initialize the scanner.""" # It is assumed that Scanner and Reader will have a common descendant. # Reader does the dirty work of checking for BOM and converting the # input data to Unicode. It also adds NUL to the end. # # Reader supports the following methods # self.peek(i=0) # peek the next i-th character # self.prefix(l=1) # peek the next l characters # self.forward(l=1) # read the next l characters and move the pointer. # Have we reached the end of the stream? self.done = False # The number of unclosed '{' and '['. `flow_level == 0` means block # context. self.flow_level = 0 # List of processed tokens that are not yet emitted. self.tokens = [] # Add the STREAM-START token. self.fetch_stream_start() # Number of tokens that were emitted through the `get_token` method. self.tokens_taken = 0 # The current indentation level. self.indent = -1 # Past indentation levels. self.indents = [] # Variables related to simple keys treatment. # A simple key is a key that is not denoted by the '?' indicator. # Example of simple keys: # --- # block simple key: value # ? not a simple key: # : { flow simple key: value } # We emit the KEY token before all keys, so when we find a potential # simple key, we try to locate the corresponding ':' indicator. # Simple keys should be limited to a single line and 1024 characters. # Can a simple key start at the current position? A simple key may # start: # - at the beginning of the line, not counting indentation spaces # (in block context), # - after '{', '[', ',' (in the flow context), # - after '?', ':', '-' (in the block context). # In the block context, this flag also signifies if a block collection # may start at the current position. self.allow_simple_key = True # Keep track of possible simple keys. This is a dictionary. The key # is `flow_level`; there can be no more than one possible simple key # for each level. The value is a SimpleKey record: # (token_number, required, index, line, column, mark) # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), # '[', or '{' tokens. self.possible_simple_keys = {} # Public methods. def check_token(self, *choices): # Check if the next token is one of the given types. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: if not choices: return True for choice in choices: if isinstance(self.tokens[0], choice): return True return False def peek_token(self): # Return the next token, but do not delete it from the queue. # Return None if no more tokens. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: return self.tokens[0] else: return None def get_token(self): # Return the next token.
while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: self.tokens_taken += 1 return self.tokens.pop(0) # Private methods. def need_more_tokens(self): if self.done: return False if not self.tokens: return True # The current token may be a potential simple key, so we # need to look further. self.stale_possible_simple_keys() if self.next_possible_simple_key() == self.tokens_taken: return True def fetch_more_tokens(self): # Eat whitespaces and comments until we reach the next token. self.scan_to_next_token() # Remove obsolete possible simple keys. self.stale_possible_simple_keys() # Compare the current indentation and column. It may add some tokens # and decrease the current indentation level. self.unwind_indent(self.column) # Peek the next character. ch = self.peek() # Is it the end of stream? if ch == '\0': return self.fetch_stream_end() # Is it a directive? if ch == '%' and self.check_directive(): return self.fetch_directive() # Is it the document start? if ch == '-' and self.check_document_start(): return self.fetch_document_start() # Is it the document end? if ch == '.' and self.check_document_end(): return self.fetch_document_end() # TODO: support for BOM within a stream. #if ch == '\uFEFF': # return self.fetch_bom() <-- issue BOMToken # Note: the order of the following checks is NOT significant. # Is it the flow sequence start indicator? if ch == '[': return self.fetch_flow_sequence_start() # Is it the flow mapping start indicator? if ch == '{': return self.fetch_flow_mapping_start() # Is it the flow sequence end indicator? if ch == ']': return self.fetch_flow_sequence_end() # Is it the flow mapping end indicator? if ch == '}': return self.fetch_flow_mapping_end() # Is it the flow entry indicator? if ch == ',': return self.fetch_flow_entry() # Is it the block entry indicator? if ch == '-' and self.check_block_entry(): return self.fetch_block_entry() # Is it the key indicator? if ch == '?' and self.check_key(): return self.fetch_key() # Is it the value indicator? if ch == ':' and self.check_value(): return self.fetch_value() # Is it an alias? if ch == '*': return self.fetch_alias() # Is it an anchor? if ch == '&': return self.fetch_anchor() # Is it a tag? if ch == '!': return self.fetch_tag() # Is it a literal scalar? if ch == '|' and not self.flow_level: return self.fetch_literal() # Is it a folded scalar? if ch == '>' and not self.flow_level: return self.fetch_folded() # Is it a single quoted scalar? if ch == '\'': return self.fetch_single() # Is it a double quoted scalar? if ch == '\"': return self.fetch_double() # It must be a plain scalar then. if self.check_plain(): return self.fetch_plain() # No? It's an error. Let's produce a nice error message. raise ScannerError("while scanning for the next token", None, "found character %r that cannot start any token" % ch, self.get_mark()) # Simple keys treatment. def next_possible_simple_key(self): # Return the number of the nearest possible simple key. Actually we # don't need to loop through the whole dictionary. We may replace it # with the following code: # if not self.possible_simple_keys: # return None # return self.possible_simple_keys[ # min(self.possible_simple_keys.keys())].token_number min_token_number = None for level in self.possible_simple_keys: key = self.possible_simple_keys[level] if min_token_number is None or key.token_number < min_token_number: min_token_number = key.token_number return min_token_number def stale_possible_simple_keys(self): # Remove entries that are no longer possible simple keys. 
According to # the YAML specification, simple keys # - should be limited to a single line, # - should be no longer than 1024 characters. # Disabling this procedure will allow simple keys of any length and # height (may cause problems if indentation is broken though). for level in list(self.possible_simple_keys): key = self.possible_simple_keys[level] if key.line != self.line \ or self.index-key.index > 1024: if key.required: raise ScannerError("while scanning a simple key", key.mark, "could not find expected ':'", self.get_mark()) del self.possible_simple_keys[level] def save_possible_simple_key(self): # The next token may start a simple key. We check if it's possible # and save its position. This function is called for # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. # Check if a simple key is required at the current position. required = not self.flow_level and self.indent == self.column # The next token might be a simple key. Let's save its number and # position. if self.allow_simple_key: self.remove_possible_simple_key() token_number = self.tokens_taken+len(self.tokens) key = SimpleKey(token_number, required, self.index, self.line, self.column, self.get_mark()) self.possible_simple_keys[self.flow_level] = key def remove_possible_simple_key(self): # Remove the saved possible key position at the current flow level. if self.flow_level in self.possible_simple_keys: key = self.possible_simple_keys[self.flow_level] if key.required: raise ScannerError("while scanning a simple key", key.mark, "could not find expected ':'", self.get_mark()) del self.possible_simple_keys[self.flow_level] # Indentation functions. def unwind_indent(self, column): ## In flow context, tokens should respect indentation. ## Actually the condition should be `self.indent >= column` according to ## the spec. But this condition will prohibit intuitively correct ## constructions such as ## key : { ## } #if self.flow_level and self.indent > column: # raise ScannerError(None, None, # "invalid indentation or unclosed '[' or '{'", # self.get_mark()) # In the flow context, indentation is ignored. We make the scanner less # restrictive than the specification requires. if self.flow_level: return # In block context, we may need to issue the BLOCK-END tokens. while self.indent > column: mark = self.get_mark() self.indent = self.indents.pop() self.tokens.append(BlockEndToken(mark, mark)) def add_indent(self, column): # Check if we need to increase indentation. if self.indent < column: self.indents.append(self.indent) self.indent = column return True return False # Fetchers. def fetch_stream_start(self): # We always add STREAM-START as the first token and STREAM-END as the # last token. # Read the token. mark = self.get_mark() # Add STREAM-START. self.tokens.append(StreamStartToken(mark, mark, encoding=self.encoding)) def fetch_stream_end(self): # Set the current indentation to -1. self.unwind_indent(-1) # Reset simple keys. self.remove_possible_simple_key() self.allow_simple_key = False self.possible_simple_keys = {} # Read the token. mark = self.get_mark() # Add STREAM-END. self.tokens.append(StreamEndToken(mark, mark)) # The stream is finished. self.done = True def fetch_directive(self): # Set the current indentation to -1. self.unwind_indent(-1) # Reset simple keys. self.remove_possible_simple_key() self.allow_simple_key = False # Scan and add DIRECTIVE.
self.tokens.append(self.scan_directive()) def fetch_document_start(self): self.fetch_document_indicator(DocumentStartToken) def fetch_document_end(self): self.fetch_document_indicator(DocumentEndToken) def fetch_document_indicator(self, TokenClass): # Set the current indentation to -1. self.unwind_indent(-1) # Reset simple keys. Note that there could not be a block collection # after '---'. self.remove_possible_simple_key() self.allow_simple_key = False # Add DOCUMENT-START or DOCUMENT-END. start_mark = self.get_mark() self.forward(3) end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_sequence_start(self): self.fetch_flow_collection_start(FlowSequenceStartToken) def fetch_flow_mapping_start(self): self.fetch_flow_collection_start(FlowMappingStartToken) def fetch_flow_collection_start(self, TokenClass): # '[' and '{' may start a simple key. self.save_possible_simple_key() # Increase the flow level. self.flow_level += 1 # Simple keys are allowed after '[' and '{'. self.allow_simple_key = True # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_sequence_end(self): self.fetch_flow_collection_end(FlowSequenceEndToken) def fetch_flow_mapping_end(self): self.fetch_flow_collection_end(FlowMappingEndToken) def fetch_flow_collection_end(self, TokenClass): # Reset possible simple key on the current level. self.remove_possible_simple_key() # Decrease the flow level. self.flow_level -= 1 # No simple keys after ']' or '}'. self.allow_simple_key = False # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_entry(self): # Simple keys are allowed after ','. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add FLOW-ENTRY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(FlowEntryToken(start_mark, end_mark)) def fetch_block_entry(self): # Block context needs additional checks. if not self.flow_level: # Are we allowed to start a new entry? if not self.allow_simple_key: raise ScannerError(None, None, "sequence entries are not allowed here", self.get_mark()) # We may need to add BLOCK-SEQUENCE-START. if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockSequenceStartToken(mark, mark)) # It's an error for the block entry to occur in the flow context, # but we let the parser detect this. else: pass # Simple keys are allowed after '-'. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add BLOCK-ENTRY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(BlockEntryToken(start_mark, end_mark)) def fetch_key(self): # Block context needs additional checks. if not self.flow_level: # Are we allowed to start a key (not necessarily a simple one)? if not self.allow_simple_key: raise ScannerError(None, None, "mapping keys are not allowed here", self.get_mark()) # We may need to add BLOCK-MAPPING-START. if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockMappingStartToken(mark, mark)) # Simple keys are allowed after '?' in the block context. self.allow_simple_key = not self.flow_level # Reset possible simple key on the current level.
self.remove_possible_simple_key() # Add KEY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(KeyToken(start_mark, end_mark)) def fetch_value(self): # Do we determine a simple key? if self.flow_level in self.possible_simple_keys: # Add KEY. key = self.possible_simple_keys[self.flow_level] del self.possible_simple_keys[self.flow_level] self.tokens.insert(key.token_number-self.tokens_taken, KeyToken(key.mark, key.mark)) # If this key starts a new block mapping, we need to add # BLOCK-MAPPING-START. if not self.flow_level: if self.add_indent(key.column): self.tokens.insert(key.token_number-self.tokens_taken, BlockMappingStartToken(key.mark, key.mark)) # There cannot be two simple keys one after another. self.allow_simple_key = False # It must be a part of a complex key. else: # Block context needs additional checks. # (Do we really need them? They will be caught by the parser # anyway.) if not self.flow_level: # We are allowed to start a complex value if and only if # we can start a simple key. if not self.allow_simple_key: raise ScannerError(None, None, "mapping values are not allowed here", self.get_mark()) # If this value starts a new block mapping, we need to add # BLOCK-MAPPING-START. It will be detected as an error later by # the parser. if not self.flow_level: if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockMappingStartToken(mark, mark)) # Simple keys are allowed after ':' in the block context. self.allow_simple_key = not self.flow_level # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add VALUE. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(ValueToken(start_mark, end_mark)) def fetch_alias(self): # ALIAS could be a simple key. self.save_possible_simple_key() # No simple keys after ALIAS. self.allow_simple_key = False # Scan and add ALIAS. self.tokens.append(self.scan_anchor(AliasToken)) def fetch_anchor(self): # ANCHOR could start a simple key. self.save_possible_simple_key() # No simple keys after ANCHOR. self.allow_simple_key = False # Scan and add ANCHOR. self.tokens.append(self.scan_anchor(AnchorToken)) def fetch_tag(self): # TAG could start a simple key. self.save_possible_simple_key() # No simple keys after TAG. self.allow_simple_key = False # Scan and add TAG. self.tokens.append(self.scan_tag()) def fetch_literal(self): self.fetch_block_scalar(style='|') def fetch_folded(self): self.fetch_block_scalar(style='>') def fetch_block_scalar(self, style): # A simple key may follow a block scalar. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Scan and add SCALAR. self.tokens.append(self.scan_block_scalar(style)) def fetch_single(self): self.fetch_flow_scalar(style='\'') def fetch_double(self): self.fetch_flow_scalar(style='"') def fetch_flow_scalar(self, style): # A flow scalar could be a simple key. self.save_possible_simple_key() # No simple keys after flow scalars. self.allow_simple_key = False # Scan and add SCALAR. self.tokens.append(self.scan_flow_scalar(style)) def fetch_plain(self): # A plain scalar could be a simple key. self.save_possible_simple_key() # No simple keys after plain scalars. But note that `scan_plain` will # change this flag if the scan is finished at the beginning of the # line. self.allow_simple_key = False # Scan and add SCALAR. May change `allow_simple_key`. self.tokens.append(self.scan_plain()) # Checkers. 
def check_directive(self): # DIRECTIVE: ^ '%' ... # The '%' indicator is already checked. if self.column == 0: return True def check_document_start(self): # DOCUMENT-START: ^ '---' (' '|'\n') if self.column == 0: if self.prefix(3) == '---' \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return True def check_document_end(self): # DOCUMENT-END: ^ '...' (' '|'\n') if self.column == 0: if self.prefix(3) == '...' \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return True def check_block_entry(self): # BLOCK-ENTRY: '-' (' '|'\n') return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' def check_key(self): # KEY(flow context): '?' if self.flow_level: return True # KEY(block context): '?' (' '|'\n') else: return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' def check_value(self): # VALUE(flow context): ':' if self.flow_level: return True # VALUE(block context): ':' (' '|'\n') else: return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' def check_plain(self): # A plain scalar may start with any non-space character except: # '-', '?', ':', ',', '[', ']', '{', '}', # '#', '&', '*', '!', '|', '>', '\'', '\"', # '%', '@', '`'. # # It may also start with # '-', '?', ':' # if it is followed by a non-space character. # # Note that we limit the last rule to the block context (except the # '-' character) because we want the flow context to be space # independent. ch = self.peek() return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029' and (ch == '-' or (not self.flow_level and ch in '?:'))) # Scanners. def scan_to_next_token(self): # We ignore spaces, line breaks and comments. # If we find a line break in the block context, we set the flag # `allow_simple_key` on. # The byte order mark is stripped if it's the first character in the # stream. We do not yet support BOM inside the stream as the # specification requires. Any such mark will be considered as a part # of the document. # # TODO: We need to make tab handling rules more sane. A good rule is # Tabs cannot precede tokens # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, # KEY(block), VALUE(block), BLOCK-ENTRY # So the checking code is # if <TAB>: # self.allow_simple_keys = False # We also need to add the check for `allow_simple_keys == True` to # `unwind_indent` before issuing BLOCK-END. # Scanners for block, flow, and plain scalars need to be modified. if self.index == 0 and self.peek() == '\uFEFF': self.forward() found = False while not found: while self.peek() == ' ': self.forward() if self.peek() == '#': while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() if self.scan_line_break(): if not self.flow_level: self.allow_simple_key = True else: found = True def scan_directive(self): # See the specification for details. start_mark = self.get_mark() self.forward() name = self.scan_directive_name(start_mark) value = None if name == 'YAML': value = self.scan_yaml_directive_value(start_mark) end_mark = self.get_mark() elif name == 'TAG': value = self.scan_tag_directive_value(start_mark) end_mark = self.get_mark() else: end_mark = self.get_mark() while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() self.scan_directive_ignored_line(start_mark) return DirectiveToken(name, value, start_mark, end_mark) def scan_directive_name(self, start_mark): # See the specification for details. 
length = 0 ch = self.peek(length) while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_': length += 1 ch = self.peek(length) if not length: raise ScannerError("while scanning a directive", start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) value = self.prefix(length) self.forward(length) ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) return value def scan_yaml_directive_value(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() major = self.scan_yaml_directive_number(start_mark) if self.peek() != '.': raise ScannerError("while scanning a directive", start_mark, "expected a digit or '.', but found %r" % self.peek(), self.get_mark()) self.forward() minor = self.scan_yaml_directive_number(start_mark) if self.peek() not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected a digit or ' ', but found %r" % self.peek(), self.get_mark()) return (major, minor) def scan_yaml_directive_number(self, start_mark): # See the specification for details. ch = self.peek() if not ('0' <= ch <= '9'): raise ScannerError("while scanning a directive", start_mark, "expected a digit, but found %r" % ch, self.get_mark()) length = 0 while '0' <= self.peek(length) <= '9': length += 1 value = int(self.prefix(length)) self.forward(length) return value def scan_tag_directive_value(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() handle = self.scan_tag_directive_handle(start_mark) while self.peek() == ' ': self.forward() prefix = self.scan_tag_directive_prefix(start_mark) return (handle, prefix) def scan_tag_directive_handle(self, start_mark): # See the specification for details. value = self.scan_tag_handle('directive', start_mark) ch = self.peek() if ch != ' ': raise ScannerError("while scanning a directive", start_mark, "expected ' ', but found %r" % ch, self.get_mark()) return value def scan_tag_directive_prefix(self, start_mark): # See the specification for details. value = self.scan_tag_uri('directive', start_mark) ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected ' ', but found %r" % ch, self.get_mark()) return value def scan_directive_ignored_line(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() if self.peek() == '#': while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() ch = self.peek() if ch not in '\0\r\n\x85\u2028\u2029': raise ScannerError("while scanning a directive", start_mark, "expected a comment or a line break, but found %r" % ch, self.get_mark()) self.scan_line_break() def scan_anchor(self, TokenClass): # The specification does not restrict characters for anchors and # aliases. This may lead to problems, for instance, the document: # [ *alias, value ] # can be interpreted in two ways, as # [ "value" ] # and # [ *alias , "value" ] # Therefore we restrict aliases to numbers and ASCII letters. 
start_mark = self.get_mark() indicator = self.peek() if indicator == '*': name = 'alias' else: name = 'anchor' self.forward() length = 0 ch = self.peek(length) while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_': length += 1 ch = self.peek(length) if not length: raise ScannerError("while scanning an %s" % name, start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) value = self.prefix(length) self.forward(length) ch = self.peek() if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': raise ScannerError("while scanning an %s" % name, start_mark, "expected alphabetic or numeric character, but found %r" % ch, self.get_mark()) end_mark = self.get_mark() return TokenClass(value, start_mark, end_mark) def scan_tag(self): # See the specification for details. start_mark = self.get_mark() ch = self.peek(1) if ch == '<': handle = None self.forward(2) suffix = self.scan_tag_uri('tag', start_mark) if self.peek() != '>': raise ScannerError("while parsing a tag", start_mark, "expected '>', but found %r" % self.peek(), self.get_mark()) self.forward() elif ch in '\0 \t\r\n\x85\u2028\u2029': handle = None suffix = '!' self.forward() else: length = 1 use_handle = False while ch not in '\0 \r\n\x85\u2028\u2029': if ch == '!': use_handle = True break length += 1 ch = self.peek(length) handle = '!' if use_handle: handle = self.scan_tag_handle('tag', start_mark) else: handle = '!' self.forward() suffix = self.scan_tag_uri('tag', start_mark) ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a tag", start_mark, "expected ' ', but found %r" % ch, self.get_mark()) value = (handle, suffix) end_mark = self.get_mark() return TagToken(value, start_mark, end_mark) def scan_block_scalar(self, style): # See the specification for details. if style == '>': folded = True else: folded = False chunks = [] start_mark = self.get_mark() # Scan the header. self.forward() chomping, increment = self.scan_block_scalar_indicators(start_mark) self.scan_block_scalar_ignored_line(start_mark) # Determine the indentation level and go to the first non-empty line. min_indent = self.indent+1 if min_indent < 1: min_indent = 1 if increment is None: breaks, max_indent, end_mark = self.scan_block_scalar_indentation() indent = max(min_indent, max_indent) else: indent = min_indent+increment-1 breaks, end_mark = self.scan_block_scalar_breaks(indent) line_break = '' # Scan the inner part of the block scalar. while self.column == indent and self.peek() != '\0': chunks.extend(breaks) leading_non_space = self.peek() not in ' \t' length = 0 while self.peek(length) not in '\0\r\n\x85\u2028\u2029': length += 1 chunks.append(self.prefix(length)) self.forward(length) line_break = self.scan_line_break() breaks, end_mark = self.scan_block_scalar_breaks(indent) if self.column == indent and self.peek() != '\0': # Unfortunately, folding rules are ambiguous. # # This is the folding according to the specification: if folded and line_break == '\n' \ and leading_non_space and self.peek() not in ' \t': if not breaks: chunks.append(' ') else: chunks.append(line_break) # This is Clark Evans's interpretation (also in the spec # examples): # #if folded and line_break == '\n': # if not breaks: # if self.peek() not in ' \t': # chunks.append(' ') # else: # chunks.append(line_break) #else: # chunks.append(line_break) else: break # Chomp the tail. if chomping is not False: chunks.append(line_break) if chomping is True: chunks.extend(breaks) # We are done. 
return ScalarToken(''.join(chunks), False, start_mark, end_mark, style) def scan_block_scalar_indicators(self, start_mark): # See the specification for details. chomping = None increment = None ch = self.peek() if ch in '+-': if ch == '+': chomping = True else: chomping = False self.forward() ch = self.peek() if ch in '0123456789': increment = int(ch) if increment == 0: raise ScannerError("while scanning a block scalar", start_mark, "expected indentation indicator in the range 1-9, but found 0", self.get_mark()) self.forward() elif ch in '0123456789': increment = int(ch) if increment == 0: raise ScannerError("while scanning a block scalar", start_mark, "expected indentation indicator in the range 1-9, but found 0", self.get_mark()) self.forward() ch = self.peek() if ch in '+-': if ch == '+': chomping = True else: chomping = False self.forward() ch = self.peek() if ch not in '\0 \r\n\x85\u2028\u2029': raise ScannerError("while scanning a block scalar", start_mark, "expected chomping or indentation indicators, but found %r" % ch, self.get_mark()) return chomping, increment def scan_block_scalar_ignored_line(self, start_mark): # See the specification for details. while self.peek() == ' ': self.forward() if self.peek() == '#': while self.peek() not in '\0\r\n\x85\u2028\u2029': self.forward() ch = self.peek() if ch not in '\0\r\n\x85\u2028\u2029': raise ScannerError("while scanning a block scalar", start_mark, "expected a comment or a line break, but found %r" % ch, self.get_mark()) self.scan_line_break() def scan_block_scalar_indentation(self): # See the specification for details. chunks = [] max_indent = 0 end_mark = self.get_mark() while self.peek() in ' \r\n\x85\u2028\u2029': if self.peek() != ' ': chunks.append(self.scan_line_break()) end_mark = self.get_mark() else: self.forward() if self.column > max_indent: max_indent = self.column return chunks, max_indent, end_mark def scan_block_scalar_breaks(self, indent): # See the specification for details. chunks = [] end_mark = self.get_mark() while self.column < indent and self.peek() == ' ': self.forward() while self.peek() in '\r\n\x85\u2028\u2029': chunks.append(self.scan_line_break()) end_mark = self.get_mark() while self.column < indent and self.peek() == ' ': self.forward() return chunks, end_mark def scan_flow_scalar(self, style): # See the specification for details. # Note that we loose indentation rules for quoted scalars. Quoted # scalars don't need to adhere indentation because " and ' clearly # mark the beginning and the end of them. Therefore we are less # restrictive then the specification requires. We only need to check # that document separators are not included in scalars. if style == '"': double = True else: double = False chunks = [] start_mark = self.get_mark() quote = self.peek() self.forward() chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) while self.peek() != quote: chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) self.forward() end_mark = self.get_mark() return ScalarToken(''.join(chunks), False, start_mark, end_mark, style) ESCAPE_REPLACEMENTS = { '0': '\0', 'a': '\x07', 'b': '\x08', 't': '\x09', '\t': '\x09', 'n': '\x0A', 'v': '\x0B', 'f': '\x0C', 'r': '\x0D', 'e': '\x1B', ' ': '\x20', '\"': '\"', '\\': '\\', '/': '/', 'N': '\x85', '_': '\xA0', 'L': '\u2028', 'P': '\u2029', } ESCAPE_CODES = { 'x': 2, 'u': 4, 'U': 8, } def scan_flow_scalar_non_spaces(self, double, start_mark): # See the specification for details. 
chunks = [] while True: length = 0 while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029': length += 1 if length: chunks.append(self.prefix(length)) self.forward(length) ch = self.peek() if not double and ch == '\'' and self.peek(1) == '\'': chunks.append('\'') self.forward(2) elif (double and ch == '\'') or (not double and ch in '\"\\'): chunks.append(ch) self.forward() elif double and ch == '\\': self.forward() ch = self.peek() if ch in self.ESCAPE_REPLACEMENTS: chunks.append(self.ESCAPE_REPLACEMENTS[ch]) self.forward() elif ch in self.ESCAPE_CODES: length = self.ESCAPE_CODES[ch] self.forward() for k in range(length): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError("while scanning a double-quoted scalar", start_mark, "expected escape sequence of %d hexadecimal numbers, but found %r" % (length, self.peek(k)), self.get_mark()) code = int(self.prefix(length), 16) chunks.append(chr(code)) self.forward(length) elif ch in '\r\n\x85\u2028\u2029': self.scan_line_break() chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) else: raise ScannerError("while scanning a double-quoted scalar", start_mark, "found unknown escape character %r" % ch, self.get_mark()) else: return chunks def scan_flow_scalar_spaces(self, double, start_mark): # See the specification for details. chunks = [] length = 0 while self.peek(length) in ' \t': length += 1 whitespaces = self.prefix(length) self.forward(length) ch = self.peek() if ch == '\0': raise ScannerError("while scanning a quoted scalar", start_mark, "found unexpected end of stream", self.get_mark()) elif ch in '\r\n\x85\u2028\u2029': line_break = self.scan_line_break() breaks = self.scan_flow_scalar_breaks(double, start_mark) if line_break != '\n': chunks.append(line_break) elif not breaks: chunks.append(' ') chunks.extend(breaks) else: chunks.append(whitespaces) return chunks def scan_flow_scalar_breaks(self, double, start_mark): # See the specification for details. chunks = [] while True: # Instead of checking indentation, we check for document # separators. prefix = self.prefix(3) if (prefix == '---' or prefix == '...') \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': raise ScannerError("while scanning a quoted scalar", start_mark, "found unexpected document separator", self.get_mark()) while self.peek() in ' \t': self.forward() if self.peek() in '\r\n\x85\u2028\u2029': chunks.append(self.scan_line_break()) else: return chunks def scan_plain(self): # See the specification for details. # We add an additional restriction for the flow context: # plain scalars in the flow context cannot contain ',' or '?'. # We also keep track of the `allow_simple_key` flag here. # Indentation rules are loosed for the flow context. chunks = [] start_mark = self.get_mark() end_mark = start_mark indent = self.indent+1 # We allow zero indentation for scalars, but then we need to check for # document separators at the beginning of the line. 
#if indent == 0: # indent = 1 spaces = [] while True: length = 0 if self.peek() == '#': break while True: ch = self.peek(length) if ch in '\0 \t\r\n\x85\u2028\u2029' \ or (ch == ':' and self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029' + (u',[]{}' if self.flow_level else u''))\ or (self.flow_level and ch in ',?[]{}'): break length += 1 if length == 0: break self.allow_simple_key = False chunks.extend(spaces) chunks.append(self.prefix(length)) self.forward(length) end_mark = self.get_mark() spaces = self.scan_plain_spaces(indent, start_mark) if not spaces or self.peek() == '#' \ or (not self.flow_level and self.column < indent): break return ScalarToken(''.join(chunks), True, start_mark, end_mark) def scan_plain_spaces(self, indent, start_mark): # See the specification for details. # The specification is really confusing about tabs in plain scalars. # We just forbid them completely. Do not use tabs in YAML! chunks = [] length = 0 while self.peek(length) in ' ': length += 1 whitespaces = self.prefix(length) self.forward(length) ch = self.peek() if ch in '\r\n\x85\u2028\u2029': line_break = self.scan_line_break() self.allow_simple_key = True prefix = self.prefix(3) if (prefix == '---' or prefix == '...') \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return breaks = [] while self.peek() in ' \r\n\x85\u2028\u2029': if self.peek() == ' ': self.forward() else: breaks.append(self.scan_line_break()) prefix = self.prefix(3) if (prefix == '---' or prefix == '...') \ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': return if line_break != '\n': chunks.append(line_break) elif not breaks: chunks.append(' ') chunks.extend(breaks) elif whitespaces: chunks.append(whitespaces) return chunks def scan_tag_handle(self, name, start_mark): # See the specification for details. # For some strange reasons, the specification does not allow '_' in # tag handles. I have allowed it anyway. ch = self.peek() if ch != '!': raise ScannerError("while scanning a %s" % name, start_mark, "expected '!', but found %r" % ch, self.get_mark()) length = 1 ch = self.peek(length) if ch != ' ': while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-_': length += 1 ch = self.peek(length) if ch != '!': self.forward(length) raise ScannerError("while scanning a %s" % name, start_mark, "expected '!', but found %r" % ch, self.get_mark()) length += 1 value = self.prefix(length) self.forward(length) return value def scan_tag_uri(self, name, start_mark): # See the specification for details. # Note: we do not check if URI is well-formed. chunks = [] length = 0 ch = self.peek(length) while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ or ch in '-;/?:@&=+$,_.!~*\'()[]%': if ch == '%': chunks.append(self.prefix(length)) self.forward(length) length = 0 chunks.append(self.scan_uri_escapes(name, start_mark)) else: length += 1 ch = self.peek(length) if length: chunks.append(self.prefix(length)) self.forward(length) length = 0 if not chunks: raise ScannerError("while parsing a %s" % name, start_mark, "expected URI, but found %r" % ch, self.get_mark()) return ''.join(chunks) def scan_uri_escapes(self, name, start_mark): # See the specification for details. 
codes = [] mark = self.get_mark() while self.peek() == '%': self.forward() for k in range(2): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError("while scanning a %s" % name, start_mark, "expected URI escape sequence of 2 hexadecimal numbers, but found %r" % self.peek(k), self.get_mark()) codes.append(int(self.prefix(2), 16)) self.forward(2) try: value = bytes(codes).decode('utf-8') except UnicodeDecodeError as exc: raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) return value def scan_line_break(self): # Transforms: # '\r\n' : '\n' # '\r' : '\n' # '\n' : '\n' # '\x85' : '\n' # '\u2028' : '\u2028' # '\u2029 : '\u2029' # default : '' ch = self.peek() if ch in '\r\n\x85': if self.prefix(2) == '\r\n': self.forward(2) else: self.forward() return '\n' elif ch in '\u2028\u2029': self.forward() return ch return ''
51,279
Python
.py
1,216
30.636513
104
0.544283
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
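A minimal usage sketch for the scanner above, assuming the vendored copy behaves like upstream PyYAML, where yaml.scan() drives this Scanner through the Loader classes:

import yaml

# Each token carries the start/end marks produced by get_mark() in the scanner.
for token in yaml.scan("key: [1, 2]\n"):
    print(type(token).__name__, getattr(token, 'value', ''))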
8,442
resolver.py
rembo10_headphones/lib/yaml/resolver.py
__all__ = ['BaseResolver', 'Resolver'] from .error import * from .nodes import * import re class ResolverError(YAMLError): pass class BaseResolver: DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' yaml_implicit_resolvers = {} yaml_path_resolvers = {} def __init__(self): self.resolver_exact_paths = [] self.resolver_prefix_paths = [] @classmethod def add_implicit_resolver(cls, tag, regexp, first): if not 'yaml_implicit_resolvers' in cls.__dict__: implicit_resolvers = {} for key in cls.yaml_implicit_resolvers: implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] cls.yaml_implicit_resolvers = implicit_resolvers if first is None: first = [None] for ch in first: cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) @classmethod def add_path_resolver(cls, tag, path, kind=None): # Note: `add_path_resolver` is experimental. The API could be changed. # `new_path` is a pattern that is matched against the path from the # root to the node that is being considered. `node_path` elements are # tuples `(node_check, index_check)`. `node_check` is a node class: # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` # matches any kind of a node. `index_check` could be `None`, a boolean # value, a string value, or a number. `None` and `False` match against # any _value_ of sequence and mapping nodes. `True` matches against # any _key_ of a mapping node. A string `index_check` matches against # a mapping value that corresponds to a scalar key which content is # equal to the `index_check` value. An integer `index_check` matches # against a sequence value with the index equal to `index_check`. if not 'yaml_path_resolvers' in cls.__dict__: cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() new_path = [] for element in path: if isinstance(element, (list, tuple)): if len(element) == 2: node_check, index_check = element elif len(element) == 1: node_check = element[0] index_check = True else: raise ResolverError("Invalid path element: %s" % element) else: node_check = None index_check = element if node_check is str: node_check = ScalarNode elif node_check is list: node_check = SequenceNode elif node_check is dict: node_check = MappingNode elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ and not isinstance(node_check, str) \ and node_check is not None: raise ResolverError("Invalid node checker: %s" % node_check) if not isinstance(index_check, (str, int)) \ and index_check is not None: raise ResolverError("Invalid index checker: %s" % index_check) new_path.append((node_check, index_check)) if kind is str: kind = ScalarNode elif kind is list: kind = SequenceNode elif kind is dict: kind = MappingNode elif kind not in [ScalarNode, SequenceNode, MappingNode] \ and kind is not None: raise ResolverError("Invalid node kind: %s" % kind) cls.yaml_path_resolvers[tuple(new_path), kind] = tag def descend_resolver(self, current_node, current_index): if not self.yaml_path_resolvers: return exact_paths = {} prefix_paths = [] if current_node: depth = len(self.resolver_prefix_paths) for path, kind in self.resolver_prefix_paths[-1]: if self.check_resolver_prefix(depth, path, kind, current_node, current_index): if len(path) > depth: prefix_paths.append((path, kind)) else: exact_paths[kind] = self.yaml_path_resolvers[path, kind] else: for path, kind in self.yaml_path_resolvers: if not path: exact_paths[kind] = self.yaml_path_resolvers[path, kind] else: prefix_paths.append((path, kind)) 
self.resolver_exact_paths.append(exact_paths) self.resolver_prefix_paths.append(prefix_paths) def ascend_resolver(self): if not self.yaml_path_resolvers: return self.resolver_exact_paths.pop() self.resolver_prefix_paths.pop() def check_resolver_prefix(self, depth, path, kind, current_node, current_index): node_check, index_check = path[depth-1] if isinstance(node_check, str): if current_node.tag != node_check: return elif node_check is not None: if not isinstance(current_node, node_check): return if index_check is True and current_index is not None: return if (index_check is False or index_check is None) \ and current_index is None: return if isinstance(index_check, str): if not (isinstance(current_index, ScalarNode) and index_check == current_index.value): return elif isinstance(index_check, int) and not isinstance(index_check, bool): if index_check != current_index: return return True def resolve(self, kind, value, implicit): if kind is ScalarNode and implicit[0]: if value == '': resolvers = self.yaml_implicit_resolvers.get('', []) else: resolvers = self.yaml_implicit_resolvers.get(value[0], []) wildcard_resolvers = self.yaml_implicit_resolvers.get(None, []) for tag, regexp in resolvers + wildcard_resolvers: if regexp.match(value): return tag implicit = implicit[1] if self.yaml_path_resolvers: exact_paths = self.resolver_exact_paths[-1] if kind in exact_paths: return exact_paths[kind] if None in exact_paths: return exact_paths[None] if kind is ScalarNode: return self.DEFAULT_SCALAR_TAG elif kind is SequenceNode: return self.DEFAULT_SEQUENCE_TAG elif kind is MappingNode: return self.DEFAULT_MAPPING_TAG class Resolver(BaseResolver): pass Resolver.add_implicit_resolver( 'tag:yaml.org,2002:bool', re.compile(r'''^(?:yes|Yes|YES|no|No|NO |true|True|TRUE|false|False|FALSE |on|On|ON|off|Off|OFF)$''', re.X), list('yYnNtTfFoO')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:float', re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? |\.[0-9][0-9_]*(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* |[-+]?\.(?:inf|Inf|INF) |\.(?:nan|NaN|NAN))$''', re.X), list('-+0123456789.')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:int', re.compile(r'''^(?:[-+]?0b[0-1_]+ |[-+]?0[0-7_]+ |[-+]?(?:0|[1-9][0-9_]*) |[-+]?0x[0-9a-fA-F_]+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), list('-+0123456789')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:merge', re.compile(r'^(?:<<)$'), ['<']) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:null', re.compile(r'''^(?: ~ |null|Null|NULL | )$''', re.X), ['~', 'n', 'N', '']) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:timestamp', re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? (?:[Tt]|[ \t]+)[0-9][0-9]? :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), list('0123456789')) Resolver.add_implicit_resolver( 'tag:yaml.org,2002:value', re.compile(r'^(?:=)$'), ['=']) # The following resolver is only for documentation purposes. It cannot work # because plain scalars cannot start with '!', '&', or '*'. Resolver.add_implicit_resolver( 'tag:yaml.org,2002:yaml', re.compile(r'^(?:!|&|\*)$'), list('!&*'))
9,004
Python
.py
203
32.463054
82
0.538453
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
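A short sketch of how the implicit resolvers registered above classify plain scalars; Resolver and ScalarNode come straight from the package, and implicit=(True, False) marks the value as a plain (unquoted) scalar:

from yaml.nodes import ScalarNode
from yaml.resolver import Resolver

resolver = Resolver()
print(resolver.resolve(ScalarNode, '123', (True, False)))    # tag:yaml.org,2002:int
print(resolver.resolve(ScalarNode, 'no', (True, False)))     # tag:yaml.org,2002:bool
print(resolver.resolve(ScalarNode, 'hello', (True, False)))  # falls back to the str tag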
8,443
cyaml.py
rembo10_headphones/lib/yaml/cyaml.py
__all__ = [ 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper' ] from yaml._yaml import CParser, CEmitter from .constructor import * from .serializer import * from .representer import * from .resolver import * class CBaseLoader(CParser, BaseConstructor, BaseResolver): def __init__(self, stream): CParser.__init__(self, stream) BaseConstructor.__init__(self) BaseResolver.__init__(self) class CSafeLoader(CParser, SafeConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) SafeConstructor.__init__(self) Resolver.__init__(self) class CFullLoader(CParser, FullConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) FullConstructor.__init__(self) Resolver.__init__(self) class CUnsafeLoader(CParser, UnsafeConstructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) UnsafeConstructor.__init__(self) Resolver.__init__(self) class CLoader(CParser, Constructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) Constructor.__init__(self) Resolver.__init__(self) class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class CSafeDumper(CEmitter, SafeRepresenter, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) SafeRepresenter.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self) class CDumper(CEmitter, Serializer, Representer, Resolver): def __init__(self, stream, default_style=None, default_flow_style=False, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, sort_keys=True): CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags) Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style, sort_keys=sort_keys) Resolver.__init__(self)
3,851
Python
.py
79
39.21519
76
0.6472
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
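The usual way to consume these classes is the documented try/except idiom: prefer the LibYAML-backed loader and fall back to the pure-Python one when the C extension is not compiled in:

import yaml

try:
    from yaml import CSafeLoader as SafeLoader  # LibYAML-backed, much faster
except ImportError:
    from yaml import SafeLoader                 # pure-Python fallback

data = yaml.load('a: 1\nb: [2, 3]\n', Loader=SafeLoader)
print(data)  # {'a': 1, 'b': [2, 3]}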
8,444
redis.py
rembo10_headphones/lib/logutils/redis.py
#
# Copyright (C) 2011-2013 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with Redis queues.
"""
from logutils.queue import QueueHandler, QueueListener

import pickle

class RedisQueueHandler(QueueHandler):
    """
    A QueueHandler implementation which pushes pickled
    records to a Redis queue using a specified key.

    :param key: The key to use for the queue. Defaults to
                "python.logging".
    :param redis: If specified, this instance is used to
                  communicate with a Redis instance.
    :param limit: If specified, the queue is restricted to
                  have only this many elements.
    """
    def __init__(self, key='python.logging', redis=None, limit=0):
        if redis is None:
            # The redis-py client package, not this module.
            from redis import Redis
            redis = Redis()
        self.key = key
        assert limit >= 0
        self.limit = limit
        QueueHandler.__init__(self, redis)

    def enqueue(self, record):
        s = pickle.dumps(vars(record))
        self.queue.rpush(self.key, s)
        if self.limit:
            # Keep only the most recent `limit` entries.
            self.queue.ltrim(self.key, -self.limit, -1)

class RedisQueueListener(QueueListener):
    """
    A QueueListener implementation which fetches pickled
    records from a Redis queue using a specified key.

    :param key: The key to use for the queue. Defaults to
                "python.logging".
    :param redis: If specified, this instance is used to
                  communicate with a Redis instance.
    """
    def __init__(self, *handlers, **kwargs):
        redis = kwargs.get('redis')
        if redis is None:
            # The redis-py client package, not this module.
            from redis import Redis
            redis = Redis()
        self.key = kwargs.get('key', 'python.logging')
        QueueListener.__init__(self, redis, *handlers)

    def dequeue(self, block):
        """
        Dequeue and return a record.
        """
        if block:
            s = self.queue.blpop(self.key)[1]
        else:
            s = self.queue.lpop(self.key)
        if not s:
            record = None
        else:
            record = pickle.loads(s)
        return record

    def enqueue_sentinel(self):
        self.queue.rpush(self.key, '')
2,293
Python
.py
66
26.545455
67
0.61535
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
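A hedged wiring sketch for the two classes above. It assumes the redis-py package is installed and a Redis server is reachable with default settings (neither is shown in the file); the prepare() override rebuilds a LogRecord from the dict that dequeue() un-pickles, since vars(record) is what enqueue() stored:

import logging
from logutils.redis import RedisQueueHandler, RedisQueueListener

class RecordRebuildingListener(RedisQueueListener):
    # Hypothetical helper: dequeue() yields the pickled record __dict__,
    # so rebuild a real LogRecord before the handlers see it.
    def prepare(self, record):
        return logging.makeLogRecord(record)

handler = RedisQueueHandler(key='python.logging')
listener = RecordRebuildingListener(logging.StreamHandler(), key='python.logging')
listener.start()

log = logging.getLogger('app')
log.addHandler(handler)
log.warning('routed through Redis')
listener.stop()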
8,445
queue.py
rembo10_headphones/lib/logutils/queue.py
# # Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details. # """ This module contains classes which help you work with queues. A typical application is when you want to log from performance-critical threads, but where the handlers you want to use are slow (for example, :class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue, pass it to a :class:`QueueHandler` instance and use that instance with your loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same queue and some slow handlers, and call :meth:`~QueueListener.start` on it. This will start monitoring the queue on a separate thread and call all the configured handlers *on that thread*, so that your logging thread is not held up by the slow handlers. Note that as well as in-process queues, you can use these classes with queues from the :mod:`multiprocessing` module. **N.B.** This is part of the standard library since Python 3.2, so the version here is for use with earlier Python versions. """ import logging try: import queue as queue except ImportError: from . import queue import threading class QueueHandler(logging.Handler): """ This handler sends events to a queue. Typically, it would be used together with a multiprocessing Queue to centralise logging to file in one process (in a multi-process application), so as to avoid file write contention between processes. :param queue: The queue to send `LogRecords` to. """ def __init__(self, queue): """ Initialise an instance, using the passed queue. """ logging.Handler.__init__(self) self.queue = queue def enqueue(self, record): """ Enqueue a record. The base implementation uses :meth:`~queue.Queue.put_nowait`. You may want to override this method if you want to use blocking, timeouts or custom queue implementations. :param record: The record to enqueue. """ self.queue.put_nowait(record) def prepare(self, record): """ Prepares a record for queuing. The object returned by this method is enqueued. The base implementation formats the record to merge the message and arguments, and removes unpickleable items from the record in-place. You might want to override this method if you want to convert the record to a dict or JSON string, or send a modified copy of the record while leaving the original intact. :param record: The record to prepare. """ # The format operation gets traceback text into record.exc_text # (if there's exception data), and also puts the message into # record.message. We can then use this to replace the original # msg + args, as these might be unpickleable. We also zap the # exc_info attribute, as it's no longer needed and, if not None, # will typically not be pickleable. self.format(record) record.msg = record.message record.args = None record.exc_info = None return record def emit(self, record): """ Emit a record. Writes the LogRecord to the queue, preparing it for pickling first. :param record: The record to emit. """ try: self.enqueue(self.prepare(record)) except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class QueueListener(object): """ This class implements an internal threaded listener which watches for LogRecords being added to a queue, removes them and passes them to a list of handlers for processing. :param record: The queue to listen to. :param handlers: The handlers to invoke on everything received from the queue. """ _sentinel = None def __init__(self, queue, *handlers): """ Initialise an instance with the specified queue and handlers. 
""" self.queue = queue self.handlers = handlers self._stop = threading.Event() self._thread = None def dequeue(self, block): """ Dequeue a record and return it, optionally blocking. The base implementation uses :meth:`~queue.Queue.get`. You may want to override this method if you want to use timeouts or work with custom queue implementations. :param block: Whether to block if the queue is empty. If `False` and the queue is empty, an :class:`~queue.Empty` exception will be thrown. """ return self.queue.get(block) def start(self): """ Start the listener. This starts up a background thread to monitor the queue for LogRecords to process. """ self._thread = t = threading.Thread(target=self._monitor) t.setDaemon(True) t.start() def prepare(self , record): """ Prepare a record for handling. This method just returns the passed-in record. You may want to override this method if you need to do any custom marshalling or manipulation of the record before passing it to the handlers. :param record: The record to prepare. """ return record def handle(self, record): """ Handle a record. This just loops through the handlers offering them the record to handle. :param record: The record to handle. """ record = self.prepare(record) for handler in self.handlers: handler.handle(record) def _monitor(self): """ Monitor the queue for records, and ask the handler to deal with them. This method runs on a separate, internal thread. The thread will terminate if it sees a sentinel object in the queue. """ q = self.queue has_task_done = hasattr(q, 'task_done') while not self._stop.isSet(): try: record = self.dequeue(True) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: pass # There might still be records in the queue. while True: try: record = self.dequeue(False) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: break def enqueue_sentinel(self): """ Writes a sentinel to the queue to tell the listener to quit. This implementation uses ``put_nowait()``. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ self.queue.put_nowait(self._sentinel) def stop(self): """ Stop the listener. This asks the thread to terminate, and then waits for it to do so. Note that if you don't call this before your application exits, there may be some records still left on the queue, which won't be processed. """ self._stop.set() self.enqueue_sentinel() self._thread.join() self._thread = None
7,554
Python
.py
190
30.884211
78
0.636189
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
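An end-to-end sketch of the pattern the module docstring describes: the cheap QueueHandler sits on the logging thread, while the listener runs the slow handlers on its own thread:

import logging
import queue
from logutils.queue import QueueHandler, QueueListener

q = queue.Queue(-1)  # unbounded
root = logging.getLogger()
root.addHandler(QueueHandler(q))                      # fast: only enqueues
listener = QueueListener(q, logging.StreamHandler())  # slow handlers live here
listener.start()
root.warning('handled on the listener thread')
listener.stop()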
8,446
dictconfig.py
rembo10_headphones/lib/logutils/dictconfig.py
# # Copyright (C) 2009-2013 Vinay Sajip. See LICENSE.txt for details. # import logging.handlers import re import sys import types try: str except NameError: str = str try: Exception except NameError: Exception = Exception IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True # # This function is defined in logging only in recent versions of Python # try: from logging import _checkLevel except ImportError: def _checkLevel(level): if isinstance(level, int): rv = level elif str(level) == level: if level not in logging._levelNames: raise ValueError('Unknown level: %r' % level) rv = logging._levelNames[level] else: raise TypeError('Level not an integer or a ' 'valid string: %r' % level) return rv # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. The # conversion converts base dicts, lists and tuples to their wrapped # equivalents, whereas strings which match a conversion format are converted # appropriately. # # Each wrapper should have a configurator attribute holding the actual # configurator to use for conversion. class ConvertingDict(dict): """A converting dictionary wrapper.""" def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, key, default=None): value = dict.pop(self, key, default) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class ConvertingList(list): """A converting list wrapper.""" def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, idx=-1): value = list.pop(self, idx) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self return result class ConvertingTuple(tuple): """A converting tuple wrapper.""" def __getitem__(self, key): value = tuple.__getitem__(self, key) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class BaseConfigurator(object): """ The configurator base class which defines some useful defaults. 
""" CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$') WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { 'ext' : 'ext_convert', 'cfg' : 'cfg_convert', } # We might want to use a different one, e.g. importlib importer = __import__ "Allows the importer to be redefined." def __init__(self, config): """ Initialise an instance with the specified configuration dictionary. """ self.config = ConvertingDict(config) self.config.configurator = self def resolve(self, s): """ Resolve strings to objects using standard import and attribute syntax. """ name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' + frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v def ext_convert(self, value): """Default converter for the ext:// protocol.""" return self.resolve(value) def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] #print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) #rest should be empty return d def convert(self, value): """ Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. 
""" if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, str): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value def configure_custom(self, config): """Configure an object with a user-supplied factory.""" c = config.pop('()') if isinstance(c, str): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) result = c(**kwargs) if props: for name, value in list(props.items()): setattr(result, name, value) return result def as_tuple(self, value): """Utility function which converts lists to tuples.""" if isinstance(value, list): value = tuple(value) return value def named_handlers_supported(): major, minor = sys.version_info[:2] if major == 2: result = minor >= 7 elif major == 3: result = minor >= 2 else: result = (major > 3) return result class DictConfigurator(BaseConfigurator): """ Configure logging using a dictionary-like object to describe the configuration. """ def configure(self): """Do the configuration.""" config = self.config if 'version' not in config: raise ValueError("dictionary doesn't specify a version") if config['version'] != 1: raise ValueError("Unsupported version: %s" % config['version']) incremental = config.pop('incremental', False) EMPTY_DICT = {} logging._acquireLock() try: if incremental: handlers = config.get('handlers', EMPTY_DICT) # incremental handler config only if handler name # ties in to logging._handlers (Python 2.7, 3.2+) if named_handlers_supported(): for name in handlers: if name not in logging._handlers: raise ValueError('No handler found with ' 'name %r' % name) else: try: handler = logging._handlers[name] handler_config = handlers[name] level = handler_config.get('level', None) if level: handler.setLevel(_checkLevel(level)) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) loggers = config.get('loggers', EMPTY_DICT) for name in loggers: try: self.configure_logger(name, loggers[name], True) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) root = config.get('root', None) if root: try: self.configure_root(root, True) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure root ' 'logger: %s' % e) else: disable_existing = config.pop('disable_existing_loggers', True) logging._handlers.clear() del logging._handlerList[:] # Do formatters first - they don't refer to anything else formatters = config.get('formatters', EMPTY_DICT) for name in formatters: try: formatters[name] = self.configure_formatter( formatters[name]) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure ' 'formatter %r: %s' % (name, e)) # Next, do filters - they don't refer to anything else, either filters = config.get('filters', EMPTY_DICT) for name in filters: try: filters[name] = self.configure_filter(filters[name]) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure ' 
'filter %r: %s' % (name, e)) # Next, do handlers - they refer to formatters and filters # As handlers can refer to other handlers, sort the keys # to allow a deterministic order of configuration handlers = config.get('handlers', EMPTY_DICT) for name in sorted(handlers): try: handler = self.configure_handler(handlers[name]) handler.name = name handlers[name] = handler except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) # Next, do loggers - they refer to handlers and filters #we don't want to lose the existing loggers, #since other threads may have pointers to them. #existing is set to contain all existing loggers, #and as we go through the new configuration we #remove any which are configured. At the end, #what's left in existing is the set of loggers #which were in the previous configuration but #which are not in the new configuration. root = logging.root existing = sorted(root.manager.loggerDict.keys()) #The list needs to be sorted so that we can #avoid disabling child loggers of explicitly #named loggers. With a sorted list it is easier #to find the child loggers. #We'll keep the list of existing loggers #which are children of named loggers here... child_loggers = [] #now set up the new ones... loggers = config.get('loggers', EMPTY_DICT) for name in loggers: if name in existing: i = existing.index(name) prefixed = name + "." pflen = len(prefixed) num_existing = len(existing) i = i + 1 # look at the entry after name while (i < num_existing) and\ (existing[i][:pflen] == prefixed): child_loggers.append(existing[i]) i = i + 1 existing.remove(name) try: self.configure_logger(name, loggers[name]) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) #Disable any old loggers. There's no point deleting #them as other threads may continue to hold references #and by disabling them, you stop them doing any logging. #However, don't disable children of named loggers, as that's #probably not what was intended by the user. for log in existing: logger = root.manager.loggerDict[log] if log in child_loggers: logger.level = logging.NOTSET logger.handlers = [] logger.propagate = True elif disable_existing: logger.disabled = True # And finally, do the root logger root = config.get('root', None) if root: try: self.configure_root(root) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to configure root ' 'logger: %s' % e) finally: logging._releaseLock() def configure_formatter(self, config): """Configure a formatter from a dictionary.""" if '()' in config: factory = config['()'] # for use in exception handler try: result = self.configure_custom(config) except TypeError: te = sys.exc_info()[1] if "'format'" not in str(te): raise #Name of parameter changed from fmt to format. #Retry with old name. #This is so that code can be used with older Python versions #(e.g. 
by Django) config['fmt'] = config.pop('format') config['()'] = factory result = self.configure_custom(config) else: fmt = config.get('format', None) dfmt = config.get('datefmt', None) result = logging.Formatter(fmt, dfmt) return result def configure_filter(self, config): """Configure a filter from a dictionary.""" if '()' in config: result = self.configure_custom(config) else: name = config.get('name', '') result = logging.Filter(name) return result def add_filters(self, filterer, filters): """Add filters to a filterer from a list of names.""" for f in filters: try: filterer.addFilter(self.config['filters'][f]) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to add filter %r: %s' % (f, e)) def configure_handler(self, config): """Configure a handler from a dictionary.""" formatter = config.pop('formatter', None) if formatter: try: formatter = self.config['formatters'][formatter] except Exception: e = sys.exc_info()[1] raise ValueError('Unable to set formatter ' '%r: %s' % (formatter, e)) level = config.pop('level', None) filters = config.pop('filters', None) if '()' in config: c = config.pop('()') if isinstance(c, str): c = self.resolve(c) factory = c else: klass = self.resolve(config.pop('class')) #Special case for handler which refers to another handler if issubclass(klass, logging.handlers.MemoryHandler) and\ 'target' in config: try: config['target'] = self.config['handlers'][config['target']] except Exception: e = sys.exc_info()[1] raise ValueError('Unable to set target handler ' '%r: %s' % (config['target'], e)) elif issubclass(klass, logging.handlers.SMTPHandler) and\ 'mailhost' in config: config['mailhost'] = self.as_tuple(config['mailhost']) elif issubclass(klass, logging.handlers.SysLogHandler) and\ 'address' in config: config['address'] = self.as_tuple(config['address']) factory = klass kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) try: result = factory(**kwargs) except TypeError: te = sys.exc_info()[1] if "'stream'" not in str(te): raise #The argument name changed from strm to stream #Retry with old name. #This is so that code can be used with older Python versions #(e.g. by Django) kwargs['strm'] = kwargs.pop('stream') result = factory(**kwargs) if formatter: result.setFormatter(formatter) if level is not None: result.setLevel(_checkLevel(level)) if filters: self.add_filters(result, filters) return result def add_handlers(self, logger, handlers): """Add handlers to a logger from a list of names.""" for h in handlers: try: logger.addHandler(self.config['handlers'][h]) except Exception: e = sys.exc_info()[1] raise ValueError('Unable to add handler %r: %s' % (h, e)) def common_logger_config(self, logger, config, incremental=False): """ Perform configuration which is common to root and non-root loggers. 
""" level = config.get('level', None) if level is not None: logger.setLevel(_checkLevel(level)) if not incremental: #Remove any existing handlers for h in logger.handlers[:]: logger.removeHandler(h) handlers = config.get('handlers', None) if handlers: self.add_handlers(logger, handlers) filters = config.get('filters', None) if filters: self.add_filters(logger, filters) def configure_logger(self, name, config, incremental=False): """Configure a non-root logger from a dictionary.""" logger = logging.getLogger(name) self.common_logger_config(logger, config, incremental) propagate = config.get('propagate', None) if propagate is not None: logger.propagate = propagate def configure_root(self, config, incremental=False): """Configure a root logger from a dictionary.""" root = logging.getLogger() self.common_logger_config(root, config, incremental) dictConfigClass = DictConfigurator def dictConfig(config): """Configure logging using a dictionary.""" dictConfigClass(config).configure()
22,822
Python
.py
529
28.512287
80
0.517365
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
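The configurator above wires formatters first, then handlers, then loggers and the root, because each later stage refers to the earlier ones by name. A minimal sketch of a config dict that exercises that ordering; the names ('brief', 'console', 'myapp') are illustrative, and the stdlib `logging.config.dictConfig` (which this module backports) is used:

```python
import logging
import logging.config  # stdlib equivalent of the dictConfig backport above

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,  # keep (don't disable) pre-existing loggers
    'formatters': {
        'brief': {'format': '%(levelname)s %(name)s: %(message)s'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'brief',  # resolved by name, hence formatters are configured first
            'level': 'DEBUG',
        },
    },
    'loggers': {
        'myapp': {'handlers': ['console'], 'level': 'INFO', 'propagate': False},
    },
    'root': {'handlers': ['console'], 'level': 'WARNING'},
}

logging.config.dictConfig(LOGGING)
logging.getLogger('myapp').info('configured via dictConfig')
```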
8,447
colorize.py
rembo10_headphones/lib/logutils/colorize.py
# # Copyright (C) 2010-2013 Vinay Sajip. All rights reserved. # import ctypes import logging import os try: unicode except NameError: unicode = None class ColorizingStreamHandler(logging.StreamHandler): """ A stream handler which supports colorizing of console streams under Windows, Linux and Mac OS X. :param strm: The stream to colorize - typically ``sys.stdout`` or ``sys.stderr``. """ # color names to indices color_map = { 'black': 0, 'red': 1, 'green': 2, 'yellow': 3, 'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7, } #levels to (background, foreground, bold/intense) if os.name == 'nt': level_map = { logging.DEBUG: (None, 'blue', True), logging.INFO: (None, 'white', False), logging.WARNING: (None, 'yellow', True), logging.ERROR: (None, 'red', True), logging.CRITICAL: ('red', 'white', True), } else: "Maps levels to colour/intensity settings." level_map = { logging.DEBUG: (None, 'blue', False), logging.INFO: (None, 'black', False), logging.WARNING: (None, 'yellow', False), logging.ERROR: (None, 'red', False), logging.CRITICAL: ('red', 'white', True), } csi = '\x1b[' reset = '\x1b[0m' @property def is_tty(self): "Returns true if the handler's stream is a terminal." isatty = getattr(self.stream, 'isatty', None) return isatty and isatty() def emit(self, record): try: message = self.format(record) stream = self.stream if unicode and isinstance(message, unicode): enc = getattr(stream, 'encoding', 'utf-8') message = message.encode(enc, 'replace') if not self.is_tty: stream.write(message) else: self.output_colorized(message) stream.write(getattr(self, 'terminator', '\n')) self.flush() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) if os.name != 'nt': def output_colorized(self, message): """ Output a colorized message. On Linux and Mac OS X, this method just writes the already-colorized message to the stream, since on these platforms console streams accept ANSI escape sequences for colorization. On Windows, this handler implements a subset of ANSI escape sequence handling by parsing the message, extracting the sequences and making Win32 API calls to colorize the output. :param message: The message to colorize and output. """ self.stream.write(message) else: import re ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m') nt_color_map = { 0: 0x00, # black 1: 0x04, # red 2: 0x02, # green 3: 0x06, # yellow 4: 0x01, # blue 5: 0x05, # magenta 6: 0x03, # cyan 7: 0x07, # white } def output_colorized(self, message): """ Output a colorized message. On Linux and Mac OS X, this method just writes the already-colorized message to the stream, since on these platforms console streams accept ANSI escape sequences for colorization. On Windows, this handler implements a subset of ANSI escape sequence handling by parsing the message, extracting the sequences and making Win32 API calls to colorize the output. :param message: The message to colorize and output. 
""" parts = self.ansi_esc.split(message) write = self.stream.write h = None fd = getattr(self.stream, 'fileno', None) if fd is not None: fd = fd() if fd in (1, 2): # stdout or stderr h = ctypes.windll.kernel32.GetStdHandle(-10 - fd) while parts: text = parts.pop(0) if text: write(text) if parts: params = parts.pop(0) if h is not None: params = [int(p) for p in params.split(';')] color = 0 for p in params: if 40 <= p <= 47: color |= self.nt_color_map[p - 40] << 4 elif 30 <= p <= 37: color |= self.nt_color_map[p - 30] elif p == 1: color |= 0x08 # foreground intensity on elif p == 0: # reset to default color color = 0x07 else: pass # error condition ignored ctypes.windll.kernel32.SetConsoleTextAttribute(h, color) def colorize(self, message, record): """ Colorize a message for a logging event. This implementation uses the ``level_map`` class attribute to map the LogRecord's level to a colour/intensity setting, which is then applied to the whole message. :param message: The message to colorize. :param record: The ``LogRecord`` for the message. """ if record.levelno in self.level_map: bg, fg, bold = self.level_map[record.levelno] params = [] if bg in self.color_map: params.append(str(self.color_map[bg] + 40)) if fg in self.color_map: params.append(str(self.color_map[fg] + 30)) if bold: params.append('1') if params: message = ''.join((self.csi, ';'.join(params), 'm', message, self.reset)) return message def format(self, record): """ Formats a record for output. This implementation colorizes the message line, but leaves any traceback unolorized. """ message = logging.StreamHandler.format(self, record) if self.is_tty: # Don't colorize any traceback parts = message.split('\n', 1) parts[0] = self.colorize(parts[0], record) message = '\n'.join(parts) return message
6,696
Python
.py
173
26.231214
80
0.518006
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
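ColorizingStreamHandler drops in wherever a StreamHandler would, mapping each level to a colour/intensity via `level_map` and applying it only when the stream is a terminal. A minimal sketch, assuming the vendored package is importable as `logutils`:

```python
import logging
import sys

from logutils.colorize import ColorizingStreamHandler  # assumed import path

handler = ColorizingStreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))

logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

# Colorized only when stdout is a tty; plain text when piped or redirected.
logger.debug('rendered blue on Unix terminals')
logger.critical('rendered bold white on red')
```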
8,448
__init__.py
rembo10_headphones/lib/logutils/__init__.py
# # Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details. # """ The logutils package provides a set of handlers for the Python standard library's logging package. Some of these handlers are out-of-scope for the standard library, and so they are packaged here. Others are updated versions which have appeared in recent Python releases, but are usable with older versions of Python, and so are packaged here. """ import logging from string import Template __version__ = '0.3.3' class NullHandler(logging.Handler): """ This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package. """ def handle(self, record): """ Handle a record. Does nothing in this class, but in other handlers it typically filters and then emits the record in a thread-safe way. """ pass def emit(self, record): """ Emit a record. This does nothing and shouldn't be called during normal processing, unless you redefine :meth:`~logutils.NullHandler.handle`. """ pass def createLock(self): """ Since this handler does nothing, it has no underlying I/O to protect against multi-threaded access, so this method returns `None`. """ self.lock = None class PercentStyle(object): default_format = '%(message)s' asctime_format = '%(asctime)s' def __init__(self, fmt): self._fmt = fmt or self.default_format def usesTime(self): return self._fmt.find(self.asctime_format) >= 0 def format(self, record): return self._fmt % record.__dict__ class StrFormatStyle(PercentStyle): default_format = '{message}' asctime_format = '{asctime}' def format(self, record): return self._fmt.format(**record.__dict__) class StringTemplateStyle(PercentStyle): default_format = '${message}' asctime_format = '${asctime}' def __init__(self, fmt): self._fmt = fmt or self.default_format self._tpl = Template(self._fmt) def usesTime(self): fmt = self._fmt return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0 def format(self, record): return self._tpl.substitute(**record.__dict__) _STYLES = { '%': PercentStyle, '{': StrFormatStyle, '$': StringTemplateStyle } class Formatter(logging.Formatter): """ Subclasses Formatter in Pythons earlier than 3.2 in order to give 3.2 Formatter behaviour with respect to allowing %-, {} or $- formatting. """ def __init__(self, fmt=None, datefmt=None, style='%'): """ Initialize the formatter with specified format strings. Initialize the formatter either with the specified format string, or a default as described above. Allow for specialized date formatting with the optional datefmt argument (if omitted, you get the ISO8601 format). Use a style parameter of '%', '{' or '$' to specify that you want to use one of %-formatting, :meth:`str.format` (``{}``) formatting or :class:`string.Template` formatting in your format string. """ if style not in _STYLES: raise ValueError('Style must be one of: %s' % ','.join( list(_STYLES.keys()))) self._style = _STYLES[style](fmt) self._fmt = self._style._fmt self.datefmt = datefmt def usesTime(self): """ Check if the format uses the creation time of the record. 
""" return self._style.usesTime() def formatMessage(self, record): return self._style.format(record) def format(self, record): """ Format the specified record as text. The record's attribute dictionary is used as the operand to a string formatting operation which yields the returned string. Before formatting the dictionary, a couple of preparatory steps are carried out. The message attribute of the record is computed using LogRecord.getMessage(). If the formatting string uses the time (as determined by a call to usesTime(), formatTime() is called to format the event time. If there is exception information, it is formatted using formatException() and appended to the message. """ record.message = record.getMessage() if self.usesTime(): record.asctime = self.formatTime(record, self.datefmt) s = self.formatMessage(record) if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: if s[-1:] != "\n": s = s + "\n" s = s + record.exc_text return s class BraceMessage(object): def __init__(self, fmt, *args, **kwargs): self.fmt = fmt self.args = args self.kwargs = kwargs self.str = None def __str__(self): if self.str is None: self.str = self.fmt.format(*self.args, **self.kwargs) return self.str class DollarMessage(object): def __init__(self, fmt, **kwargs): self.fmt = fmt self.kwargs = kwargs self.str = None def __str__(self): if self.str is None: self.str = Template(self.fmt).substitute(**self.kwargs) return self.str def hasHandlers(logger): """ See if a logger has any handlers. """ rv = False while logger: if logger.handlers: rv = True break elif not logger.propagate: break else: logger = logger.parent return rv
6,219
Python
.py
161
31.118012
79
0.638648
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
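The three style classes let one Formatter accept %-, {}- or $-placeholders via the `style` argument. A short sketch, again assuming a `logutils` import path; note that arguments to the logging call are still merged with %-interpolation by `LogRecord.getMessage()` before the style formats the record:

```python
import logging

from logutils import Formatter, NullHandler  # assumed import path

handler = logging.StreamHandler()
# '{' selects str.format-style placeholders; '%' and '$' select the other styles.
handler.setFormatter(Formatter('{levelname}:{name}:{message}', style='{'))

logging.getLogger('somelib').addHandler(NullHandler())  # library-side: silence "no handlers"

log = logging.getLogger('app')
log.addHandler(handler)
log.warning('brace-style %s', 'output')  # -> WARNING:app:brace-style output
```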
8,449
testing.py
rembo10_headphones/lib/logutils/testing.py
# # Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details. # import logging from logging.handlers import BufferingHandler class TestHandler(BufferingHandler): """ This handler collects records in a buffer for later inspection by your unit test code. :param matcher: The :class:`~logutils.testing.Matcher` instance to use for matching. """ def __init__(self, matcher): # BufferingHandler takes a "capacity" argument # so as to know when to flush. As we're overriding # shouldFlush anyway, we can set a capacity of zero. # You can call flush() manually to clear out the # buffer. BufferingHandler.__init__(self, 0) self.formatted = [] self.matcher = matcher def shouldFlush(self): """ Should the buffer be flushed? This returns `False` - you'll need to flush manually, usually after your unit test code checks the buffer contents against your expectations. """ return False def emit(self, record): """ Saves the `__dict__` of the record in the `buffer` attribute, and the formatted records in the `formatted` attribute. :param record: The record to emit. """ self.formatted.append(self.format(record)) self.buffer.append(record.__dict__) def flush(self): """ Clears out the `buffer` and `formatted` attributes. """ BufferingHandler.flush(self) self.formatted = [] def matches(self, **kwargs): """ Look for a saved dict whose keys/values match the supplied arguments. Return `True` if found, else `False`. :param kwargs: A set of keyword arguments whose names are LogRecord attributes and whose values are what you want to match in a stored LogRecord. """ result = False for d in self.buffer: if self.matcher.matches(d, **kwargs): result = True break #if not result: # print('*** matcher failed completely on %d records' % len(self.buffer)) return result def matchall(self, kwarglist): """ Accept a list of keyword argument values and ensure that the handler's buffer of stored records matches the list one-for-one. Return `True` if exactly matched, else `False`. :param kwarglist: A list of keyword-argument dictionaries, each of which will be passed to :meth:`matches` with the corresponding record from the buffer. """ if self.count != len(kwarglist): result = False else: result = True for d, kwargs in zip(self.buffer, kwarglist): if not self.matcher.matches(d, **kwargs): result = False break return result @property def count(self): """ The number of records in the buffer. """ return len(self.buffer) class Matcher(object): """ This utility class matches a stored dictionary of :class:`logging.LogRecord` attributes with keyword arguments passed to its :meth:`~logutils.testing.Matcher.matches` method. """ _partial_matches = ('msg', 'message') """ A list of :class:`logging.LogRecord` attribute names which will be checked for partial matches. If not in this list, an exact match will be attempted. """ def matches(self, d, **kwargs): """ Try to match a single dict with the supplied arguments. Keys whose values are strings and which are in self._partial_matches will be checked for partial (i.e. substring) matches. You can extend this scheme to (for example) do regular expression matching, etc. Return `True` if found, else `False`. :param kwargs: A set of keyword arguments whose names are LogRecord attributes and whose values are what you want to match in a stored LogRecord. 
""" result = True for k in kwargs: v = kwargs[k] dv = d.get(k) if not self.match_value(k, dv, v): #print('*** matcher failed: %s, %r, %r' % (k, dv, v)) result = False break return result def match_value(self, k, dv, v): """ Try to match a single stored value (dv) with a supplied value (v). Return `True` if found, else `False`. :param k: The key value (LogRecord attribute name). :param dv: The stored value to match against. :param v: The value to compare with the stored value. """ if type(v) != type(dv): result = False elif type(dv) is not str or k not in self._partial_matches: result = (v == dv) else: result = dv.find(v) >= 0 #if not result: # print('*** matcher failed on %s: %r vs. %r' % (k, dv, v)) return result
5,208
Python
.py
132
29.530303
84
0.589314
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
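TestHandler buffers records instead of emitting them, and Matcher compares stored record attributes, with substring matching for `msg`/`message`. A sketch of the intended unittest workflow, assuming the `logutils.testing` import path:

```python
import logging
import unittest

from logutils.testing import TestHandler, Matcher  # assumed import path

class LoggingTest(unittest.TestCase):
    def setUp(self):
        self.handler = TestHandler(Matcher())
        self.logger = logging.getLogger('under.test')
        self.logger.addHandler(self.handler)
        self.logger.setLevel(logging.DEBUG)

    def tearDown(self):
        self.logger.removeHandler(self.handler)

    def test_warning_recorded(self):
        self.logger.warning('disk %s is full', 'sda1')
        # 'message' is a partial match: a substring is enough.
        self.assertTrue(self.handler.matches(levelno=logging.WARNING, message='is full'))
        self.assertFalse(self.handler.matches(levelno=logging.ERROR))

if __name__ == '__main__':
    unittest.main()
```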
8,450
adapter.py
rembo10_headphones/lib/logutils/adapter.py
# # Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details. # import logging import logutils class LoggerAdapter(object): """ An adapter for loggers which makes it easier to specify contextual information in logging output. """ def __init__(self, logger, extra): """ Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired. You can effectively pass keyword arguments as shown in the following example: adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) """ self.logger = logger self.extra = extra def process(self, msg, kwargs): """ Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs. Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs. """ kwargs["extra"] = self.extra return msg, kwargs # # Boilerplate convenience methods # def debug(self, msg, *args, **kwargs): """ Delegate a debug call to the underlying logger. """ self.log(logging.DEBUG, msg, *args, **kwargs) def info(self, msg, *args, **kwargs): """ Delegate an info call to the underlying logger. """ self.log(logging.INFO, msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): """ Delegate a warning call to the underlying logger. """ self.log(logging.WARNING, msg, *args, **kwargs) warn = warning def error(self, msg, *args, **kwargs): """ Delegate an error call to the underlying logger. """ self.log(logging.ERROR, msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): """ Delegate an exception call to the underlying logger. """ kwargs["exc_info"] = 1 self.log(logging.ERROR, msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): """ Delegate a critical call to the underlying logger. """ self.log(logging.CRITICAL, msg, *args, **kwargs) def log(self, level, msg, *args, **kwargs): """ Delegate a log call to the underlying logger, after adding contextual information from this adapter instance. """ if self.isEnabledFor(level): msg, kwargs = self.process(msg, kwargs) self.logger._log(level, msg, args, **kwargs) def isEnabledFor(self, level): """ Is this logger enabled for level 'level'? """ if self.logger.manager.disable >= level: return False return level >= self.getEffectiveLevel() def setLevel(self, level): """ Set the specified level on the underlying logger. """ self.logger.setLevel(level) def getEffectiveLevel(self): """ Get the effective level for the underlying logger. """ return self.logger.getEffectiveLevel() def hasHandlers(self): """ See if the underlying logger has any handlers. """ return logutils.hasHandlers(self.logger)
3,517
Python
.py
97
28.298969
74
0.61923
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
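A sketch of the adapter in use, assuming the `logutils.adapter` import path: the dict passed at construction is injected as `extra` on every call, so format strings can reference its keys directly:

```python
import logging

from logutils.adapter import LoggerAdapter  # assumed import path

logging.basicConfig(format='%(ip)s %(user)s: %(message)s', level=logging.INFO)

# Each record logged through the adapter carries ip/user attributes,
# which the format string above picks up.
log = LoggerAdapter(logging.getLogger('webapp'), {'ip': '203.0.113.7', 'user': 'alice'})

log.info('login succeeded')           # -> 203.0.113.7 alice: login succeeded
log.warning('rate limit at %d%%', 90)
```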
8,451
http.py
rembo10_headphones/lib/logutils/http.py
# # Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details. # import logging class HTTPHandler(logging.Handler): """ A class which sends records to a Web server, using either GET or POST semantics. :param host: The Web server to connect to. :param url: The URL to use for the connection. :param method: The HTTP method to use. GET and POST are supported. :param secure: set to True if HTTPS is to be used. :param credentials: Set to a username/password tuple if desired. If set, a Basic authentication header is sent. WARNING: if using credentials, make sure `secure` is `True` to avoid sending usernames and passwords in cleartext over the wire. """ def __init__(self, host, url, method="GET", secure=False, credentials=None): """ Initialize an instance. """ logging.Handler.__init__(self) method = method.upper() if method not in ["GET", "POST"]: raise ValueError("method must be GET or POST") self.host = host self.url = url self.method = method self.secure = secure self.credentials = credentials def mapLogRecord(self, record): """ Default implementation of mapping the log record into a dict that is sent as the CGI data. Overwrite in your class. Contributed by Franz Glasner. :param record: The record to be mapped. """ return record.__dict__ def emit(self, record): """ Emit a record. Send the record to the Web server as a percent-encoded dictionary :param record: The record to be emitted. """ try: import http.client, urllib.parse host = self.host if self.secure: h = http.client.HTTPSConnection(host) else: h = http.client.HTTPConnection(host) url = self.url data = urllib.parse.urlencode(self.mapLogRecord(record)) if self.method == "GET": if (url.find('?') >= 0): sep = '&' else: sep = '?' url = url + "%c%s" % (sep, data) h.putrequest(self.method, url) # support multiple hosts on one IP address... # need to strip optional :port from host, if present i = host.find(":") if i >= 0: host = host[:i] h.putheader("Host", host) if self.method == "POST": h.putheader("Content-type", "application/x-www-form-urlencoded") h.putheader("Content-length", str(len(data))) if self.credentials: import base64 s = ('%s:%s' % self.credentials).encode('utf-8') s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') h.putheader('Authorization', s) h.endheaders(data if self.method == "POST" else None) h.getresponse() #can't do anything with the result except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record)
3,301
Python
.py
82
28.670732
80
0.551358
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
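A wiring sketch for HTTPHandler; the collector host, path, and credentials are hypothetical. POST keeps the record fields out of the URL, and `secure=True` matters whenever credentials are set, as the docstring warns:

```python
import logging

from logutils.http import HTTPHandler  # assumed import path

handler = HTTPHandler(
    'logs.example.com:443',            # hypothetical collector
    '/ingest',
    method='POST',                     # record fields go in the request body
    secure=True,                       # HTTPS; essential when credentials are set
    credentials=('ingest', 's3cret'),  # sent as a Basic auth header
)
handler.setLevel(logging.ERROR)

logging.getLogger('webapp').addHandler(handler)
logging.getLogger('webapp').error('checkout failed for order %s', 'A-1001')
```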
8,452
_itertools.py
rembo10_headphones/lib/importlib_resources/_itertools.py
from itertools import filterfalse from typing import ( Callable, Iterable, Iterator, Optional, Set, TypeVar, Union, ) # Type and type variable definitions _T = TypeVar('_T') _U = TypeVar('_U') def unique_everseen( iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None ) -> Iterator[_T]: "List unique elements, preserving order. Remember all elements ever seen." # unique_everseen('AAAABBBCCDAABBB') --> A B C D # unique_everseen('ABBCcAD', str.lower) --> A B C D seen: Set[Union[_T, _U]] = set() seen_add = seen.add if key is None: for element in filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element
884
Python
.py
31
22.354839
78
0.60424
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
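The docstring examples, spelled out as runnable code; importing the helper directly is for illustration only, since the module is internal:

```python
from importlib_resources._itertools import unique_everseen

print(list(unique_everseen('AAAABBBCCDAABBB')))         # ['A', 'B', 'C', 'D']
print(list(unique_everseen('ABBCcAD', key=str.lower)))  # ['A', 'B', 'C', 'D']

# Unlike set(), first-seen order is preserved, which is what
# readers.MultiplexedPath relies on when merging directory listings.
```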
8,453
_compat.py
rembo10_headphones/lib/importlib_resources/_compat.py
# flake8: noqa import abc import sys import pathlib from contextlib import suppress if sys.version_info >= (3, 10): from zipfile import Path as ZipPath # type: ignore else: from zipp import Path as ZipPath # type: ignore try: from typing import runtime_checkable # type: ignore except ImportError: def runtime_checkable(cls): # type: ignore return cls try: from typing import Protocol # type: ignore except ImportError: Protocol = abc.ABC # type: ignore class TraversableResourcesLoader: """ Adapt loaders to provide TraversableResources and other compatibility. Used primarily for Python 3.9 and earlier where the native loaders do not yet implement TraversableResources. """ def __init__(self, spec): self.spec = spec @property def path(self): return self.spec.origin def get_resource_reader(self, name): from . import readers, _adapters def _zip_reader(spec): with suppress(AttributeError): return readers.ZipReader(spec.loader, spec.name) def _namespace_reader(spec): with suppress(AttributeError, ValueError): return readers.NamespaceReader(spec.submodule_search_locations) def _available_reader(spec): with suppress(AttributeError): return spec.loader.get_resource_reader(spec.name) def _native_reader(spec): reader = _available_reader(spec) return reader if hasattr(reader, 'files') else None def _file_reader(spec): try: path = pathlib.Path(self.path) except TypeError: return None if path.exists(): return readers.FileReader(self) return ( # native reader if it supplies 'files' _native_reader(self.spec) or # local ZipReader if a zip module _zip_reader(self.spec) or # local NamespaceReader if a namespace module _namespace_reader(self.spec) or # local FileReader _file_reader(self.spec) # fallback - adapt the spec ResourceReader to TraversableReader or _adapters.CompatibilityFiles(self.spec) ) def wrap_spec(package): """ Construct a package spec with traversable compatibility on the spec/loader/reader. Supersedes _adapters.wrap_spec to use TraversableResourcesLoader from above for older Python compatibility (<3.10). """ from . import _adapters return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
2,704
Python
.py
75
27.653333
84
0.647352
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,454
_legacy.py
rembo10_headphones/lib/importlib_resources/_legacy.py
import functools import os import pathlib import types import warnings from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any from . import _common Package = Union[types.ModuleType, str] Resource = str def deprecated(func): @functools.wraps(func) def wrapper(*args, **kwargs): warnings.warn( f"{func.__name__} is deprecated. Use files() instead. " "Refer to https://importlib-resources.readthedocs.io" "/en/latest/using.html#migrating-from-legacy for migration advice.", DeprecationWarning, stacklevel=2, ) return func(*args, **kwargs) return wrapper def normalize_path(path): # type: (Any) -> str """Normalize a path by ensuring it is a string. If the resulting string contains path separators, an exception is raised. """ str_path = str(path) parent, file_name = os.path.split(str_path) if parent: raise ValueError(f'{path!r} must be only a file name') return file_name @deprecated def open_binary(package: Package, resource: Resource) -> BinaryIO: """Return a file-like object opened for binary reading of the resource.""" return (_common.files(package) / normalize_path(resource)).open('rb') @deprecated def read_binary(package: Package, resource: Resource) -> bytes: """Return the binary contents of the resource.""" return (_common.files(package) / normalize_path(resource)).read_bytes() @deprecated def open_text( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict', ) -> TextIO: """Return a file-like object opened for text reading of the resource.""" return (_common.files(package) / normalize_path(resource)).open( 'r', encoding=encoding, errors=errors ) @deprecated def read_text( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict', ) -> str: """Return the decoded string of the resource. The decoding-related arguments have the same semantics as those of bytes.decode(). """ with open_text(package, resource, encoding, errors) as fp: return fp.read() @deprecated def contents(package: Package) -> Iterable[str]: """Return an iterable of entries in `package`. Note that not all entries are resources. Specifically, directories are not considered resources. Use `is_resource()` on each entry returned here to check if it is a resource or not. """ return [path.name for path in _common.files(package).iterdir()] @deprecated def is_resource(package: Package, name: str) -> bool: """True if `name` is a resource inside `package`. Directories are *not* resources. """ resource = normalize_path(name) return any( traversable.name == resource and traversable.is_file() for traversable in _common.files(package).iterdir() ) @deprecated def path( package: Package, resource: Resource, ) -> ContextManager[pathlib.Path]: """A context manager providing a file path object to the resource. If the resource does not already exist on its own on the file system, a temporary file will be created. If the file was created, the file will be deleted upon exiting the context manager (no exception is raised if the file was deleted prior to the context manager exiting). """ return _common.as_file(_common.files(package) / normalize_path(resource))
3,494
Python
.py
94
32.308511
80
0.696413
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
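A sketch of the migration each DeprecationWarning points to, using this very package as a convenient target:

```python
import importlib_resources as resources

# Legacy spelling: works, but warns via the decorator above.
legacy = resources.read_text('importlib_resources', 'abc.py')

# Equivalent files()-based spelling the warning recommends:
modern = (resources.files('importlib_resources') / 'abc.py').read_text(encoding='utf-8')

assert legacy == modern
```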
8,455
abc.py
rembo10_headphones/lib/importlib_resources/abc.py
import abc from typing import BinaryIO, Iterable, Text from ._compat import runtime_checkable, Protocol class ResourceReader(metaclass=abc.ABCMeta): """Abstract base class for loaders to provide resource reading support.""" @abc.abstractmethod def open_resource(self, resource: Text) -> BinaryIO: """Return an opened, file-like object for binary reading. The 'resource' argument is expected to represent only a file name. If the resource cannot be found, FileNotFoundError is raised. """ # This deliberately raises FileNotFoundError instead of # NotImplementedError so that if this method is accidentally called, # it'll still do the right thing. raise FileNotFoundError @abc.abstractmethod def resource_path(self, resource: Text) -> Text: """Return the file system path to the specified resource. The 'resource' argument is expected to represent only a file name. If the resource does not exist on the file system, raise FileNotFoundError. """ # This deliberately raises FileNotFoundError instead of # NotImplementedError so that if this method is accidentally called, # it'll still do the right thing. raise FileNotFoundError @abc.abstractmethod def is_resource(self, path: Text) -> bool: """Return True if the named 'path' is a resource. Files are resources, directories are not. """ raise FileNotFoundError @abc.abstractmethod def contents(self) -> Iterable[str]: """Return an iterable of entries in `package`.""" raise FileNotFoundError @runtime_checkable class Traversable(Protocol): """ An object with a subset of pathlib.Path methods suitable for traversing directories and opening files. """ @abc.abstractmethod def iterdir(self): """ Yield Traversable objects in self """ def read_bytes(self): """ Read contents of self as bytes """ with self.open('rb') as strm: return strm.read() def read_text(self, encoding=None): """ Read contents of self as text """ with self.open(encoding=encoding) as strm: return strm.read() @abc.abstractmethod def is_dir(self) -> bool: """ Return True if self is a directory """ @abc.abstractmethod def is_file(self) -> bool: """ Return True if self is a file """ @abc.abstractmethod def joinpath(self, child): """ Return Traversable child in self """ def __truediv__(self, child): """ Return Traversable child in self """ return self.joinpath(child) @abc.abstractmethod def open(self, mode='r', *args, **kwargs): """ mode may be 'r' or 'rb' to open as text or binary. Return a handle suitable for reading (same as pathlib.Path.open). When opening as text, accepts encoding parameters such as those accepted by io.TextIOWrapper. """ @abc.abstractproperty def name(self) -> str: """ The base name of this object without any parent references. """ class TraversableResources(ResourceReader): """ The required interface for providing traversable resources. """ @abc.abstractmethod def files(self): """Return a Traversable object for the loaded package.""" def open_resource(self, resource): return self.files().joinpath(resource).open('rb') def resource_path(self, resource): raise FileNotFoundError(resource) def is_resource(self, path): return self.files().joinpath(path).is_file() def contents(self): return (item.name for item in self.files().iterdir())
3,886
Python
.py
108
28.564815
78
0.647906
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
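Anything that satisfies Traversable can be walked using only the interface above. A small sketch; on an on-disk package, `files()` returns a `pathlib.Path`, which provides the same methods by duck typing, and a zip-backed path works identically:

```python
from importlib_resources.abc import Traversable

def tree(node: Traversable, indent: str = '') -> None:
    """Print a Traversable recursively, touching only the abc's methods."""
    print(indent + node.name + ('/' if node.is_dir() else ''))
    if node.is_dir():
        for child in node.iterdir():
            tree(child, indent + '  ')

import importlib_resources
tree(importlib_resources.files('importlib_resources'))
```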
8,456
simple.py
rembo10_headphones/lib/importlib_resources/simple.py
""" Interface adapters for low-level readers. """ import abc import io import itertools from typing import BinaryIO, List from .abc import Traversable, TraversableResources class SimpleReader(abc.ABC): """ The minimum, low-level interface required from a resource provider. """ @abc.abstractproperty def package(self): # type: () -> str """ The name of the package for which this reader loads resources. """ @abc.abstractmethod def children(self): # type: () -> List['SimpleReader'] """ Obtain an iterable of SimpleReader for available child containers (e.g. directories). """ @abc.abstractmethod def resources(self): # type: () -> List[str] """ Obtain available named resources for this virtual package. """ @abc.abstractmethod def open_binary(self, resource): # type: (str) -> BinaryIO """ Obtain a File-like for a named resource. """ @property def name(self): return self.package.split('.')[-1] class ResourceHandle(Traversable): """ Handle to a named resource in a ResourceReader. """ def __init__(self, parent, name): # type: (ResourceContainer, str) -> None self.parent = parent self.name = name # type: ignore def is_file(self): return True def is_dir(self): return False def open(self, mode='r', *args, **kwargs): stream = self.parent.reader.open_binary(self.name) if 'b' not in mode: stream = io.TextIOWrapper(*args, **kwargs) return stream def joinpath(self, name): raise RuntimeError("Cannot traverse into a resource") class ResourceContainer(Traversable): """ Traversable container for a package's resources via its reader. """ def __init__(self, reader): # type: (SimpleReader) -> None self.reader = reader def is_dir(self): return True def is_file(self): return False def iterdir(self): files = (ResourceHandle(self, name) for name in self.reader.resources) dirs = map(ResourceContainer, self.reader.children()) return itertools.chain(files, dirs) def open(self, *args, **kwargs): raise IsADirectoryError() def joinpath(self, name): return next( traversable for traversable in self.iterdir() if traversable.name == name ) class TraversableReader(TraversableResources, SimpleReader): """ A TraversableResources based on SimpleReader. Resource providers may derive from this class to provide the TraversableResources interface by supplying the SimpleReader interface. """ def files(self): return ResourceContainer(self)
2,836
Python
.py
89
25.213483
85
0.641912
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,457
__init__.py
rembo10_headphones/lib/importlib_resources/__init__.py
"""Read resources contained within a package.""" from ._common import ( as_file, files, Package, ) from ._legacy import ( contents, open_binary, read_binary, open_text, read_text, is_resource, path, Resource, ) from importlib_resources.abc import ResourceReader __all__ = [ 'Package', 'Resource', 'ResourceReader', 'as_file', 'contents', 'files', 'is_resource', 'open_binary', 'open_text', 'path', 'read_binary', 'read_text', ]
525
Python
.py
31
12.806452
50
0.609407
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
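Basic usage of the public API re-exported above; the target file is this package's own `abc.py`, simply because it is guaranteed to exist:

```python
import importlib_resources

# files() returns a Traversable rooted at the package.
root = importlib_resources.files('importlib_resources')
target = root / 'abc.py'
print(target.is_file())  # True

# as_file() guarantees a concrete filesystem path, extracting to a
# temporary file only when the package is not on the file system.
with importlib_resources.as_file(target) as path:
    print(path.read_text(encoding='utf-8')[:40])
```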
8,458
_common.py
rembo10_headphones/lib/importlib_resources/_common.py
import os import pathlib import tempfile import functools import contextlib import types import importlib from typing import Union, Optional from .abc import ResourceReader, Traversable from ._compat import wrap_spec Package = Union[types.ModuleType, str] def files(package): # type: (Package) -> Traversable """ Get a Traversable resource from a package """ return from_package(get_package(package)) def get_resource_reader(package): # type: (types.ModuleType) -> Optional[ResourceReader] """ Return the package's loader if it's a ResourceReader. """ # We can't use # a issubclass() check here because apparently abc.'s __subclasscheck__() # hook wants to create a weak reference to the object, but # zipimport.zipimporter does not support weak references, resulting in a # TypeError. That seems terrible. spec = package.__spec__ reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore if reader is None: return None return reader(spec.name) # type: ignore def resolve(cand): # type: (Package) -> types.ModuleType return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand) def get_package(package): # type: (Package) -> types.ModuleType """Take a package name or module object and return the module. Raise an exception if the resolved module is not a package. """ resolved = resolve(package) if wrap_spec(resolved).submodule_search_locations is None: raise TypeError(f'{package!r} is not a package') return resolved def from_package(package): """ Return a Traversable object for the given package. """ spec = wrap_spec(package) reader = spec.loader.get_resource_reader(spec.name) return reader.files() @contextlib.contextmanager def _tempfile(reader, suffix=''): # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' # blocks due to the need to close the temporary file to work on Windows # properly. fd, raw_path = tempfile.mkstemp(suffix=suffix) try: try: os.write(fd, reader()) finally: os.close(fd) del reader yield pathlib.Path(raw_path) finally: try: os.remove(raw_path) except FileNotFoundError: pass @functools.singledispatch def as_file(path): """ Given a Traversable object, return that object as a path on the local file system in a context manager. """ return _tempfile(path.read_bytes, suffix=path.name) @as_file.register(pathlib.Path) @contextlib.contextmanager def _(path): """ Degenerate behavior for pathlib.Path objects. """ yield path
2,741
Python
.py
83
28.108434
88
0.699659
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
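A sketch of `as_file`'s singledispatch split: real `pathlib.Path` objects pass straight through the registered overload, while any other Traversable (a zip entry, say) is written out by `_tempfile` and removed on exit:

```python
import pathlib
import importlib_resources

trav = importlib_resources.files('importlib_resources') / '__init__.py'

# On-disk package: trav is already a pathlib.Path, so the pathlib.Path
# overload just yields it back; nothing is copied.
with importlib_resources.as_file(trav) as path:
    print(isinstance(path, pathlib.Path), path.exists())  # True True

# Zip-imported package: trav would be a zipfile-backed path instead, so the
# generic overload writes its bytes to a mkstemp() file and deletes it after.
```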
8,459
readers.py
rembo10_headphones/lib/importlib_resources/readers.py
import collections import pathlib import operator from . import abc from ._itertools import unique_everseen from ._compat import ZipPath def remove_duplicates(items): return iter(collections.OrderedDict.fromkeys(items)) class FileReader(abc.TraversableResources): def __init__(self, loader): self.path = pathlib.Path(loader.path).parent def resource_path(self, resource): """ Return the file system path to prevent `resources.path()` from creating a temporary copy. """ return str(self.path.joinpath(resource)) def files(self): return self.path class ZipReader(abc.TraversableResources): def __init__(self, loader, module): _, _, name = module.rpartition('.') self.prefix = loader.prefix.replace('\\', '/') + name + '/' self.archive = loader.archive def open_resource(self, resource): try: return super().open_resource(resource) except KeyError as exc: raise FileNotFoundError(exc.args[0]) def is_resource(self, path): # workaround for `zipfile.Path.is_file` returning true # for non-existent paths. target = self.files().joinpath(path) return target.is_file() and target.exists() def files(self): return ZipPath(self.archive, self.prefix) class MultiplexedPath(abc.Traversable): """ Given a series of Traversable objects, implement a merged version of the interface across all objects. Useful for namespace packages which may be multihomed at a single name. """ def __init__(self, *paths): self._paths = list(map(pathlib.Path, remove_duplicates(paths))) if not self._paths: message = 'MultiplexedPath must contain at least one path' raise FileNotFoundError(message) if not all(path.is_dir() for path in self._paths): raise NotADirectoryError('MultiplexedPath only supports directories') def iterdir(self): files = (file for path in self._paths for file in path.iterdir()) return unique_everseen(files, key=operator.attrgetter('name')) def read_bytes(self): raise FileNotFoundError(f'{self} is not a file') def read_text(self, *args, **kwargs): raise FileNotFoundError(f'{self} is not a file') def is_dir(self): return True def is_file(self): return False def joinpath(self, child): # first try to find child in current paths for file in self.iterdir(): if file.name == child: return file # if it does not exist, construct it with the first path return self._paths[0] / child __truediv__ = joinpath def open(self, *args, **kwargs): raise FileNotFoundError(f'{self} is not a file') @property def name(self): return self._paths[0].name def __repr__(self): paths = ', '.join(f"'{path}'" for path in self._paths) return f'MultiplexedPath({paths})' class NamespaceReader(abc.TraversableResources): def __init__(self, namespace_path): if 'NamespacePath' not in str(namespace_path): raise ValueError('Invalid path') self.path = MultiplexedPath(*list(namespace_path)) def resource_path(self, resource): """ Return the file system path to prevent `resources.path()` from creating a temporary copy. """ return str(self.path.joinpath(resource)) def files(self): return self.path
3,566
Python
.py
92
31.26087
81
0.648955
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
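A sketch of MultiplexedPath merging two directories, as happens for a namespace package that is multihomed across `sys.path`; the temporary directories are stand-ins:

```python
import pathlib
import tempfile

from importlib_resources.readers import MultiplexedPath

a = pathlib.Path(tempfile.mkdtemp())
b = pathlib.Path(tempfile.mkdtemp())
(a / 'one.txt').write_text('1')
(b / 'two.txt').write_text('2')
(b / 'one.txt').write_text('shadowed')  # same name as in the first path

merged = MultiplexedPath(a, b)
print(sorted(t.name for t in merged.iterdir()))  # ['one.txt', 'two.txt'] - duplicate dropped
print(merged.joinpath('two.txt').read_text())    # '2' - found in the second path
```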
8,460
_adapters.py
rembo10_headphones/lib/importlib_resources/_adapters.py
from contextlib import suppress from io import TextIOWrapper from . import abc class SpecLoaderAdapter: """ Adapt a package spec to adapt the underlying loader. """ def __init__(self, spec, adapter=lambda spec: spec.loader): self.spec = spec self.loader = adapter(spec) def __getattr__(self, name): return getattr(self.spec, name) class TraversableResourcesLoader: """ Adapt a loader to provide TraversableResources. """ def __init__(self, spec): self.spec = spec def get_resource_reader(self, name): return CompatibilityFiles(self.spec)._native() def _io_wrapper(file, mode='r', *args, **kwargs): if mode == 'r': return TextIOWrapper(file, *args, **kwargs) elif mode == 'rb': return file raise ValueError( "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) ) class CompatibilityFiles: """ Adapter for an existing or non-existent resource reader to provide a compatibility .files(). """ class SpecPath(abc.Traversable): """ Path tied to a module spec. Can be read and exposes the resource reader children. """ def __init__(self, spec, reader): self._spec = spec self._reader = reader def iterdir(self): if not self._reader: return iter(()) return iter( CompatibilityFiles.ChildPath(self._reader, path) for path in self._reader.contents() ) def is_file(self): return False is_dir = is_file def joinpath(self, other): if not self._reader: return CompatibilityFiles.OrphanPath(other) return CompatibilityFiles.ChildPath(self._reader, other) @property def name(self): return self._spec.name def open(self, mode='r', *args, **kwargs): return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) class ChildPath(abc.Traversable): """ Path tied to a resource reader child. Can be read but doesn't expose any meaningful children. """ def __init__(self, reader, name): self._reader = reader self._name = name def iterdir(self): return iter(()) def is_file(self): return self._reader.is_resource(self.name) def is_dir(self): return not self.is_file() def joinpath(self, other): return CompatibilityFiles.OrphanPath(self.name, other) @property def name(self): return self._name def open(self, mode='r', *args, **kwargs): return _io_wrapper( self._reader.open_resource(self.name), mode, *args, **kwargs ) class OrphanPath(abc.Traversable): """ Orphan path, not tied to a module spec or resource reader. Can't be read and doesn't expose any meaningful children. """ def __init__(self, *path_parts): if len(path_parts) < 1: raise ValueError('Need at least one path part to construct a path') self._path = path_parts def iterdir(self): return iter(()) def is_file(self): return False is_dir = is_file def joinpath(self, other): return CompatibilityFiles.OrphanPath(*self._path, other) @property def name(self): return self._path[-1] def open(self, mode='r', *args, **kwargs): raise FileNotFoundError("Can't open orphan path") def __init__(self, spec): self.spec = spec @property def _reader(self): with suppress(AttributeError): return self.spec.loader.get_resource_reader(self.spec.name) def _native(self): """ Return the native reader if it supports files(). """ reader = self._reader return reader if hasattr(reader, 'files') else self def __getattr__(self, attr): return getattr(self._reader, attr) def files(self): return CompatibilityFiles.SpecPath(self.spec, self._reader) def wrap_spec(package): """ Construct a package spec with traversable compatibility on the spec/loader/reader. """ return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
4,504
Python
.py
126
26.68254
87
0.594139
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,461
client.py
rembo10_headphones/lib/slskd_api/client.py
# Copyright (C) 2023 bigoulours # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. API_VERSION = 'v0' import requests from urllib.parse import urljoin from functools import reduce from base64 import b64encode from slskd_api.apis import * class HTTPAdapterTimeout(requests.adapters.HTTPAdapter): def __init__(self, timeout=None, **kwargs): super().__init__(**kwargs) self.timeout = timeout def send(self, *args, **kwargs): kwargs['timeout'] = self.timeout return super().send(*args, **kwargs) class SlskdClient: """ The main class that allows access to the different APIs of a slskd instance. An API-Key with appropriate permissions (`readwrite` for most use cases) must be set in slskd config file. Alternatively, provide your username and password. Requests error status raise corresponding error. Usage:: slskd = slskd_api.SlskdClient(host, api_key, url_base) app_status = slskd.application.state() """ def __init__(self, host: str, api_key: str = None, url_base: str = '/', username: str = None, password: str = None, token: str = None, verify_ssl: bool = True, timeout: float = None # requests timeout in seconds ): api_url = reduce(urljoin, [f'{host}/', f'{url_base}/', f'api/{API_VERSION}']) session = requests.Session() session.adapters['http://'] = HTTPAdapterTimeout(timeout=timeout) session.adapters['https://'] = HTTPAdapterTimeout(timeout=timeout) session.hooks = {'response': lambda r, *args, **kwargs: r.raise_for_status()} session.headers.update({'accept': '*/*'}) session.verify = verify_ssl header = {} if api_key: header['X-API-Key'] = api_key elif username and password: header['Authorization'] = 'Bearer ' + \ SessionApi(api_url, session).login(username, password).get('token', '') elif token: header['Authorization'] = 'Bearer ' + token else: raise ValueError('Please provide an API-Key, a valid token or username/password.') session.headers.update(header) base_args = (api_url, session) self.application = ApplicationApi(*base_args) self.conversations = ConversationsApi(*base_args) self.logs = LogsApi(*base_args) self.options = OptionsApi(*base_args) self.public_chat = PublicChatApi(*base_args) self.relay = RelayApi(*base_args) self.rooms = RoomsApi(*base_args) self.searches = SearchesApi(*base_args) self.server = ServerApi(*base_args) self.session = SessionApi(*base_args) self.shares = SharesApi(*base_args) self.transfers = TransfersApi(*base_args) self.users = UsersApi(*base_args) class MetricsApi: """ Getting the metrics works with a different endpoint. Default: <slskd_url>:5030/metrics. Metrics should be first activated in slskd config file. User/pass is independent from the main application and default value (slskd:slskd) should be changed. 
Usage:: metrics_api = slskd_api.MetricsApi(host, metrics_usr='slskd', metrics_pwd='slskd') metrics = metrics_api.get() """ def __init__(self, host: str, metrics_usr: str = 'slskd', metrics_pwd: str = 'slskd', metrics_url_base: str = '/metrics' ): self.metrics_url = urljoin(host, metrics_url_base) basic_auth = b64encode(bytes(f'{metrics_usr}:{metrics_pwd}', 'utf-8')) self.header = { 'accept': '*/*', 'Authorization': f'Basic {basic_auth.decode()}' } def get(self) -> str: """ Gets the Prometheus metrics as text. """ response = requests.get(self.metrics_url, headers=self.header) return response.text
4,666
Python
.py
105
36.066667
110
0.634756
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
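A construction sketch; the host, API key, and metrics credentials are placeholders, and `application.state()` is the call shown in the class docstring above:

```python
import slskd_api  # vendored under lib/ in this repo; assumed importable

# API-key auth (the key must be declared in slskd's config, typically readwrite):
slskd = slskd_api.SlskdClient('http://localhost:5030', api_key='<my-api-key>', timeout=30)
print(slskd.application.state())

# Username/password also works; a bearer token is then fetched via the session API:
# slskd = slskd_api.SlskdClient('http://localhost:5030', username='user', password='pass')

# Metrics live on a separate endpoint with independent credentials:
metrics = slskd_api.MetricsApi('http://localhost:5030', metrics_usr='slskd', metrics_pwd='slskd')
print(metrics.get()[:200])  # raw Prometheus text
```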
8,462
__init__.py
rembo10_headphones/lib/slskd_api/__init__.py
# Copyright (C) 2023 bigoulours # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .client import SlskdClient, MetricsApi __all__ = ('SlskdClient', 'MetricsApi')
777
Python
.py
16
47.3125
74
0.775
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,463
users.py
rembo10_headphones/lib/slskd_api/apis/users.py
# Copyright (C) 2023 bigoulours # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .base import * class UsersApi(BaseApi): """ This class contains the methods to interact with the Users API. """ def address(self, username: str) -> dict: """ Retrieves the address of the specified username. """ url = self.api_url + f'/users/{quote(username)}/endpoint' response = self.session.get(url) return response.json() def browse(self, username: str) -> dict: """ Retrieves the files shared by the specified username. """ url = self.api_url + f'/users/{quote(username)}/browse' response = self.session.get(url) return response.json() def browsing_status(self, username: str) -> dict: """ Retrieves the status of the current browse operation for the specified username, if any. Will return error 404 if called after the browsing operation has ended. Best called asynchronously while :py:func:`browse` is still running. """ url = self.api_url + f'/users/{quote(username)}/browse/status' response = self.session.get(url) return response.json() def directory(self, username: str, directory: str) -> dict: """ Retrieves the files from the specified directory from the specified username. """ url = self.api_url + f'/users/{quote(username)}/directory' data = { "directory": directory } response = self.session.post(url, json=data) return response.json() def info(self, username: str) -> dict: """ Retrieves information about the specified username. """ url = self.api_url + f'/users/{quote(username)}/info' response = self.session.get(url) return response.json() def status(self, username: str) -> dict: """ Retrieves status for the specified username. """ url = self.api_url + f'/users/{quote(username)}/status' response = self.session.get(url) return response.json()
2,766
Python
.py
66
34.80303
96
0.656672
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
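A sketch against a running instance; the host, key, and peer name are placeholders, and the shape of the returned dicts depends on the slskd version, so nothing beyond "it is a dict" is assumed:

```python
import slskd_api

slskd = slskd_api.SlskdClient('http://localhost:5030', api_key='<my-api-key>')
peer = 'some_username'  # hypothetical peer

print(slskd.users.status(peer))   # online/away state as reported by the server
print(slskd.users.address(peer))  # the peer's endpoint

# browse() blocks until the full share listing arrives; browsing_status() is
# meant to be polled concurrently (e.g. from another thread) while it runs,
# and returns 404 once the operation has finished.
listing = slskd.users.browse(peer)
print(type(listing))
```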
8,464
public_chat.py
rembo10_headphones/lib/slskd_api/apis/public_chat.py
# Copyright (C) 2023 bigoulours # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .base import * class PublicChatApi(BaseApi): """ [UNTESTED] This class contains the methods to interact with the PublicChat API. """ def start(self) -> bool: """ Starts public chat. :return: True if successful. """ url = self.api_url + '/publicchat' response = self.session.post(url) return response.ok def stop(self) -> bool: """ Stops public chat. :return: True if successful. """ url = self.api_url + '/publicchat' response = self.session.delete(url) return response.ok
1,313
Python
.py
35
32.342857
83
0.686661
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,465
conversations.py
rembo10_headphones/lib/slskd_api/apis/conversations.py
# Copyright (C) 2023 bigoulours # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .base import * class ConversationsApi(BaseApi): """ This class contains the methods to interact with the Conversations API. """ def acknowledge(self, username: str, id: int) -> bool: """ Acknowledges the given message id for the given username. :return: True if successful. """ url = self.api_url + f'/conversations/{quote(username)}/{id}' response = self.session.put(url) return response.ok def acknowledge_all(self, username: str) -> bool: """ Acknowledges all messages from the given username. :return: True if successful. """ url = self.api_url + f'/conversations/{quote(username)}' response = self.session.put(url) return response.ok def delete(self, username: str) -> bool: """ Closes the conversation associated with the given username. :return: True if successful. """ url = self.api_url + f'/conversations/{quote(username)}' response = self.session.delete(url) return response.ok def get(self, username: str, includeMessages: bool = True) -> dict: """ Gets the conversation associated with the specified username. """ url = self.api_url + f'/conversations/{quote(username)}' params = dict( includeMessages=includeMessages ) response = self.session.get(url, params=params) return response.json() def send(self, username: str, message: str) -> bool: """ Sends a private message to the specified username. :return: True if successful. """ url = self.api_url + f'/conversations/{quote(username)}' response = self.session.post(url, json=message) return response.ok def get_all(self, includeInactive: bool = False, unAcknowledgedOnly : bool = False) -> list: """ Gets all active conversations. """ url = self.api_url + '/conversations' params = dict( includeInactive=includeInactive, unAcknowledgedOnly=unAcknowledgedOnly ) response = self.session.get(url, params=params) return response.json() def get_messages(self, username: str, unAcknowledgedOnly : bool = False) -> list: """ Gets all messages associated with the specified username. """ url = self.api_url + f'/conversations/{quote(username)}/messages' params = dict( username=username, unAcknowledgedOnly=unAcknowledgedOnly ) response = self.session.get(url, params=params) return response.json()
3,417
Python
.py
83
33.349398
96
0.651658
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,466
rooms.py
rembo10_headphones/lib/slskd_api/apis/rooms.py
# Copyright (C) 2023 bigoulours # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .base import * class RoomsApi(BaseApi): """ This class contains the methods to interact with the Rooms API. """ def get_all_joined(self) -> list: """ Gets all joined rooms. :return: Names of the joined rooms. """ url = self.api_url + '/rooms/joined' response = self.session.get(url) return response.json() def join(self, roomName: str) -> dict: """ Joins a room. :return: room info: name, isPrivate, users, messages """ url = self.api_url + '/rooms/joined' response = self.session.post(url, json=roomName) return response.json() def get_joined(self, roomName: str) -> dict: """ Gets the specified room. :return: room info: name, isPrivate, users, messages """ url = self.api_url + f'/rooms/joined/{quote(roomName)}' response = self.session.get(url) return response.json() def leave(self, roomName: str) -> bool: """ Leaves a room. :return: True if successful. """ url = self.api_url + f'/rooms/joined/{quote(roomName)}' response = self.session.delete(url) return response.ok def send(self, roomName: str, message: str) -> bool: """ Sends a message to the specified room. :return: True if successful. """ url = self.api_url + f'/rooms/joined/{quote(roomName)}/messages' response = self.session.post(url, json=message) return response.ok def get_messages(self, roomName: str) -> list: """ Gets the current list of messages for the specified room. """ url = self.api_url + f'/rooms/joined/{quote(roomName)}/messages' response = self.session.get(url) return response.json() def set_ticker(self, roomName: str, ticker: str) -> bool: """ Sets a ticker for the specified room. :return: True if successful. """ url = self.api_url + f'/rooms/joined/{quote(roomName)}/ticker' response = self.session.post(url, json=ticker) return response.ok def add_member(self, roomName: str, username: str) -> bool: """ Adds a member to a private room. :return: True if successful. """ url = self.api_url + f'/rooms/joined/{quote(roomName)}/members' response = self.session.post(url, json=username) return response.ok def get_users(self, roomName: str) -> list: """ Gets the current list of users for the specified joined room. """ url = self.api_url + f'/rooms/joined/{quote(roomName)}/users' response = self.session.get(url) return response.json() def get_all(self) -> list: """ Gets a list of rooms from the server. """ url = self.api_url + '/rooms/available' response = self.session.get(url) return response.json()
3,731
Python
.py
96
31.177083
74
0.623326
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
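A hedged sketch of the room workflow (join, post, inspect, leave); client construction mirrors the earlier sketch and is an assumption, not repo code.

import requests
from slskd_api.apis.rooms import RoomsApi  # import path assumed

session = requests.Session()
session.headers.update({'X-API-Key': 'my-api-key'})  # auth header name assumed
rooms = RoomsApi('http://localhost:5030/api/v0', session)  # address assumed

info = rooms.join('nicotine')           # returns name, isPrivate, users, messages
rooms.send('nicotine', 'hi everyone')   # POST /rooms/joined/nicotine/messages
print(rooms.get_users('nicotine'))
rooms.leave('nicotine')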
8,467
shares.py
rembo10_headphones/lib/slskd_api/apis/shares.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class SharesApi(BaseApi):
    """
    This class contains the methods to interact with the Shares API.
    """

    def get_all(self) -> dict:
        """
        Gets the current list of shares.
        """
        url = self.api_url + '/shares'
        response = self.session.get(url)
        return response.json()

    def start_scan(self) -> bool:
        """
        Initiates a scan of the configured shares.

        :return: True if successful.
        """
        url = self.api_url + '/shares'
        response = self.session.put(url)
        return response.ok

    def cancel_scan(self) -> bool:
        """
        Cancels a share scan, if one is running.

        :return: True if successful.
        """
        url = self.api_url + '/shares'
        response = self.session.delete(url)
        return response.ok

    def get(self, id: str) -> dict:
        """
        Gets the share associated with the specified id.
        """
        url = self.api_url + f'/shares/{id}'
        response = self.session.get(url)
        return response.json()

    def all_contents(self) -> list:
        """
        Returns a list of all shared directories and files.
        """
        url = self.api_url + '/shares/contents'
        response = self.session.get(url)
        return response.json()

    def contents(self, id: str) -> list:
        """
        Gets the contents of the share associated with the specified id.
        """
        url = self.api_url + f'/shares/{id}/contents'
        response = self.session.get(url)
        return response.json()
2,310
Python
.py
63
29.68254
74
0.633981
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
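A small sketch of triggering a rescan and listing the shares afterwards; the api object is assumed to be constructed as in the previous sketches.

from slskd_api.apis.shares import SharesApi  # import path assumed

def rescan_and_list(shares: SharesApi) -> None:
    # PUT /shares starts a scan; DELETE /shares would cancel a running one
    if shares.start_scan():
        print('scan started')
    print(shares.get_all())  # share list structure comes from the server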
8,468
searches.py
rembo10_headphones/lib/slskd_api/apis/searches.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *
import uuid
from typing import Optional


class SearchesApi(BaseApi):
    """
    Class that handles operations on searches.
    """

    def search_text(self,
                    searchText: str,
                    id: Optional[str] = None,
                    fileLimit: int = 10000,
                    filterResponses: bool = True,
                    maximumPeerQueueLength: int = 1000000,
                    minimumPeerUploadSpeed: int = 0,
                    minimumResponseFileCount: int = 1,
                    responseLimit: int = 100,
                    searchTimeout: int = 15000
                    ) -> dict:
        """
        Performs a search for the specified request.

        :param searchText: Search query
        :param id: uuid of the search. One will be generated if None.
        :param fileLimit: Max number of file results
        :param filterResponses: Filter unreachable users from the results
        :param maximumPeerQueueLength: Max queue length
        :param minimumPeerUploadSpeed: Min upload speed in bit/s
        :param minimumResponseFileCount: Min number of matching files per user
        :param responseLimit: Max number of users results
        :param searchTimeout: Search timeout in ms
        :return: Info about the search (no results!)
        """
        url = self.api_url + '/searches'
        try:
            id = str(uuid.UUID(id))  # check if given id is a valid uuid
        except (TypeError, ValueError):
            id = str(uuid.uuid1())  # otherwise generate a new one
        data = {
            "id": id,
            "fileLimit": fileLimit,
            "filterResponses": filterResponses,
            "maximumPeerQueueLength": maximumPeerQueueLength,
            "minimumPeerUploadSpeed": minimumPeerUploadSpeed,
            "minimumResponseFileCount": minimumResponseFileCount,
            "responseLimit": responseLimit,
            "searchText": searchText,
            "searchTimeout": searchTimeout,
        }
        response = self.session.post(url, json=data)
        return response.json()

    def get_all(self) -> list:
        """
        Gets the list of active and completed searches.
        """
        url = self.api_url + '/searches'
        response = self.session.get(url)
        return response.json()

    def state(self, id: str, includeResponses: bool = False) -> dict:
        """
        Gets the state of the search corresponding to the specified id.

        :param id: uuid of the search.
        :param includeResponses: Include responses (search result list) in the returned dict
        :return: Info about the search
        """
        url = self.api_url + f'/searches/{id}'
        params = dict(
            includeResponses=includeResponses
        )
        response = self.session.get(url, params=params)
        return response.json()

    def stop(self, id: str) -> bool:
        """
        Stops the search corresponding to the specified id.

        :return: True if successful.
        """
        url = self.api_url + f'/searches/{id}'
        response = self.session.put(url)
        return response.ok

    def delete(self, id: str) -> bool:
        """
        Deletes the search corresponding to the specified id.

        :return: True if successful.
        """
        url = self.api_url + f'/searches/{id}'
        response = self.session.delete(url)
        return response.ok

    def search_responses(self, id: str) -> list:
        """
        Gets search responses corresponding to the specified id.
        """
        url = self.api_url + f'/searches/{id}/responses'
        response = self.session.get(url)
        return response.json()
4,368
Python
.py
106
32.037736
92
0.627042
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
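search_text only creates the search; results are fetched separately. A polling sketch, assuming the state dict exposes an 'isComplete' flag (field name assumed, not confirmed by this file):

import time
from slskd_api.apis.searches import SearchesApi  # import path assumed

def search_and_wait(searches: SearchesApi, query: str) -> list:
    search = searches.search_text(query)  # returns search info, no results yet
    while not searches.state(search['id']).get('isComplete'):  # field name assumed
        time.sleep(1)
    return searches.search_responses(search['id'])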
8,469
relay.py
rembo10_headphones/lib/slskd_api/apis/relay.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class RelayApi(BaseApi):
    """
    [UNTESTED] This class contains the methods to interact with the Relay API.
    """

    def connect(self) -> bool:
        """
        Connects to the configured controller.

        :return: True if successful.
        """
        url = self.api_url + '/relay/agent'
        response = self.session.put(url)
        return response.ok

    def disconnect(self) -> bool:
        """
        Disconnects from the connected controller.

        :return: True if successful.
        """
        url = self.api_url + '/relay/agent'
        response = self.session.delete(url)
        return response.ok

    def download_file(self, token: str) -> bool:
        """
        Downloads a file from the connected controller.

        :return: True if successful.
        """
        url = self.api_url + f'/relay/controller/downloads/{token}'
        response = self.session.get(url)
        return response.ok

    def upload_file(self, token: str) -> bool:
        """
        Uploads a file to the connected controller.

        :return: True if successful.
        """
        url = self.api_url + f'/relay/controller/files/{token}'
        response = self.session.post(url)
        return response.ok

    def upload_share_info(self, token: str) -> bool:
        """
        Uploads share information to the connected controller.

        :return: True if successful.
        """
        url = self.api_url + f'/relay/controller/shares/{token}'
        response = self.session.post(url)
        return response.ok
2,283
Python
.py
59
31.830508
78
0.655723
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
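The class marks itself [UNTESTED], so only the trivial agent round-trip is sketched here; relay is assumed to be a RelayApi instance built as in the earlier sketches.

def reconnect_agent(relay) -> bool:
    # Drop and re-establish the link to the configured controller.
    return relay.disconnect() and relay.connect()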
8,470
logs.py
rembo10_headphones/lib/slskd_api/apis/logs.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class LogsApi(BaseApi):
    """
    This class contains the methods to interact with the Logs API.
    """

    def get(self) -> list:
        """
        Gets the last few application logs.
        """
        url = self.api_url + '/logs'
        response = self.session.get(url)
        return response.json()
1,025
Python
.py
26
35.769231
74
0.717151
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,471
options.py
rembo10_headphones/lib/slskd_api/apis/options.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class OptionsApi(BaseApi):
    """
    This class contains the methods to interact with the Options API.
    """

    def get(self) -> dict:
        """
        Gets the current application options.
        """
        url = self.api_url + '/options'
        response = self.session.get(url)
        return response.json()

    def get_startup(self) -> dict:
        """
        Gets the application options provided at startup.
        """
        url = self.api_url + '/options/startup'
        response = self.session.get(url)
        return response.json()

    def debug(self) -> str:
        """
        Gets the debug view of the current application options.
        debug and remote_configuration must be set to true.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.
        """
        url = self.api_url + '/options/debug'
        response = self.session.get(url)
        return response.json()

    def yaml_location(self) -> str:
        """
        Gets the path of the yaml config file.
        remote_configuration must be set to true.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.
        """
        url = self.api_url + '/options/yaml/location'
        response = self.session.get(url)
        return response.json()

    def download_yaml(self) -> str:
        """
        Gets the content of the yaml config file as text.
        remote_configuration must be set to true.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.
        """
        url = self.api_url + '/options/yaml'
        response = self.session.get(url)
        return response.json()

    def upload_yaml(self, yaml_content: str) -> bool:
        """
        Sets the content of the yaml config file.
        remote_configuration must be set to true.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.

        :return: True if successful.
        """
        url = self.api_url + '/options/yaml'
        response = self.session.post(url, json=yaml_content)
        return response.ok

    def validate_yaml(self, yaml_content: str) -> str:
        """
        Validates the provided yaml string.
        remote_configuration must be set to true.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.

        :return: Empty string if validation successful. Error message otherwise.
        """
        url = self.api_url + '/options/yaml/validate'
        response = self.session.post(url, json=yaml_content)
        return response.text
3,261
Python
.py
76
35.631579
99
0.655348
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
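Since validate_yaml returns an empty string on success, a safe remote-config edit can gate the upload on it. Sketch only; options is assumed to be an OptionsApi instance built with a token-authenticated session (remote_configuration enabled, per the docstrings).

def update_config(options, new_yaml: str) -> bool:
    error = options.validate_yaml(new_yaml)  # empty string means valid
    if error:
        raise ValueError(f'invalid yaml: {error}')
    return options.upload_yaml(new_yaml)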
8,472
__init__.py
rembo10_headphones/lib/slskd_api/apis/__init__.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .application import ApplicationApi
from .conversations import ConversationsApi
from .logs import LogsApi
from .options import OptionsApi
from .public_chat import PublicChatApi
from .relay import RelayApi
from .rooms import RoomsApi
from .searches import SearchesApi
from .server import ServerApi
from .session import SessionApi
from .shares import SharesApi
from .transfers import TransfersApi
from .users import UsersApi

__all__ = (
    'ApplicationApi',
    'ConversationsApi',
    'LogsApi',
    'OptionsApi',
    'PublicChatApi',
    'RelayApi',
    'RoomsApi',
    'SearchesApi',
    'ServerApi',
    'SessionApi',
    'SharesApi',
    'TransfersApi',
    'UsersApi'
)
1,373
Python
.py
42
30.333333
74
0.772761
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,473
base.py
rembo10_headphones/lib/slskd_api/apis/base.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import requests
from urllib.parse import quote


class BaseApi:
    """
    Base class where the api url and the authenticated session are set for all requests.
    """

    def __init__(self, api_url: str, session: requests.Session):
        self.api_url = api_url
        self.session = session
966
Python
.py
23
39.391304
74
0.750266
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
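BaseApi only stores the url and the session, so one authenticated requests.Session can be shared across every endpoint class. A construction sketch; the X-API-Key header name and the default slskd address are assumptions, not taken from this file.

import requests
from slskd_api.apis import SearchesApi, TransfersApi  # import path assumed

session = requests.Session()
session.headers.update({'X-API-Key': 'my-api-key'})  # header name assumed
api_url = 'http://localhost:5030/api/v0'             # default slskd address assumed

searches = SearchesApi(api_url, session)   # both classes reuse the same session
transfers = TransfersApi(api_url, session)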
8,474
transfers.py
rembo10_headphones/lib/slskd_api/apis/transfers.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *
from typing import Union


class TransfersApi(BaseApi):
    """
    This class contains the methods to interact with the Transfers API.
    """

    def cancel_download(self, username: str, id: str, remove: bool = False) -> bool:
        """
        Cancels the specified download.

        :return: True if successful.
        """
        url = self.api_url + f'/transfers/downloads/{quote(username)}/{id}'
        params = dict(
            remove=remove
        )
        response = self.session.delete(url, params=params)
        return response.ok

    def get_download(self, username: str, id: str) -> dict:
        """
        Gets the specified download.
        """
        url = self.api_url + f'/transfers/downloads/{quote(username)}/{id}'
        response = self.session.get(url)
        return response.json()

    def remove_completed_downloads(self) -> bool:
        """
        Removes all completed downloads, regardless of whether they failed or succeeded.

        :return: True if successful.
        """
        url = self.api_url + '/transfers/downloads/all/completed'
        response = self.session.delete(url)
        return response.ok

    def cancel_upload(self, username: str, id: str, remove: bool = False) -> bool:
        """
        Cancels the specified upload.

        :return: True if successful.
        """
        url = self.api_url + f'/transfers/uploads/{quote(username)}/{id}'
        params = dict(
            remove=remove
        )
        response = self.session.delete(url, params=params)
        return response.ok

    def get_upload(self, username: str, id: str) -> dict:
        """
        Gets the specified upload.
        """
        url = self.api_url + f'/transfers/uploads/{quote(username)}/{id}'
        response = self.session.get(url)
        return response.json()

    def remove_completed_uploads(self) -> bool:
        """
        Removes all completed uploads, regardless of whether they failed or succeeded.

        :return: True if successful.
        """
        url = self.api_url + '/transfers/uploads/all/completed'
        response = self.session.delete(url)
        return response.ok

    def enqueue(self, username: str, files: list) -> bool:
        """
        Enqueues the specified download.

        :param username: User to download from.
        :param files: A list of dictionaries in the same form as what's returned by
            :py:func:`~slskd_api.apis.SearchesApi.search_responses`:
            [{'filename': <filename>, 'size': <filesize>}...]
        :return: True if successful.
        """
        url = self.api_url + f'/transfers/downloads/{quote(username)}'
        response = self.session.post(url, json=files)
        return response.ok

    def get_downloads(self, username: str) -> dict:
        """
        Gets all downloads for the specified username.
        """
        url = self.api_url + f'/transfers/downloads/{quote(username)}'
        response = self.session.get(url)
        return response.json()

    def get_all_downloads(self, includeRemoved: bool = False) -> list:
        """
        Gets all downloads.
        """
        url = self.api_url + '/transfers/downloads/'
        params = dict(
            includeRemoved=includeRemoved
        )
        response = self.session.get(url, params=params)
        return response.json()

    def get_queue_position(self, username: str, id: str) -> Union[int, str]:
        """
        Requests the current place of the specified download in the remote user's queue.

        :return: Queue position or error message
        """
        url = self.api_url + f'/transfers/downloads/{quote(username)}/{id}/position'
        response = self.session.get(url)
        return response.json()

    def get_all_uploads(self, includeRemoved: bool = False) -> list:
        """
        Gets all uploads.
        """
        url = self.api_url + '/transfers/uploads/'
        params = dict(
            includeRemoved=includeRemoved
        )
        response = self.session.get(url, params=params)
        return response.json()

    def get_uploads(self, username: str) -> dict:
        """
        Gets all uploads for the specified username.
        """
        url = self.api_url + f'/transfers/uploads/{quote(username)}'
        response = self.session.get(url)
        return response.json()
5,202
Python
.py
126
32.968254
163
0.632359
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
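Tying searches and transfers together: enqueue expects [{'filename': ..., 'size': ...}] dicts, which the search responses already contain. A sketch assuming each response carries 'username' and 'files' keys (structure assumed, not confirmed by this file):

def download_first_response(searches, transfers, query: str) -> bool:
    responses = search_and_wait(searches, query)  # helper from the searches sketch above
    first = responses[0]                          # {'username': ..., 'files': [...]} assumed
    files = [{'filename': f['filename'], 'size': f['size']} for f in first['files']]
    return transfers.enqueue(first['username'], files)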
8,475
application.py
rembo10_headphones/lib/slskd_api/apis/application.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class ApplicationApi(BaseApi):
    """
    This class contains the methods to interact with the Application API.
    """

    def state(self) -> dict:
        """
        Gets the current state of the application.
        """
        url = self.api_url + '/application'
        response = self.session.get(url)
        return response.json()

    def stop(self) -> bool:
        """
        Stops the application.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.

        :return: True if successful.
        """
        url = self.api_url + '/application'
        response = self.session.delete(url)
        return response.ok

    def restart(self) -> bool:
        """
        Restarts the application.
        Only works with token (usr/pwd login). 'Unauthorized' with API-Key.

        :return: True if successful.
        """
        url = self.api_url + '/application'
        response = self.session.put(url)
        return response.ok

    def version(self) -> str:
        """
        Gets the current application version.
        """
        url = self.api_url + '/application/version'
        response = self.session.get(url)
        return response.json()

    def check_updates(self, forceCheck: bool = False) -> dict:
        """
        Checks for updates.
        """
        url = self.api_url + '/application/version/latest'
        params = dict(
            forceCheck=forceCheck
        )
        response = self.session.get(url, params=params)
        return response.json()

    def gc(self) -> bool:
        """
        Forces garbage collection.

        :return: True if successful.
        """
        url = self.api_url + '/application/gc'
        response = self.session.post(url)
        return response.ok

    # Not supposed to be part of the external API
    # More info in the Github discussion: https://github.com/slskd/slskd/discussions/910
    # def dump(self):
    #     url = self.api_url + '/application/dump'
    #     response = self.session.get(url)
    #     return response.json()
2,793
Python
.py
73
31.013699
101
0.643099
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
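A quick health and update check; app is assumed to be an ApplicationApi instance built as in the BaseApi sketch, and the 'state' key below is assumed rather than confirmed.

def report(app) -> None:
    print('state:', app.state().get('state'))  # key name assumed
    print('version:', app.version())
    print('latest:', app.check_updates(forceCheck=True))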
8,476
server.py
rembo10_headphones/lib/slskd_api/apis/server.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class ServerApi(BaseApi):
    """
    This class contains the methods to interact with the Server API.
    """

    def connect(self) -> bool:
        """
        Connects the client.

        :return: True if successful.
        """
        url = self.api_url + '/server'
        response = self.session.put(url)
        return response.ok

    def disconnect(self) -> bool:
        """
        Disconnects the client.

        :return: True if successful.
        """
        url = self.api_url + '/server'
        response = self.session.delete(url, json='')
        return response.ok

    def state(self) -> dict:
        """
        Retrieves the current state of the server.
        """
        url = self.api_url + '/server'
        response = self.session.get(url)
        return response.json()
1,528
Python
.py
42
30.547619
74
0.663946
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,477
session.py
rembo10_headphones/lib/slskd_api/apis/session.py
# Copyright (C) 2023 bigoulours
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from .base import *


class SessionApi(BaseApi):
    """
    This class contains the methods to interact with the Session API.
    """

    def auth_valid(self) -> bool:
        """
        Checks whether the provided authentication is valid.
        """
        url = self.api_url + '/session'
        response = self.session.get(url)
        return response.ok

    def login(self, username: str, password: str) -> dict:
        """
        Logs in.

        :return: Session info for the given user incl. token.
        """
        url = self.api_url + '/session'
        data = {
            'username': username,
            'password': password
        }
        response = self.session.post(url, json=data)
        return response.json()

    def security_enabled(self) -> bool:
        """
        Checks whether security is enabled.
        """
        url = self.api_url + '/session/enabled'
        response = self.session.get(url)
        return response.json()
1,675
Python
.py
45
31.022222
74
0.658204
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
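When security is enabled, login returns session info including a token that later calls must present. The Bearer scheme and the 'token' field name below are assumptions, not confirmed by this file.

import requests
from slskd_api.apis.session import SessionApi  # import path assumed

api_url = 'http://localhost:5030/api/v0'  # default slskd address assumed
session = requests.Session()
auth = SessionApi(api_url, session)
if auth.security_enabled():
    info = auth.login('user', 'password')
    session.headers.update({'Authorization': f"Bearer {info['token']}"})  # scheme/field assumed
print(auth.auth_valid())  # True once the session carries valid credentials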
8,478
win32.py
rembo10_headphones/lib/tzlocal/win32.py
import logging
from datetime import datetime

try:
    import _winreg as winreg
except ImportError:
    import winreg

try:
    import zoneinfo  # pragma: no cover
except ImportError:
    from backports import zoneinfo  # pragma: no cover

from tzlocal import utils
from tzlocal.windows_tz import win_tz

_cache_tz = None
_cache_tz_name = None

log = logging.getLogger("tzlocal")


def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    size = winreg.QueryInfoKey(key)[1]
    for i in range(size):
        data = winreg.EnumValue(key, i)
        result[data[0]] = data[1]
    return result


def _get_dst_info(tz):
    # Find the offset for when it doesn't have DST:
    dst_offset = std_offset = None
    has_dst = False
    year = datetime.now().year
    for dt in (datetime(year, 1, 1), datetime(year, 6, 1)):
        if tz.dst(dt).total_seconds() == 0.0:
            # OK, no DST during winter, get this offset
            std_offset = tz.utcoffset(dt).total_seconds()
        else:
            has_dst = True

    return has_dst, std_offset, dst_offset


def _get_localzone_name():
    # Windows is special. It has unique time zone names (in several
    # meanings of the word) available, but unfortunately, they can be
    # translated to the language of the operating system, so we need to
    # do a backwards lookup, by going through all time zones and see which
    # one matches.
    tzenv = utils._tz_name_from_env()
    if tzenv:
        return tzenv

    log.debug("Looking up time zone info from registry")
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)

    TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
    localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)

    keyvalues = valuestodict(localtz)
    localtz.Close()

    if "TimeZoneKeyName" in keyvalues:
        # Windows 7 and later
        # For some reason this returns a string with loads of NUL bytes at
        # least on some systems. I don't know if this is a bug somewhere, I
        # just work around it.
        tzkeyname = keyvalues["TimeZoneKeyName"].split("\x00", 1)[0]
    else:
        # Don't support XP any longer
        raise LookupError("Can not find Windows timezone configuration")

    timezone = win_tz.get(tzkeyname)
    if timezone is None:
        # Nope, that didn't work. Try adding "Standard Time",
        # it seems to work a lot of times:
        timezone = win_tz.get(tzkeyname + " Standard Time")

    # Return what we have.
    if timezone is None:
        raise zoneinfo.ZoneInfoNotFoundError(tzkeyname)

    if keyvalues.get("DynamicDaylightTimeDisabled", 0) == 1:
        # DST is disabled, so don't return the timezone name,
        # instead return Etc/GMT+offset
        tz = zoneinfo.ZoneInfo(timezone)
        has_dst, std_offset, dst_offset = _get_dst_info(tz)
        if not has_dst:
            # The DST is turned off in the windows configuration,
            # but this timezone doesn't have DST so it doesn't matter
            return timezone

        if std_offset is None:
            raise zoneinfo.ZoneInfoNotFoundError(
                f"{tzkeyname} claims to not have a non-DST time!?"
            )

        if std_offset % 3600:
            # I can't convert this to an hourly offset
            raise zoneinfo.ZoneInfoNotFoundError(
                f"tzlocal can't support disabling DST in the {timezone} zone."
            )

        # This has whole hours as offset, return it as Etc/GMT
        return f"Etc/GMT{-std_offset//3600:+.0f}"

    return timezone


def get_localzone_name() -> str:
    """Get the zoneinfo timezone name that matches the Windows-configured timezone."""
    global _cache_tz_name
    if _cache_tz_name is None:
        _cache_tz_name = _get_localzone_name()

    return _cache_tz_name


def get_localzone() -> zoneinfo.ZoneInfo:
    """Returns the zoneinfo-based tzinfo object that matches the Windows-configured timezone."""
    global _cache_tz
    if _cache_tz is None:
        _cache_tz = zoneinfo.ZoneInfo(get_localzone_name())

    if not utils._tz_name_from_env():
        # If the timezone does NOT come from a TZ environment variable,
        # verify that it's correct. If it's from the environment,
        # we accept it, this is so you can run tests with different timezones.
        utils.assert_tz_offset(_cache_tz, error=False)

    return _cache_tz


def reload_localzone() -> zoneinfo.ZoneInfo:
    """Reload the cached localzone. You need to call this if the timezone has changed."""
    global _cache_tz
    global _cache_tz_name
    _cache_tz_name = _get_localzone_name()
    _cache_tz = zoneinfo.ZoneInfo(_cache_tz_name)
    utils.assert_tz_offset(_cache_tz, error=False)
    return _cache_tz
4,772
Python
.py
113
35.442478
96
0.670054
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
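The Etc/GMT name built in _get_localzone_name inverts the sign of the UTC offset (POSIX convention: Etc/GMT-1 means UTC+1). A standalone check of the same expression, with illustrative offsets:

for std_offset in (3600.0, -18000.0):  # seconds east resp. west of UTC
    print(std_offset, '->', f"Etc/GMT{-std_offset//3600:+.0f}")
# 3600.0 -> Etc/GMT-1  (UTC+1, e.g. Berlin in winter)
# -18000.0 -> Etc/GMT+5  (UTC-5, e.g. New York in winter)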
8,479
windows_tz.py
rembo10_headphones/lib/tzlocal/windows_tz.py
# This file is autogenerated by the update_windows_mapping.py script # Do not edit. win_tz = { "AUS Central Standard Time": "Australia/Darwin", "AUS Eastern Standard Time": "Australia/Sydney", "Afghanistan Standard Time": "Asia/Kabul", "Alaskan Standard Time": "America/Anchorage", "Aleutian Standard Time": "America/Adak", "Altai Standard Time": "Asia/Barnaul", "Arab Standard Time": "Asia/Riyadh", "Arabian Standard Time": "Asia/Dubai", "Arabic Standard Time": "Asia/Baghdad", "Argentina Standard Time": "America/Buenos_Aires", "Astrakhan Standard Time": "Europe/Astrakhan", "Atlantic Standard Time": "America/Halifax", "Aus Central W. Standard Time": "Australia/Eucla", "Azerbaijan Standard Time": "Asia/Baku", "Azores Standard Time": "Atlantic/Azores", "Bahia Standard Time": "America/Bahia", "Bangladesh Standard Time": "Asia/Dhaka", "Belarus Standard Time": "Europe/Minsk", "Bougainville Standard Time": "Pacific/Bougainville", "Canada Central Standard Time": "America/Regina", "Cape Verde Standard Time": "Atlantic/Cape_Verde", "Caucasus Standard Time": "Asia/Yerevan", "Cen. Australia Standard Time": "Australia/Adelaide", "Central America Standard Time": "America/Guatemala", "Central Asia Standard Time": "Asia/Almaty", "Central Brazilian Standard Time": "America/Cuiaba", "Central Europe Standard Time": "Europe/Budapest", "Central European Standard Time": "Europe/Warsaw", "Central Pacific Standard Time": "Pacific/Guadalcanal", "Central Standard Time": "America/Chicago", "Central Standard Time (Mexico)": "America/Mexico_City", "Chatham Islands Standard Time": "Pacific/Chatham", "China Standard Time": "Asia/Shanghai", "Cuba Standard Time": "America/Havana", "Dateline Standard Time": "Etc/GMT+12", "E. Africa Standard Time": "Africa/Nairobi", "E. Australia Standard Time": "Australia/Brisbane", "E. Europe Standard Time": "Europe/Chisinau", "E. South America Standard Time": "America/Sao_Paulo", "Easter Island Standard Time": "Pacific/Easter", "Eastern Standard Time": "America/New_York", "Eastern Standard Time (Mexico)": "America/Cancun", "Egypt Standard Time": "Africa/Cairo", "Ekaterinburg Standard Time": "Asia/Yekaterinburg", "FLE Standard Time": "Europe/Kiev", "Fiji Standard Time": "Pacific/Fiji", "GMT Standard Time": "Europe/London", "GTB Standard Time": "Europe/Bucharest", "Georgian Standard Time": "Asia/Tbilisi", "Greenland Standard Time": "America/Godthab", "Greenwich Standard Time": "Atlantic/Reykjavik", "Haiti Standard Time": "America/Port-au-Prince", "Hawaiian Standard Time": "Pacific/Honolulu", "India Standard Time": "Asia/Calcutta", "Iran Standard Time": "Asia/Tehran", "Israel Standard Time": "Asia/Jerusalem", "Jordan Standard Time": "Asia/Amman", "Kaliningrad Standard Time": "Europe/Kaliningrad", "Korea Standard Time": "Asia/Seoul", "Libya Standard Time": "Africa/Tripoli", "Line Islands Standard Time": "Pacific/Kiritimati", "Lord Howe Standard Time": "Australia/Lord_Howe", "Magadan Standard Time": "Asia/Magadan", "Magallanes Standard Time": "America/Punta_Arenas", "Marquesas Standard Time": "Pacific/Marquesas", "Mauritius Standard Time": "Indian/Mauritius", "Middle East Standard Time": "Asia/Beirut", "Montevideo Standard Time": "America/Montevideo", "Morocco Standard Time": "Africa/Casablanca", "Mountain Standard Time": "America/Denver", "Mountain Standard Time (Mexico)": "America/Mazatlan", "Myanmar Standard Time": "Asia/Rangoon", "N. 
Central Asia Standard Time": "Asia/Novosibirsk", "Namibia Standard Time": "Africa/Windhoek", "Nepal Standard Time": "Asia/Katmandu", "New Zealand Standard Time": "Pacific/Auckland", "Newfoundland Standard Time": "America/St_Johns", "Norfolk Standard Time": "Pacific/Norfolk", "North Asia East Standard Time": "Asia/Irkutsk", "North Asia Standard Time": "Asia/Krasnoyarsk", "North Korea Standard Time": "Asia/Pyongyang", "Omsk Standard Time": "Asia/Omsk", "Pacific SA Standard Time": "America/Santiago", "Pacific Standard Time": "America/Los_Angeles", "Pacific Standard Time (Mexico)": "America/Tijuana", "Pakistan Standard Time": "Asia/Karachi", "Paraguay Standard Time": "America/Asuncion", "Qyzylorda Standard Time": "Asia/Qyzylorda", "Romance Standard Time": "Europe/Paris", "Russia Time Zone 10": "Asia/Srednekolymsk", "Russia Time Zone 11": "Asia/Kamchatka", "Russia Time Zone 3": "Europe/Samara", "Russian Standard Time": "Europe/Moscow", "SA Eastern Standard Time": "America/Cayenne", "SA Pacific Standard Time": "America/Bogota", "SA Western Standard Time": "America/La_Paz", "SE Asia Standard Time": "Asia/Bangkok", "Saint Pierre Standard Time": "America/Miquelon", "Sakhalin Standard Time": "Asia/Sakhalin", "Samoa Standard Time": "Pacific/Apia", "Sao Tome Standard Time": "Africa/Sao_Tome", "Saratov Standard Time": "Europe/Saratov", "Singapore Standard Time": "Asia/Singapore", "South Africa Standard Time": "Africa/Johannesburg", "South Sudan Standard Time": "Africa/Juba", "Sri Lanka Standard Time": "Asia/Colombo", "Sudan Standard Time": "Africa/Khartoum", "Syria Standard Time": "Asia/Damascus", "Taipei Standard Time": "Asia/Taipei", "Tasmania Standard Time": "Australia/Hobart", "Tocantins Standard Time": "America/Araguaina", "Tokyo Standard Time": "Asia/Tokyo", "Tomsk Standard Time": "Asia/Tomsk", "Tonga Standard Time": "Pacific/Tongatapu", "Transbaikal Standard Time": "Asia/Chita", "Turkey Standard Time": "Europe/Istanbul", "Turks And Caicos Standard Time": "America/Grand_Turk", "US Eastern Standard Time": "America/Indianapolis", "US Mountain Standard Time": "America/Phoenix", "UTC": "Etc/UTC", "UTC+12": "Etc/GMT-12", "UTC+13": "Etc/GMT-13", "UTC-02": "Etc/GMT+2", "UTC-08": "Etc/GMT+8", "UTC-09": "Etc/GMT+9", "UTC-11": "Etc/GMT+11", "Ulaanbaatar Standard Time": "Asia/Ulaanbaatar", "Venezuela Standard Time": "America/Caracas", "Vladivostok Standard Time": "Asia/Vladivostok", "Volgograd Standard Time": "Europe/Volgograd", "W. Australia Standard Time": "Australia/Perth", "W. Central Africa Standard Time": "Africa/Lagos", "W. Europe Standard Time": "Europe/Berlin", "W. Mongolia Standard Time": "Asia/Hovd", "West Asia Standard Time": "Asia/Tashkent", "West Bank Standard Time": "Asia/Hebron", "West Pacific Standard Time": "Pacific/Port_Moresby", "Yakutsk Standard Time": "Asia/Yakutsk", "Yukon Standard Time": "America/Whitehorse", } # Old name for the win_tz variable: tz_names = win_tz tz_win = { "": "Central Standard Time (Mexico)", "Africa/Abidjan": "Greenwich Standard Time", "Africa/Accra": "Greenwich Standard Time", "Africa/Addis_Ababa": "E. Africa Standard Time", "Africa/Algiers": "W. Central Africa Standard Time", "Africa/Asmara": "E. Africa Standard Time", "Africa/Asmera": "E. Africa Standard Time", "Africa/Bamako": "Greenwich Standard Time", "Africa/Bangui": "W. Central Africa Standard Time", "Africa/Banjul": "Greenwich Standard Time", "Africa/Bissau": "Greenwich Standard Time", "Africa/Blantyre": "South Africa Standard Time", "Africa/Brazzaville": "W. 
Central Africa Standard Time", "Africa/Bujumbura": "South Africa Standard Time", "Africa/Cairo": "Egypt Standard Time", "Africa/Casablanca": "Morocco Standard Time", "Africa/Ceuta": "Romance Standard Time", "Africa/Conakry": "Greenwich Standard Time", "Africa/Dakar": "Greenwich Standard Time", "Africa/Dar_es_Salaam": "E. Africa Standard Time", "Africa/Djibouti": "E. Africa Standard Time", "Africa/Douala": "W. Central Africa Standard Time", "Africa/El_Aaiun": "Morocco Standard Time", "Africa/Freetown": "Greenwich Standard Time", "Africa/Gaborone": "South Africa Standard Time", "Africa/Harare": "South Africa Standard Time", "Africa/Johannesburg": "South Africa Standard Time", "Africa/Juba": "South Sudan Standard Time", "Africa/Kampala": "E. Africa Standard Time", "Africa/Khartoum": "Sudan Standard Time", "Africa/Kigali": "South Africa Standard Time", "Africa/Kinshasa": "W. Central Africa Standard Time", "Africa/Lagos": "W. Central Africa Standard Time", "Africa/Libreville": "W. Central Africa Standard Time", "Africa/Lome": "Greenwich Standard Time", "Africa/Luanda": "W. Central Africa Standard Time", "Africa/Lubumbashi": "South Africa Standard Time", "Africa/Lusaka": "South Africa Standard Time", "Africa/Malabo": "W. Central Africa Standard Time", "Africa/Maputo": "South Africa Standard Time", "Africa/Maseru": "South Africa Standard Time", "Africa/Mbabane": "South Africa Standard Time", "Africa/Mogadishu": "E. Africa Standard Time", "Africa/Monrovia": "Greenwich Standard Time", "Africa/Nairobi": "E. Africa Standard Time", "Africa/Ndjamena": "W. Central Africa Standard Time", "Africa/Niamey": "W. Central Africa Standard Time", "Africa/Nouakchott": "Greenwich Standard Time", "Africa/Ouagadougou": "Greenwich Standard Time", "Africa/Porto-Novo": "W. Central Africa Standard Time", "Africa/Sao_Tome": "Sao Tome Standard Time", "Africa/Timbuktu": "Greenwich Standard Time", "Africa/Tripoli": "Libya Standard Time", "Africa/Tunis": "W. 
Central Africa Standard Time", "Africa/Windhoek": "Namibia Standard Time", "America/Adak": "Aleutian Standard Time", "America/Anchorage": "Alaskan Standard Time", "America/Anguilla": "SA Western Standard Time", "America/Antigua": "SA Western Standard Time", "America/Araguaina": "Tocantins Standard Time", "America/Argentina/Buenos_Aires": "Argentina Standard Time", "America/Argentina/Catamarca": "Argentina Standard Time", "America/Argentina/ComodRivadavia": "Argentina Standard Time", "America/Argentina/Cordoba": "Argentina Standard Time", "America/Argentina/Jujuy": "Argentina Standard Time", "America/Argentina/La_Rioja": "Argentina Standard Time", "America/Argentina/Mendoza": "Argentina Standard Time", "America/Argentina/Rio_Gallegos": "Argentina Standard Time", "America/Argentina/Salta": "Argentina Standard Time", "America/Argentina/San_Juan": "Argentina Standard Time", "America/Argentina/San_Luis": "Argentina Standard Time", "America/Argentina/Tucuman": "Argentina Standard Time", "America/Argentina/Ushuaia": "Argentina Standard Time", "America/Aruba": "SA Western Standard Time", "America/Asuncion": "Paraguay Standard Time", "America/Atikokan": "SA Pacific Standard Time", "America/Atka": "Aleutian Standard Time", "America/Bahia": "Bahia Standard Time", "America/Bahia_Banderas": "Central Standard Time (Mexico)", "America/Barbados": "SA Western Standard Time", "America/Belem": "SA Eastern Standard Time", "America/Belize": "Central America Standard Time", "America/Blanc-Sablon": "SA Western Standard Time", "America/Boa_Vista": "SA Western Standard Time", "America/Bogota": "SA Pacific Standard Time", "America/Boise": "Mountain Standard Time", "America/Buenos_Aires": "Argentina Standard Time", "America/Cambridge_Bay": "Mountain Standard Time", "America/Campo_Grande": "Central Brazilian Standard Time", "America/Cancun": "Eastern Standard Time (Mexico)", "America/Caracas": "Venezuela Standard Time", "America/Catamarca": "Argentina Standard Time", "America/Cayenne": "SA Eastern Standard Time", "America/Cayman": "SA Pacific Standard Time", "America/Chicago": "Central Standard Time", "America/Chihuahua": "Central Standard Time (Mexico)", "America/Ciudad_Juarez": "Mountain Standard Time", "America/Coral_Harbour": "SA Pacific Standard Time", "America/Cordoba": "Argentina Standard Time", "America/Costa_Rica": "Central America Standard Time", "America/Creston": "US Mountain Standard Time", "America/Cuiaba": "Central Brazilian Standard Time", "America/Curacao": "SA Western Standard Time", "America/Danmarkshavn": "Greenwich Standard Time", "America/Dawson": "Yukon Standard Time", "America/Dawson_Creek": "US Mountain Standard Time", "America/Denver": "Mountain Standard Time", "America/Detroit": "Eastern Standard Time", "America/Dominica": "SA Western Standard Time", "America/Edmonton": "Mountain Standard Time", "America/Eirunepe": "SA Pacific Standard Time", "America/El_Salvador": "Central America Standard Time", "America/Ensenada": "Pacific Standard Time (Mexico)", "America/Fort_Nelson": "US Mountain Standard Time", "America/Fort_Wayne": "US Eastern Standard Time", "America/Fortaleza": "SA Eastern Standard Time", "America/Glace_Bay": "Atlantic Standard Time", "America/Godthab": "Greenland Standard Time", "America/Goose_Bay": "Atlantic Standard Time", "America/Grand_Turk": "Turks And Caicos Standard Time", "America/Grenada": "SA Western Standard Time", "America/Guadeloupe": "SA Western Standard Time", "America/Guatemala": "Central America Standard Time", "America/Guayaquil": "SA Pacific Standard Time", 
"America/Guyana": "SA Western Standard Time", "America/Halifax": "Atlantic Standard Time", "America/Havana": "Cuba Standard Time", "America/Hermosillo": "US Mountain Standard Time", "America/Indiana/Indianapolis": "US Eastern Standard Time", "America/Indiana/Knox": "Central Standard Time", "America/Indiana/Marengo": "US Eastern Standard Time", "America/Indiana/Petersburg": "Eastern Standard Time", "America/Indiana/Tell_City": "Central Standard Time", "America/Indiana/Vevay": "US Eastern Standard Time", "America/Indiana/Vincennes": "Eastern Standard Time", "America/Indiana/Winamac": "Eastern Standard Time", "America/Indianapolis": "US Eastern Standard Time", "America/Inuvik": "Mountain Standard Time", "America/Iqaluit": "Eastern Standard Time", "America/Jamaica": "SA Pacific Standard Time", "America/Jujuy": "Argentina Standard Time", "America/Juneau": "Alaskan Standard Time", "America/Kentucky/Louisville": "Eastern Standard Time", "America/Kentucky/Monticello": "Eastern Standard Time", "America/Knox_IN": "Central Standard Time", "America/Kralendijk": "SA Western Standard Time", "America/La_Paz": "SA Western Standard Time", "America/Lima": "SA Pacific Standard Time", "America/Los_Angeles": "Pacific Standard Time", "America/Louisville": "Eastern Standard Time", "America/Lower_Princes": "SA Western Standard Time", "America/Maceio": "SA Eastern Standard Time", "America/Managua": "Central America Standard Time", "America/Manaus": "SA Western Standard Time", "America/Marigot": "SA Western Standard Time", "America/Martinique": "SA Western Standard Time", "America/Matamoros": "Central Standard Time", "America/Mazatlan": "Mountain Standard Time (Mexico)", "America/Mendoza": "Argentina Standard Time", "America/Menominee": "Central Standard Time", "America/Merida": "Central Standard Time (Mexico)", "America/Metlakatla": "Alaskan Standard Time", "America/Mexico_City": "Central Standard Time (Mexico)", "America/Miquelon": "Saint Pierre Standard Time", "America/Moncton": "Atlantic Standard Time", "America/Monterrey": "Central Standard Time (Mexico)", "America/Montevideo": "Montevideo Standard Time", "America/Montreal": "Eastern Standard Time", "America/Montserrat": "SA Western Standard Time", "America/Nassau": "Eastern Standard Time", "America/New_York": "Eastern Standard Time", "America/Nipigon": "Eastern Standard Time", "America/Nome": "Alaskan Standard Time", "America/Noronha": "UTC-02", "America/North_Dakota/Beulah": "Central Standard Time", "America/North_Dakota/Center": "Central Standard Time", "America/North_Dakota/New_Salem": "Central Standard Time", "America/Nuuk": "Greenland Standard Time", "America/Ojinaga": "Central Standard Time", "America/Panama": "SA Pacific Standard Time", "America/Pangnirtung": "Eastern Standard Time", "America/Paramaribo": "SA Eastern Standard Time", "America/Phoenix": "US Mountain Standard Time", "America/Port-au-Prince": "Haiti Standard Time", "America/Port_of_Spain": "SA Western Standard Time", "America/Porto_Acre": "SA Pacific Standard Time", "America/Porto_Velho": "SA Western Standard Time", "America/Puerto_Rico": "SA Western Standard Time", "America/Punta_Arenas": "Magallanes Standard Time", "America/Rainy_River": "Central Standard Time", "America/Rankin_Inlet": "Central Standard Time", "America/Recife": "SA Eastern Standard Time", "America/Regina": "Canada Central Standard Time", "America/Resolute": "Central Standard Time", "America/Rio_Branco": "SA Pacific Standard Time", "America/Rosario": "Argentina Standard Time", "America/Santa_Isabel": "Pacific Standard 
Time (Mexico)", "America/Santarem": "SA Eastern Standard Time", "America/Santiago": "Pacific SA Standard Time", "America/Santo_Domingo": "SA Western Standard Time", "America/Sao_Paulo": "E. South America Standard Time", "America/Scoresbysund": "Azores Standard Time", "America/Shiprock": "Mountain Standard Time", "America/Sitka": "Alaskan Standard Time", "America/St_Barthelemy": "SA Western Standard Time", "America/St_Johns": "Newfoundland Standard Time", "America/St_Kitts": "SA Western Standard Time", "America/St_Lucia": "SA Western Standard Time", "America/St_Thomas": "SA Western Standard Time", "America/St_Vincent": "SA Western Standard Time", "America/Swift_Current": "Canada Central Standard Time", "America/Tegucigalpa": "Central America Standard Time", "America/Thule": "Atlantic Standard Time", "America/Thunder_Bay": "Eastern Standard Time", "America/Tijuana": "Pacific Standard Time (Mexico)", "America/Toronto": "Eastern Standard Time", "America/Tortola": "SA Western Standard Time", "America/Vancouver": "Pacific Standard Time", "America/Virgin": "SA Western Standard Time", "America/Whitehorse": "Yukon Standard Time", "America/Winnipeg": "Central Standard Time", "America/Yakutat": "Alaskan Standard Time", "America/Yellowknife": "Mountain Standard Time", "Antarctica/Casey": "Central Pacific Standard Time", "Antarctica/Davis": "SE Asia Standard Time", "Antarctica/DumontDUrville": "West Pacific Standard Time", "Antarctica/Macquarie": "Tasmania Standard Time", "Antarctica/Mawson": "West Asia Standard Time", "Antarctica/McMurdo": "New Zealand Standard Time", "Antarctica/Palmer": "SA Eastern Standard Time", "Antarctica/Rothera": "SA Eastern Standard Time", "Antarctica/South_Pole": "New Zealand Standard Time", "Antarctica/Syowa": "E. Africa Standard Time", "Antarctica/Vostok": "Central Asia Standard Time", "Arctic/Longyearbyen": "W. Europe Standard Time", "Asia/Aden": "Arab Standard Time", "Asia/Almaty": "Central Asia Standard Time", "Asia/Amman": "Jordan Standard Time", "Asia/Anadyr": "Russia Time Zone 11", "Asia/Aqtau": "West Asia Standard Time", "Asia/Aqtobe": "West Asia Standard Time", "Asia/Ashgabat": "West Asia Standard Time", "Asia/Ashkhabad": "West Asia Standard Time", "Asia/Atyrau": "West Asia Standard Time", "Asia/Baghdad": "Arabic Standard Time", "Asia/Bahrain": "Arab Standard Time", "Asia/Baku": "Azerbaijan Standard Time", "Asia/Bangkok": "SE Asia Standard Time", "Asia/Barnaul": "Altai Standard Time", "Asia/Beirut": "Middle East Standard Time", "Asia/Bishkek": "Central Asia Standard Time", "Asia/Brunei": "Singapore Standard Time", "Asia/Calcutta": "India Standard Time", "Asia/Chita": "Transbaikal Standard Time", "Asia/Choibalsan": "Ulaanbaatar Standard Time", "Asia/Chongqing": "China Standard Time", "Asia/Chungking": "China Standard Time", "Asia/Colombo": "Sri Lanka Standard Time", "Asia/Dacca": "Bangladesh Standard Time", "Asia/Damascus": "Syria Standard Time", "Asia/Dhaka": "Bangladesh Standard Time", "Asia/Dili": "Tokyo Standard Time", "Asia/Dubai": "Arabian Standard Time", "Asia/Dushanbe": "West Asia Standard Time", "Asia/Famagusta": "GTB Standard Time", "Asia/Gaza": "West Bank Standard Time", "Asia/Harbin": "China Standard Time", "Asia/Hebron": "West Bank Standard Time", "Asia/Ho_Chi_Minh": "SE Asia Standard Time", "Asia/Hong_Kong": "China Standard Time", "Asia/Hovd": "W. 
Mongolia Standard Time", "Asia/Irkutsk": "North Asia East Standard Time", "Asia/Istanbul": "Turkey Standard Time", "Asia/Jakarta": "SE Asia Standard Time", "Asia/Jayapura": "Tokyo Standard Time", "Asia/Jerusalem": "Israel Standard Time", "Asia/Kabul": "Afghanistan Standard Time", "Asia/Kamchatka": "Russia Time Zone 11", "Asia/Karachi": "Pakistan Standard Time", "Asia/Kashgar": "Central Asia Standard Time", "Asia/Kathmandu": "Nepal Standard Time", "Asia/Katmandu": "Nepal Standard Time", "Asia/Khandyga": "Yakutsk Standard Time", "Asia/Kolkata": "India Standard Time", "Asia/Krasnoyarsk": "North Asia Standard Time", "Asia/Kuala_Lumpur": "Singapore Standard Time", "Asia/Kuching": "Singapore Standard Time", "Asia/Kuwait": "Arab Standard Time", "Asia/Macao": "China Standard Time", "Asia/Macau": "China Standard Time", "Asia/Magadan": "Magadan Standard Time", "Asia/Makassar": "Singapore Standard Time", "Asia/Manila": "Singapore Standard Time", "Asia/Muscat": "Arabian Standard Time", "Asia/Nicosia": "GTB Standard Time", "Asia/Novokuznetsk": "North Asia Standard Time", "Asia/Novosibirsk": "N. Central Asia Standard Time", "Asia/Omsk": "Omsk Standard Time", "Asia/Oral": "West Asia Standard Time", "Asia/Phnom_Penh": "SE Asia Standard Time", "Asia/Pontianak": "SE Asia Standard Time", "Asia/Pyongyang": "North Korea Standard Time", "Asia/Qatar": "Arab Standard Time", "Asia/Qostanay": "Central Asia Standard Time", "Asia/Qyzylorda": "Qyzylorda Standard Time", "Asia/Rangoon": "Myanmar Standard Time", "Asia/Riyadh": "Arab Standard Time", "Asia/Saigon": "SE Asia Standard Time", "Asia/Sakhalin": "Sakhalin Standard Time", "Asia/Samarkand": "West Asia Standard Time", "Asia/Seoul": "Korea Standard Time", "Asia/Shanghai": "China Standard Time", "Asia/Singapore": "Singapore Standard Time", "Asia/Srednekolymsk": "Russia Time Zone 10", "Asia/Taipei": "Taipei Standard Time", "Asia/Tashkent": "West Asia Standard Time", "Asia/Tbilisi": "Georgian Standard Time", "Asia/Tehran": "Iran Standard Time", "Asia/Tel_Aviv": "Israel Standard Time", "Asia/Thimbu": "Bangladesh Standard Time", "Asia/Thimphu": "Bangladesh Standard Time", "Asia/Tokyo": "Tokyo Standard Time", "Asia/Tomsk": "Tomsk Standard Time", "Asia/Ujung_Pandang": "Singapore Standard Time", "Asia/Ulaanbaatar": "Ulaanbaatar Standard Time", "Asia/Ulan_Bator": "Ulaanbaatar Standard Time", "Asia/Urumqi": "Central Asia Standard Time", "Asia/Ust-Nera": "Vladivostok Standard Time", "Asia/Vientiane": "SE Asia Standard Time", "Asia/Vladivostok": "Vladivostok Standard Time", "Asia/Yakutsk": "Yakutsk Standard Time", "Asia/Yangon": "Myanmar Standard Time", "Asia/Yekaterinburg": "Ekaterinburg Standard Time", "Asia/Yerevan": "Caucasus Standard Time", "Atlantic/Azores": "Azores Standard Time", "Atlantic/Bermuda": "Atlantic Standard Time", "Atlantic/Canary": "GMT Standard Time", "Atlantic/Cape_Verde": "Cape Verde Standard Time", "Atlantic/Faeroe": "GMT Standard Time", "Atlantic/Faroe": "GMT Standard Time", "Atlantic/Jan_Mayen": "W. Europe Standard Time", "Atlantic/Madeira": "GMT Standard Time", "Atlantic/Reykjavik": "Greenwich Standard Time", "Atlantic/South_Georgia": "UTC-02", "Atlantic/St_Helena": "Greenwich Standard Time", "Atlantic/Stanley": "SA Eastern Standard Time", "Australia/ACT": "AUS Eastern Standard Time", "Australia/Adelaide": "Cen. Australia Standard Time", "Australia/Brisbane": "E. Australia Standard Time", "Australia/Broken_Hill": "Cen. 
Australia Standard Time", "Australia/Canberra": "AUS Eastern Standard Time", "Australia/Currie": "Tasmania Standard Time", "Australia/Darwin": "AUS Central Standard Time", "Australia/Eucla": "Aus Central W. Standard Time", "Australia/Hobart": "Tasmania Standard Time", "Australia/LHI": "Lord Howe Standard Time", "Australia/Lindeman": "E. Australia Standard Time", "Australia/Lord_Howe": "Lord Howe Standard Time", "Australia/Melbourne": "AUS Eastern Standard Time", "Australia/NSW": "AUS Eastern Standard Time", "Australia/North": "AUS Central Standard Time", "Australia/Perth": "W. Australia Standard Time", "Australia/Queensland": "E. Australia Standard Time", "Australia/South": "Cen. Australia Standard Time", "Australia/Sydney": "AUS Eastern Standard Time", "Australia/Tasmania": "Tasmania Standard Time", "Australia/Victoria": "AUS Eastern Standard Time", "Australia/West": "W. Australia Standard Time", "Australia/Yancowinna": "Cen. Australia Standard Time", "Brazil/Acre": "SA Pacific Standard Time", "Brazil/DeNoronha": "UTC-02", "Brazil/East": "E. South America Standard Time", "Brazil/West": "SA Western Standard Time", "CST6CDT": "Central Standard Time", "Canada/Atlantic": "Atlantic Standard Time", "Canada/Central": "Central Standard Time", "Canada/Eastern": "Eastern Standard Time", "Canada/Mountain": "Mountain Standard Time", "Canada/Newfoundland": "Newfoundland Standard Time", "Canada/Pacific": "Pacific Standard Time", "Canada/Saskatchewan": "Canada Central Standard Time", "Canada/Yukon": "Yukon Standard Time", "Chile/Continental": "Pacific SA Standard Time", "Chile/EasterIsland": "Easter Island Standard Time", "Cuba": "Cuba Standard Time", "EST5EDT": "Eastern Standard Time", "Egypt": "Egypt Standard Time", "Eire": "GMT Standard Time", "Etc/GMT": "UTC", "Etc/GMT+0": "UTC", "Etc/GMT+1": "Cape Verde Standard Time", "Etc/GMT+10": "Hawaiian Standard Time", "Etc/GMT+11": "UTC-11", "Etc/GMT+12": "Dateline Standard Time", "Etc/GMT+2": "UTC-02", "Etc/GMT+3": "SA Eastern Standard Time", "Etc/GMT+4": "SA Western Standard Time", "Etc/GMT+5": "SA Pacific Standard Time", "Etc/GMT+6": "Central America Standard Time", "Etc/GMT+7": "US Mountain Standard Time", "Etc/GMT+8": "UTC-08", "Etc/GMT+9": "UTC-09", "Etc/GMT-0": "UTC", "Etc/GMT-1": "W. Central Africa Standard Time", "Etc/GMT-10": "West Pacific Standard Time", "Etc/GMT-11": "Central Pacific Standard Time", "Etc/GMT-12": "UTC+12", "Etc/GMT-13": "UTC+13", "Etc/GMT-14": "Line Islands Standard Time", "Etc/GMT-2": "South Africa Standard Time", "Etc/GMT-3": "E. Africa Standard Time", "Etc/GMT-4": "Arabian Standard Time", "Etc/GMT-5": "West Asia Standard Time", "Etc/GMT-6": "Central Asia Standard Time", "Etc/GMT-7": "SE Asia Standard Time", "Etc/GMT-8": "Singapore Standard Time", "Etc/GMT-9": "Tokyo Standard Time", "Etc/GMT0": "UTC", "Etc/Greenwich": "UTC", "Etc/UCT": "UTC", "Etc/UTC": "UTC", "Etc/Universal": "UTC", "Etc/Zulu": "UTC", "Europe/Amsterdam": "W. Europe Standard Time", "Europe/Andorra": "W. Europe Standard Time", "Europe/Astrakhan": "Astrakhan Standard Time", "Europe/Athens": "GTB Standard Time", "Europe/Belfast": "GMT Standard Time", "Europe/Belgrade": "Central Europe Standard Time", "Europe/Berlin": "W. Europe Standard Time", "Europe/Bratislava": "Central Europe Standard Time", "Europe/Brussels": "Romance Standard Time", "Europe/Bucharest": "GTB Standard Time", "Europe/Budapest": "Central Europe Standard Time", "Europe/Busingen": "W. Europe Standard Time", "Europe/Chisinau": "E. 
Europe Standard Time", "Europe/Copenhagen": "Romance Standard Time", "Europe/Dublin": "GMT Standard Time", "Europe/Gibraltar": "W. Europe Standard Time", "Europe/Guernsey": "GMT Standard Time", "Europe/Helsinki": "FLE Standard Time", "Europe/Isle_of_Man": "GMT Standard Time", "Europe/Istanbul": "Turkey Standard Time", "Europe/Jersey": "GMT Standard Time", "Europe/Kaliningrad": "Kaliningrad Standard Time", "Europe/Kiev": "FLE Standard Time", "Europe/Kirov": "Russian Standard Time", "Europe/Kyiv": "FLE Standard Time", "Europe/Lisbon": "GMT Standard Time", "Europe/Ljubljana": "Central Europe Standard Time", "Europe/London": "GMT Standard Time", "Europe/Luxembourg": "W. Europe Standard Time", "Europe/Madrid": "Romance Standard Time", "Europe/Malta": "W. Europe Standard Time", "Europe/Mariehamn": "FLE Standard Time", "Europe/Minsk": "Belarus Standard Time", "Europe/Monaco": "W. Europe Standard Time", "Europe/Moscow": "Russian Standard Time", "Europe/Nicosia": "GTB Standard Time", "Europe/Oslo": "W. Europe Standard Time", "Europe/Paris": "Romance Standard Time", "Europe/Podgorica": "Central Europe Standard Time", "Europe/Prague": "Central Europe Standard Time", "Europe/Riga": "FLE Standard Time", "Europe/Rome": "W. Europe Standard Time", "Europe/Samara": "Russia Time Zone 3", "Europe/San_Marino": "W. Europe Standard Time", "Europe/Sarajevo": "Central European Standard Time", "Europe/Saratov": "Saratov Standard Time", "Europe/Simferopol": "Russian Standard Time", "Europe/Skopje": "Central European Standard Time", "Europe/Sofia": "FLE Standard Time", "Europe/Stockholm": "W. Europe Standard Time", "Europe/Tallinn": "FLE Standard Time", "Europe/Tirane": "Central Europe Standard Time", "Europe/Tiraspol": "E. Europe Standard Time", "Europe/Ulyanovsk": "Astrakhan Standard Time", "Europe/Uzhgorod": "FLE Standard Time", "Europe/Vaduz": "W. Europe Standard Time", "Europe/Vatican": "W. Europe Standard Time", "Europe/Vienna": "W. Europe Standard Time", "Europe/Vilnius": "FLE Standard Time", "Europe/Volgograd": "Volgograd Standard Time", "Europe/Warsaw": "Central European Standard Time", "Europe/Zagreb": "Central European Standard Time", "Europe/Zaporozhye": "FLE Standard Time", "Europe/Zurich": "W. Europe Standard Time", "GB": "GMT Standard Time", "GB-Eire": "GMT Standard Time", "GMT+0": "UTC", "GMT-0": "UTC", "GMT0": "UTC", "Greenwich": "UTC", "Hongkong": "China Standard Time", "Iceland": "Greenwich Standard Time", "Indian/Antananarivo": "E. Africa Standard Time", "Indian/Chagos": "Central Asia Standard Time", "Indian/Christmas": "SE Asia Standard Time", "Indian/Cocos": "Myanmar Standard Time", "Indian/Comoro": "E. Africa Standard Time", "Indian/Kerguelen": "West Asia Standard Time", "Indian/Mahe": "Mauritius Standard Time", "Indian/Maldives": "West Asia Standard Time", "Indian/Mauritius": "Mauritius Standard Time", "Indian/Mayotte": "E. 
Africa Standard Time", "Indian/Reunion": "Mauritius Standard Time", "Iran": "Iran Standard Time", "Israel": "Israel Standard Time", "Jamaica": "SA Pacific Standard Time", "Japan": "Tokyo Standard Time", "Kwajalein": "UTC+12", "Libya": "Libya Standard Time", "MST7MDT": "Mountain Standard Time", "Mexico/BajaNorte": "Pacific Standard Time (Mexico)", "Mexico/BajaSur": "Mountain Standard Time (Mexico)", "Mexico/General": "Central Standard Time (Mexico)", "NZ": "New Zealand Standard Time", "NZ-CHAT": "Chatham Islands Standard Time", "Navajo": "Mountain Standard Time", "PRC": "China Standard Time", "PST8PDT": "Pacific Standard Time", "Pacific/Apia": "Samoa Standard Time", "Pacific/Auckland": "New Zealand Standard Time", "Pacific/Bougainville": "Bougainville Standard Time", "Pacific/Chatham": "Chatham Islands Standard Time", "Pacific/Chuuk": "West Pacific Standard Time", "Pacific/Easter": "Easter Island Standard Time", "Pacific/Efate": "Central Pacific Standard Time", "Pacific/Enderbury": "UTC+13", "Pacific/Fakaofo": "UTC+13", "Pacific/Fiji": "Fiji Standard Time", "Pacific/Funafuti": "UTC+12", "Pacific/Galapagos": "Central America Standard Time", "Pacific/Gambier": "UTC-09", "Pacific/Guadalcanal": "Central Pacific Standard Time", "Pacific/Guam": "West Pacific Standard Time", "Pacific/Honolulu": "Hawaiian Standard Time", "Pacific/Johnston": "Hawaiian Standard Time", "Pacific/Kanton": "UTC+13", "Pacific/Kiritimati": "Line Islands Standard Time", "Pacific/Kosrae": "Central Pacific Standard Time", "Pacific/Kwajalein": "UTC+12", "Pacific/Majuro": "UTC+12", "Pacific/Marquesas": "Marquesas Standard Time", "Pacific/Midway": "UTC-11", "Pacific/Nauru": "UTC+12", "Pacific/Niue": "UTC-11", "Pacific/Norfolk": "Norfolk Standard Time", "Pacific/Noumea": "Central Pacific Standard Time", "Pacific/Pago_Pago": "UTC-11", "Pacific/Palau": "Tokyo Standard Time", "Pacific/Pitcairn": "UTC-08", "Pacific/Pohnpei": "Central Pacific Standard Time", "Pacific/Ponape": "Central Pacific Standard Time", "Pacific/Port_Moresby": "West Pacific Standard Time", "Pacific/Rarotonga": "Hawaiian Standard Time", "Pacific/Saipan": "West Pacific Standard Time", "Pacific/Samoa": "UTC-11", "Pacific/Tahiti": "Hawaiian Standard Time", "Pacific/Tarawa": "UTC+12", "Pacific/Tongatapu": "Tonga Standard Time", "Pacific/Truk": "West Pacific Standard Time", "Pacific/Wake": "UTC+12", "Pacific/Wallis": "UTC+12", "Pacific/Yap": "West Pacific Standard Time", "Poland": "Central European Standard Time", "Portugal": "GMT Standard Time", "ROC": "Taipei Standard Time", "ROK": "Korea Standard Time", "Singapore": "Singapore Standard Time", "Turkey": "Turkey Standard Time", "UCT": "UTC", "US/Alaska": "Alaskan Standard Time", "US/Aleutian": "Aleutian Standard Time", "US/Arizona": "US Mountain Standard Time", "US/Central": "Central Standard Time", "US/Eastern": "Eastern Standard Time", "US/Hawaii": "Hawaiian Standard Time", "US/Indiana-Starke": "Central Standard Time", "US/Michigan": "Eastern Standard Time", "US/Mountain": "Mountain Standard Time", "US/Pacific": "Pacific Standard Time", "US/Samoa": "UTC-11", "UTC": "UTC", "Universal": "UTC", "W-SU": "Russian Standard Time", "Zulu": "UTC", }
35,165
Python
.py
734
42.949591
68
0.688693
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
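A minimal usage sketch of the mapping above (an assumption, not repository code): it presumes the IANA-to-Windows dict is exposed as windows_tz.tz_win, which is the name lib/tzlocal/utils.py below looks it up under.

# Hypothetical lookup against the mapping above; `tz_win` is assumed to be
# the dict's binding, as referenced by tzlocal's utils module.
from tzlocal import windows_tz

print(windows_tz.tz_win["Europe/Amsterdam"])  # -> "W. Europe Standard Time"
print(windows_tz.tz_win.get("Not/AZone"))     # -> None for unmapped names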
8,480
unix.py
rembo10_headphones/lib/tzlocal/unix.py
import logging
import os
import re
import sys
import warnings
from datetime import timezone

from tzlocal import utils

if sys.version_info >= (3, 9):
    import zoneinfo  # pragma: no cover
else:
    from backports import zoneinfo  # pragma: no cover

_cache_tz = None
_cache_tz_name = None

log = logging.getLogger("tzlocal")


def _get_localzone_name(_root="/"):
    """Tries to find the local timezone configuration.

    This method finds the timezone name, if it can, or it returns None.

    The parameter _root makes the function look for files like /etc/localtime
    beneath the _root directory. This is primarily used by the tests.
    In normal usage you call the function without parameters."""

    # First try the ENV setting.
    tzenv = utils._tz_name_from_env()
    if tzenv:
        return tzenv

    # Are we under Termux on Android?
    if os.path.exists(os.path.join(_root, "system/bin/getprop")):
        log.debug("This looks like Termux")

        import subprocess

        try:
            androidtz = (
                subprocess.check_output(["getprop", "persist.sys.timezone"])
                .strip()
                .decode()
            )
            return androidtz
        except (OSError, subprocess.CalledProcessError):
            # proot environment or failed to getprop
            log.debug("It's not termux?")
            pass

    # Now look for distribution specific configuration files
    # that contain the timezone name.

    # Stick all of them in a dict, to compare later.
    found_configs = {}

    for configfile in ("etc/timezone", "var/db/zoneinfo"):
        tzpath = os.path.join(_root, configfile)
        try:
            with open(tzpath) as tzfile:
                data = tzfile.read()
                log.debug(f"{tzpath} found, contents:\n {data}")

                etctz = data.strip("/ \t\r\n")
                if not etctz:
                    # Empty file, skip
                    continue
                for etctz in etctz.splitlines():
                    # Get rid of host definitions and comments:
                    if " " in etctz:
                        etctz, dummy = etctz.split(" ", 1)
                    if "#" in etctz:
                        etctz, dummy = etctz.split("#", 1)
                    if not etctz:
                        continue

                    found_configs[tzpath] = etctz.replace(" ", "_")

        except (OSError, UnicodeDecodeError):
            # File doesn't exist or is a directory, or it's a binary file.
            continue

    # CentOS has a ZONE setting in /etc/sysconfig/clock,
    # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
    # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
    # We look through these files for a timezone:

    zone_re = re.compile(r"\s*ZONE\s*=\s*\"")
    timezone_re = re.compile(r"\s*TIMEZONE\s*=\s*\"")
    end_re = re.compile('"')

    for filename in ("etc/sysconfig/clock", "etc/conf.d/clock"):
        tzpath = os.path.join(_root, filename)
        try:
            with open(tzpath, "rt") as tzfile:
                data = tzfile.readlines()
                log.debug(f"{tzpath} found, contents:\n {data}")

            for line in data:
                # Look for the ZONE= setting.
                match = zone_re.match(line)
                if match is None:
                    # No ZONE= setting. Look for the TIMEZONE= setting.
                    match = timezone_re.match(line)
                if match is not None:
                    # Some setting existed
                    line = line[match.end() :]
                    etctz = line[: end_re.search(line).start()]

                    # We found a timezone
                    found_configs[tzpath] = etctz.replace(" ", "_")

        except (OSError, UnicodeDecodeError):
            # UnicodeDecode handles when clock is symlink to /etc/localtime
            continue

    # systemd distributions use symlinks that include the zone name,
    # see manpage of localtime(5) and timedatectl(1)
    tzpath = os.path.join(_root, "etc/localtime")
    if os.path.exists(tzpath) and os.path.islink(tzpath):
        log.debug(f"{tzpath} found")
        etctz = os.path.realpath(tzpath)
        start = etctz.find("/") + 1
        while start != 0:
            etctz = etctz[start:]
            try:
                zoneinfo.ZoneInfo(etctz)
                tzinfo = f"{tzpath} is a symlink to"
                found_configs[tzinfo] = etctz.replace(" ", "_")
                # Only need first valid relative path in symlink.
                break
            except zoneinfo.ZoneInfoNotFoundError:
                pass
            start = etctz.find("/") + 1

    if len(found_configs) > 0:
        log.debug(f"{len(found_configs)} found:\n {found_configs}")

        # We found some explicit config of some sort!
        if len(found_configs) > 1:
            # Uh-oh, multiple configs. See if they match:
            unique_tzs = set()
            zoneinfopath = os.path.join(_root, "usr", "share", "zoneinfo")
            directory_depth = len(zoneinfopath.split(os.path.sep))

            for tzname in found_configs.values():
                # Look them up in /usr/share/zoneinfo, and find what they
                # really point to:
                path = os.path.realpath(os.path.join(zoneinfopath, *tzname.split("/")))
                real_zone_name = "/".join(path.split(os.path.sep)[directory_depth:])
                unique_tzs.add(real_zone_name)

            if len(unique_tzs) != 1:
                message = "Multiple conflicting time zone configurations found:\n"
                for key, value in found_configs.items():
                    message += f"{key}: {value}\n"
                message += "Fix the configuration, or set the time zone in a TZ environment variable.\n"
                raise zoneinfo.ZoneInfoNotFoundError(message)

        # We found exactly one config! Use it.
        return list(found_configs.values())[0]


def _get_localzone(_root="/"):
    """Creates a timezone object from the timezone name.

    If there is no timezone config, it will try to create a file from the
    localtime timezone, and if there isn't one, it will default to UTC.

    The parameter _root makes the function look for files like /etc/localtime
    beneath the _root directory. This is primarily used by the tests.
    In normal usage you call the function without parameters."""

    # First try the ENV setting.
    tzenv = utils._tz_from_env()
    if tzenv:
        return tzenv

    tzname = _get_localzone_name(_root)
    if tzname is None:
        # No explicit setting existed. Use localtime
        log.debug("No explicit setting existed. Use localtime")
        for filename in ("etc/localtime", "usr/local/etc/localtime"):
            tzpath = os.path.join(_root, filename)

            if not os.path.exists(tzpath):
                continue
            with open(tzpath, "rb") as tzfile:
                tz = zoneinfo.ZoneInfo.from_file(tzfile, key="local")
                break
        else:
            warnings.warn("Can not find any timezone configuration, defaulting to UTC.")
            tz = timezone.utc
    else:
        tz = zoneinfo.ZoneInfo(tzname)

    if _root == "/":
        # We are using a file in etc to name the timezone.
        # Verify that the timezone specified there is actually used:
        utils.assert_tz_offset(tz, error=False)
    return tz


def get_localzone_name() -> str:
    """Get the computer's configured local timezone name, if any."""
    global _cache_tz_name
    if _cache_tz_name is None:
        _cache_tz_name = _get_localzone_name()

    return _cache_tz_name


def get_localzone() -> zoneinfo.ZoneInfo:
    """Get the computer's configured local timezone, if any."""
    global _cache_tz
    if _cache_tz is None:
        _cache_tz = _get_localzone()

    return _cache_tz


def reload_localzone() -> zoneinfo.ZoneInfo:
    """Reload the cached localzone. You need to call this if the timezone has changed."""
    global _cache_tz_name
    global _cache_tz
    _cache_tz_name = _get_localzone_name()
    _cache_tz = _get_localzone()
    return _cache_tz
8,168
Python
.py
184
33.896739
104
0.592415
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
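A hedged sketch of the resolution order implemented above: the TZ environment variable is consulted before any distro config file or the /etc/localtime symlink, so setting it short-circuits the search. The underscored functions are internal; the cached public entry points are re-exported by the package __init__ further below.

# Illustrative only: drive the module through the TZ environment variable.
import os
from tzlocal import unix

os.environ["TZ"] = "Europe/Amsterdam"
print(unix._get_localzone_name())  # -> "Europe/Amsterdam"
print(unix._get_localzone())       # -> zoneinfo.ZoneInfo(key='Europe/Amsterdam')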
8,481
utils.py
rembo10_headphones/lib/tzlocal/utils.py
import calendar
import datetime
import logging
import os
import time
import warnings

try:
    import zoneinfo  # pragma: no cover
except ImportError:
    from backports import zoneinfo  # pragma: no cover

from tzlocal import windows_tz

log = logging.getLogger("tzlocal")


def get_tz_offset(tz):
    """Get timezone's offset using built-in function datetime.utcoffset()."""
    return int(datetime.datetime.now(tz).utcoffset().total_seconds())


def assert_tz_offset(tz, error=True):
    """Assert that the system's timezone offset equals the timezone offset found.

    If they don't match, we probably have a misconfiguration, for example, an
    incorrect timezone set in /etc/timezone file in systemd distributions.

    If error is True, this method will raise a ValueError, otherwise it will
    emit a warning.
    """
    tz_offset = get_tz_offset(tz)
    system_offset = calendar.timegm(time.localtime()) - calendar.timegm(time.gmtime())
    # No one has timezone offsets less than a minute, so this should be close enough:
    if abs(tz_offset - system_offset) > 60:
        msg = (
            f"Timezone offset does not match system offset: {tz_offset} != {system_offset}. "
            "Please, check your config files."
        )
        if error:
            raise ValueError(msg)
        warnings.warn(msg)


def _tz_name_from_env(tzenv=None):
    if tzenv is None:
        tzenv = os.environ.get("TZ")

    if not tzenv:
        return None

    log.debug(f"Found a TZ environment: {tzenv}")

    if tzenv[0] == ":":
        tzenv = tzenv[1:]

    if tzenv in windows_tz.tz_win:
        # Yup, it's a timezone
        return tzenv

    if os.path.isabs(tzenv) and os.path.exists(tzenv):
        # It's a file specification, expand it, if possible
        parts = os.path.realpath(tzenv).split(os.sep)

        # Is it a zone info zone?
        possible_tz = "/".join(parts[-2:])
        if possible_tz in windows_tz.tz_win:
            # Yup, it is
            return possible_tz

        # Maybe it's a short one, like UTC?
        if parts[-1] in windows_tz.tz_win:
            # Indeed
            return parts[-1]

    log.debug("TZ does not contain a time zone name")
    return None


def _tz_from_env(tzenv=None):
    if tzenv is None:
        tzenv = os.environ.get("TZ")

    if not tzenv:
        return None

    # Some weird format that exists:
    if tzenv[0] == ":":
        tzenv = tzenv[1:]

    # TZ specifies a file
    if os.path.isabs(tzenv) and os.path.exists(tzenv):
        # Try to see if we can figure out the name
        tzname = _tz_name_from_env(tzenv)
        if not tzname:
            # Nope, not a standard timezone name, just take the filename
            tzname = tzenv.split(os.sep)[-1]
        with open(tzenv, "rb") as tzfile:
            return zoneinfo.ZoneInfo.from_file(tzfile, key=tzname)

    # TZ must specify a zoneinfo zone.
    try:
        tz = zoneinfo.ZoneInfo(tzenv)
        # That worked, so we return this:
        return tz
    except zoneinfo.ZoneInfoNotFoundError:
        # Nope, it's something like "PST4DST" etc, we can't handle that.
        raise zoneinfo.ZoneInfoNotFoundError(
            f"tzlocal() does not support non-zoneinfo timezones like {tzenv}. \n"
            "Please use a timezone in the form of Continent/City"
        ) from None
3,329
Python
.py
86
31.593023
93
0.645633
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
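A short sketch of the offset check above: get_tz_offset() reports a zone's current UTC offset in seconds, and assert_tz_offset() compares it against the system clock's offset.

from datetime import timezone
from tzlocal import utils

print(utils.get_tz_offset(timezone.utc))           # -> 0
utils.assert_tz_offset(timezone.utc, error=False)  # warns on mismatch instead of raising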
8,482
__init__.py
rembo10_headphones/lib/tzlocal/__init__.py
import sys

if sys.platform == "win32":
    from tzlocal.win32 import (
        get_localzone,
        get_localzone_name,
        reload_localzone,
    )
else:
    from tzlocal.unix import get_localzone, get_localzone_name, reload_localzone

from tzlocal.utils import assert_tz_offset

__all__ = [
    "get_localzone",
    "get_localzone_name",
    "reload_localzone",
    "assert_tz_offset",
]
396
Python
.py
16
20.3125
80
0.679045
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
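The platform dispatch above defines the package's public surface; callers import from tzlocal itself rather than from tzlocal.unix or tzlocal.win32.

from tzlocal import get_localzone, get_localzone_name, reload_localzone

print(get_localzone_name())  # e.g. "Europe/Amsterdam"; cached after the first call
print(get_localzone())       # the matching tzinfo object, also cached
reload_localzone()           # re-runs detection after a system timezone change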
8,483
formatter.py
rembo10_headphones/lib/bs4/formatter.py
from bs4.dammit import EntitySubstitution

class Formatter(EntitySubstitution):
    """Describes a strategy to use when outputting a parse tree to a string.

    Some parts of this strategy come from the distinction between
    HTML4, HTML5, and XML. Others are configurable by the user.

    Formatters are passed in as the `formatter` argument to methods
    like `PageElement.encode`. Most people won't need to think about
    formatters, and most people who need to think about them can pass in
    one of these predefined strings as `formatter` rather than making a
    new Formatter object:

    For HTML documents:
     * 'html' - HTML entity substitution for generic HTML documents. (default)
     * 'html5' - HTML entity substitution for HTML5 documents, as
       well as some optimizations in the way tags are rendered.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid HTML.
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.

    For XML documents:
     * 'html' - Entity substitution for XHTML documents.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid XML. (default)
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.
    """
    # Registries of XML and HTML formatters.
    XML_FORMATTERS = {}
    HTML_FORMATTERS = {}

    HTML = 'html'
    XML = 'xml'

    HTML_DEFAULTS = dict(
        cdata_containing_tags=set(["script", "style"]),
    )

    def _default(self, language, value, kwarg):
        if value is not None:
            return value
        if language == self.XML:
            return set()
        return self.HTML_DEFAULTS[kwarg]

    def __init__(
            self, language=None, entity_substitution=None,
            void_element_close_prefix='/', cdata_containing_tags=None,
            empty_attributes_are_booleans=False,
    ):
        """Constructor.

        :param language: This should be Formatter.XML if you are formatting
           XML markup and Formatter.HTML if you are formatting HTML markup.

        :param entity_substitution: A function to call to replace special
           characters with XML/HTML entities. For examples, see
           bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
        :param void_element_close_prefix: By default, void elements
           are represented as <tag/> (XML rules) rather than <tag>
           (HTML rules). To get <tag>, pass in the empty string.
        :param cdata_containing_tags: The list of tags that are defined
           as containing CDATA in this dialect. For example, in HTML,
           <script> and <style> tags are defined as containing CDATA,
           and their contents should not be formatted.
        :param empty_attributes_are_booleans: Render attributes whose value
            is the empty string as HTML-style boolean attributes.
            (Attributes whose value is None are always rendered this way.)
        """
        self.language = language
        self.entity_substitution = entity_substitution
        self.void_element_close_prefix = void_element_close_prefix
        self.cdata_containing_tags = self._default(
            language, cdata_containing_tags, 'cdata_containing_tags'
        )
        self.empty_attributes_are_booleans = empty_attributes_are_booleans

    def substitute(self, ns):
        """Process a string that needs to undergo entity substitution.
        This may be a string encountered in an attribute value or as
        text.

        :param ns: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        if not self.entity_substitution:
            return ns
        from .element import NavigableString
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in self.cdata_containing_tags):
            # Do nothing.
            return ns
        # Substitute.
        return self.entity_substitution(ns)

    def attribute_value(self, value):
        """Process the value of an attribute.

        :param value: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        return self.substitute(value)

    def attributes(self, tag):
        """Reorder a tag's attributes however you want.

        By default, attributes are sorted alphabetically. This makes
        behavior consistent between Python 2 and Python 3, and preserves
        backwards compatibility with older versions of Beautiful Soup.

        If `empty_attributes_are_booleans` is True, then attributes whose
        values are set to the empty string will be treated as boolean
        attributes.
        """
        if tag.attrs is None:
            return []
        return sorted(
            (k, (None if self.empty_attributes_are_booleans and v == '' else v))
            for k, v in list(tag.attrs.items())
        )

class HTMLFormatter(Formatter):
    """A generic Formatter for HTML."""
    REGISTRY = {}
    def __init__(self, *args, **kwargs):
        return super(HTMLFormatter, self).__init__(self.HTML, *args, **kwargs)

class XMLFormatter(Formatter):
    """A generic Formatter for XML."""
    REGISTRY = {}
    def __init__(self, *args, **kwargs):
        return super(XMLFormatter, self).__init__(self.XML, *args, **kwargs)

# Set up aliases for the default formatters.
HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html,
    void_element_close_prefix=None,
    empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(
    entity_substitution=None
)
XMLFormatter.REGISTRY["html"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
XMLFormatter.REGISTRY[None] = Formatter(
    Formatter.XML, entity_substitution=None
)
6,385
Python
.py
142
36.957746
80
0.677862
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
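A sketch of how the registry above is reached in practice: the predefined names are accepted by the `formatter` argument of bs4 output methods such as decode().

from bs4 import BeautifulSoup

soup = BeautifulSoup("<p>café & cream</p>", "html.parser")
print(soup.decode(formatter="html"))     # -> <p>caf&eacute; &amp; cream</p>
print(soup.decode(formatter="minimal"))  # only &amp; is substituted
print(soup.decode(formatter=None))       # no substitution at all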
8,484
element.py
rembo10_headphones/lib/bs4/element.py
# Use of this source code is governed by the MIT license. __license__ = "MIT" try: from collections.abc import Callable # Python 3.6 except ImportError as e: from collections import Callable import re import sys import warnings try: import soupsieve except ImportError as e: soupsieve = None warnings.warn( 'The soupsieve package is not installed. CSS selectors cannot be used.' ) from bs4.formatter import ( Formatter, HTMLFormatter, XMLFormatter, ) DEFAULT_OUTPUT_ENCODING = "utf-8" PY3K = (sys.version_info[0] > 2) nonwhitespace_re = re.compile(r"\S+") # NOTE: This isn't used as of 4.7.0. I'm leaving it for a little bit on # the off chance someone imported it for their own use. whitespace_re = re.compile(r"\s+") def _alias(attr): """Alias one attribute name to another for backward compatibility""" @property def alias(self): return getattr(self, attr) @alias.setter def alias(self, value): return setattr(self, attr, value) return alias # These encodings are recognized by Python (so PageElement.encode # could theoretically support them) but XML and HTML don't recognize # them (so they should not show up in an XML or HTML document as that # document's encoding). # # If an XML document is encoded in one of these encodings, no encoding # will be mentioned in the XML declaration. If an HTML document is # encoded in one of these encodings, and the HTML document has a # <meta> tag that mentions an encoding, the encoding will be given as # the empty string. # # Source: # https://docs.python.org/3/library/codecs.html#python-specific-encodings PYTHON_SPECIFIC_ENCODINGS = set([ "idna", "mbcs", "oem", "palmos", "punycode", "raw_unicode_escape", "undefined", "unicode_escape", "raw-unicode-escape", "unicode-escape", "string-escape", "string_escape", ]) class NamespacedAttribute(str): """A namespaced string (e.g. 'xml:lang') that remembers the namespace ('xml') and the name ('lang') that were used to create it. """ def __new__(cls, prefix, name=None, namespace=None): if not name: # This is the default namespace. Its name "has no value" # per https://www.w3.org/TR/xml-names/#defaulting name = None if not name: obj = str.__new__(cls, prefix) elif not prefix: # Not really namespaced. obj = str.__new__(cls, name) else: obj = str.__new__(cls, prefix + ":" + name) obj.prefix = prefix obj.name = name obj.namespace = namespace return obj class AttributeValueWithCharsetSubstitution(str): """A stand-in object for a character encoding specified in HTML.""" class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): """A generic stand-in for the value of a meta tag's 'charset' attribute. When Beautiful Soup parses the markup '<meta charset="utf8">', the value of the 'charset' attribute will be one of these objects. """ def __new__(cls, original_value): obj = str.__new__(cls, original_value) obj.original_value = original_value return obj def encode(self, encoding): """When an HTML document is being encoded to a given encoding, the value of a meta tag's 'charset' is the name of the encoding. """ if encoding in PYTHON_SPECIFIC_ENCODINGS: return '' return encoding class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution): """A generic stand-in for the value of a meta tag's 'content' attribute. When Beautiful Soup parses the markup: <meta http-equiv="content-type" content="text/html; charset=utf8"> The value of the 'content' attribute will be one of these objects. 
""" CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M) def __new__(cls, original_value): match = cls.CHARSET_RE.search(original_value) if match is None: # No substitution necessary. return str.__new__(str, original_value) obj = str.__new__(cls, original_value) obj.original_value = original_value return obj def encode(self, encoding): if encoding in PYTHON_SPECIFIC_ENCODINGS: return '' def rewrite(match): return match.group(1) + encoding return self.CHARSET_RE.sub(rewrite, self.original_value) class PageElement(object): """Contains the navigational information for some part of the page: that is, its current location in the parse tree. NavigableString, Tag, etc. are all subclasses of PageElement. """ def setup(self, parent=None, previous_element=None, next_element=None, previous_sibling=None, next_sibling=None): """Sets up the initial relations between this element and other elements. :param parent: The parent of this element. :param previous_element: The element parsed immediately before this one. :param next_element: The element parsed immediately before this one. :param previous_sibling: The most recently encountered element on the same level of the parse tree as this one. :param previous_sibling: The next element to be encountered on the same level of the parse tree as this one. """ self.parent = parent self.previous_element = previous_element if previous_element is not None: self.previous_element.next_element = self self.next_element = next_element if self.next_element is not None: self.next_element.previous_element = self self.next_sibling = next_sibling if self.next_sibling is not None: self.next_sibling.previous_sibling = self if (previous_sibling is None and self.parent is not None and self.parent.contents): previous_sibling = self.parent.contents[-1] self.previous_sibling = previous_sibling if previous_sibling is not None: self.previous_sibling.next_sibling = self def format_string(self, s, formatter): """Format the given string using the given formatter. :param s: A string. :param formatter: A Formatter object, or a string naming one of the standard formatters. """ if formatter is None: return s if not isinstance(formatter, Formatter): formatter = self.formatter_for_name(formatter) output = formatter.substitute(s) return output def formatter_for_name(self, formatter): """Look up or create a Formatter for the given identifier, if necessary. :param formatter: Can be a Formatter object (used as-is), a function (used as the entity substitution hook for an XMLFormatter or HTMLFormatter), or a string (used to look up an XMLFormatter or HTMLFormatter in the appropriate registry. """ if isinstance(formatter, Formatter): return formatter if self._is_xml: c = XMLFormatter else: c = HTMLFormatter if isinstance(formatter, Callable): return c(entity_substitution=formatter) return c.REGISTRY[formatter] @property def _is_xml(self): """Is this element part of an XML tree or an HTML tree? This is used in formatter_for_name, when deciding whether an XMLFormatter or HTMLFormatter is more appropriate. It can be inefficient, but it should be called very rarely. """ if self.known_xml is not None: # Most of the time we will have determined this when the # document is parsed. return self.known_xml # Otherwise, it's likely that this element was created by # direct invocation of the constructor from within the user's # Python code. if self.parent is None: # This is the top-level object. It should have .known_xml set # from tree creation. If not, take a guess--BS is usually # used on HTML markup. 
return getattr(self, 'is_xml', False) return self.parent._is_xml nextSibling = _alias("next_sibling") # BS3 previousSibling = _alias("previous_sibling") # BS3 default = object() def _all_strings(self, strip=False, types=default): """Yield all strings of certain classes, possibly stripping them. This is implemented differently in Tag and NavigableString. """ raise NotImplementedError() @property def stripped_strings(self): """Yield all strings in this PageElement, stripping them first. :yield: A sequence of stripped strings. """ for string in self._all_strings(True): yield string def get_text(self, separator="", strip=False, types=default): """Get all child strings of this PageElement, concatenated using the given separator. :param separator: Strings will be concatenated using this separator. :param strip: If True, strings will be stripped before being concatenated. :param types: A tuple of NavigableString subclasses. Any strings of a subclass not found in this list will be ignored. Although there are exceptions, the default behavior in most cases is to consider only NavigableString and CData objects. That means no comments, processing instructions, etc. :return: A string. """ return separator.join([s for s in self._all_strings( strip, types=types)]) getText = get_text text = property(get_text) def replace_with(self, *args): """Replace this PageElement with one or more PageElements, keeping the rest of the tree the same. :param args: One or more PageElements. :return: `self`, no longer part of the tree. """ if self.parent is None: raise ValueError( "Cannot replace one element with another when the " "element to be replaced is not part of a tree.") if len(args) == 1 and args[0] is self: return if any(x is self.parent for x in args): raise ValueError("Cannot replace a Tag with its parent.") old_parent = self.parent my_index = self.parent.index(self) self.extract(_self_index=my_index) for idx, replace_with in enumerate(args, start=my_index): old_parent.insert(idx, replace_with) return self replaceWith = replace_with # BS3 def unwrap(self): """Replace this PageElement with its contents. :return: `self`, no longer part of the tree. """ my_parent = self.parent if self.parent is None: raise ValueError( "Cannot replace an element with its contents when that" "element is not part of a tree.") my_index = self.parent.index(self) self.extract(_self_index=my_index) for child in reversed(self.contents[:]): my_parent.insert(my_index, child) return self replace_with_children = unwrap replaceWithChildren = unwrap # BS3 def wrap(self, wrap_inside): """Wrap this PageElement inside another one. :param wrap_inside: A PageElement. :return: `wrap_inside`, occupying the position in the tree that used to be occupied by `self`, and with `self` inside it. """ me = self.replace_with(wrap_inside) wrap_inside.append(me) return wrap_inside def extract(self, _self_index=None): """Destructively rips this element out of the tree. :param _self_index: The location of this element in its parent's .contents, if known. Passing this in allows for a performance optimization. :return: `self`, no longer part of the tree. """ if self.parent is not None: if _self_index is None: _self_index = self.parent.index(self) del self.parent.contents[_self_index] #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. 
last_child = self._last_descendant() next_element = last_child.next_element if (self.previous_element is not None and self.previous_element is not next_element): self.previous_element.next_element = next_element if next_element is not None and next_element is not self.previous_element: next_element.previous_element = self.previous_element self.previous_element = None last_child.next_element = None self.parent = None if (self.previous_sibling is not None and self.previous_sibling is not self.next_sibling): self.previous_sibling.next_sibling = self.next_sibling if (self.next_sibling is not None and self.next_sibling is not self.previous_sibling): self.next_sibling.previous_sibling = self.previous_sibling self.previous_sibling = self.next_sibling = None return self def _last_descendant(self, is_initialized=True, accept_self=True): """Finds the last element beneath this object to be parsed. :param is_initialized: Has `setup` been called on this PageElement yet? :param accept_self: Is `self` an acceptable answer to the question? """ if is_initialized and self.next_sibling is not None: last_child = self.next_sibling.previous_element else: last_child = self while isinstance(last_child, Tag) and last_child.contents: last_child = last_child.contents[-1] if not accept_self and last_child is self: last_child = None return last_child # BS3: Not part of the API! _lastRecursiveChild = _last_descendant def insert(self, position, new_child): """Insert a new PageElement in the list of this PageElement's children. This works the same way as `list.insert`. :param position: The numeric position that should be occupied in `self.children` by the new PageElement. :param new_child: A PageElement. """ if new_child is None: raise ValueError("Cannot insert None into a tag.") if new_child is self: raise ValueError("Cannot insert a tag into itself.") if (isinstance(new_child, str) and not isinstance(new_child, NavigableString)): new_child = NavigableString(new_child) from bs4 import BeautifulSoup if isinstance(new_child, BeautifulSoup): # We don't want to end up with a situation where one BeautifulSoup # object contains another. Insert the children one at a time. for subchild in list(new_child.contents): self.insert(position, subchild) position += 1 return position = min(position, len(self.contents)) if hasattr(new_child, 'parent') and new_child.parent is not None: # We're 'inserting' an element that's already one # of this object's children. if new_child.parent is self: current_index = self.index(new_child) if current_index < position: # We're moving this element further down the list # of this object's children. That means that when # we extract this element, our target index will # jump down one. 
position -= 1 new_child.extract() new_child.parent = self previous_child = None if position == 0: new_child.previous_sibling = None new_child.previous_element = self else: previous_child = self.contents[position - 1] new_child.previous_sibling = previous_child new_child.previous_sibling.next_sibling = new_child new_child.previous_element = previous_child._last_descendant(False) if new_child.previous_element is not None: new_child.previous_element.next_element = new_child new_childs_last_element = new_child._last_descendant(False) if position >= len(self.contents): new_child.next_sibling = None parent = self parents_next_sibling = None while parents_next_sibling is None and parent is not None: parents_next_sibling = parent.next_sibling parent = parent.parent if parents_next_sibling is not None: # We found the element that comes next in the document. break if parents_next_sibling is not None: new_childs_last_element.next_element = parents_next_sibling else: # The last element of this tag is the last element in # the document. new_childs_last_element.next_element = None else: next_child = self.contents[position] new_child.next_sibling = next_child if new_child.next_sibling is not None: new_child.next_sibling.previous_sibling = new_child new_childs_last_element.next_element = next_child if new_childs_last_element.next_element is not None: new_childs_last_element.next_element.previous_element = new_childs_last_element self.contents.insert(position, new_child) def append(self, tag): """Appends the given PageElement to the contents of this one. :param tag: A PageElement. """ self.insert(len(self.contents), tag) def extend(self, tags): """Appends the given PageElements to this one's contents. :param tags: A list of PageElements. """ if isinstance(tags, Tag): # Calling self.append() on another tag's contents will change # the list we're iterating over. Make a list that won't # change. tags = list(tags.contents) for tag in tags: self.append(tag) def insert_before(self, *args): """Makes the given element(s) the immediate predecessor of this one. All the elements will have the same parent, and the given elements will be immediately before this one. :param args: One or more PageElements. """ parent = self.parent if parent is None: raise ValueError( "Element has no parent, so 'before' has no meaning.") if any(x is self for x in args): raise ValueError("Can't insert an element before itself.") for predecessor in args: # Extract first so that the index won't be screwed up if they # are siblings. if isinstance(predecessor, PageElement): predecessor.extract() index = parent.index(self) parent.insert(index, predecessor) def insert_after(self, *args): """Makes the given element(s) the immediate successor of this one. The elements will have the same parent, and the given elements will be immediately after this one. :param args: One or more PageElements. """ # Do all error checking before modifying the tree. parent = self.parent if parent is None: raise ValueError( "Element has no parent, so 'after' has no meaning.") if any(x is self for x in args): raise ValueError("Can't insert an element after itself.") offset = 0 for successor in args: # Extract first so that the index won't be screwed up if they # are siblings. 
if isinstance(successor, PageElement): successor.extract() index = parent.index(self) parent.insert(index+1+offset, successor) offset += 1 def find_next(self, name=None, attrs={}, text=None, **kwargs): """Find the first PageElement that matches the given criteria and appears later in the document than this PageElement. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one(self.find_all_next, name, attrs, text, **kwargs) findNext = find_next # BS3 def find_all_next(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Find all PageElements that match the given criteria and appear later in the document than this PageElement. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet containing PageElements. """ return self._find_all(name, attrs, text, limit, self.next_elements, **kwargs) findAllNext = find_all_next # BS3 def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs): """Find the closest sibling to this PageElement that matches the given criteria and appears later in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one(self.find_next_siblings, name, attrs, text, **kwargs) findNextSibling = find_next_sibling # BS3 def find_next_siblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Find all siblings of this PageElement that match the given criteria and appear later in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ return self._find_all(name, attrs, text, limit, self.next_siblings, **kwargs) findNextSiblings = find_next_siblings # BS3 fetchNextSiblings = find_next_siblings # BS2 def find_previous(self, name=None, attrs={}, text=None, **kwargs): """Look backwards in the document from this PageElement and find the first PageElement that matches the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. 
:kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one( self.find_all_previous, name, attrs, text, **kwargs) findPrevious = find_previous # BS3 def find_all_previous(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Look backwards in the document from this PageElement and find all PageElements that match the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ return self._find_all(name, attrs, text, limit, self.previous_elements, **kwargs) findAllPrevious = find_all_previous # BS3 fetchPrevious = find_all_previous # BS2 def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this PageElement that matches the given criteria and appears earlier in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one(self.find_previous_siblings, name, attrs, text, **kwargs) findPreviousSibling = find_previous_sibling # BS3 def find_previous_siblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all siblings to this PageElement that match the given criteria and appear earlier in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ return self._find_all(name, attrs, text, limit, self.previous_siblings, **kwargs) findPreviousSiblings = find_previous_siblings # BS3 fetchPreviousSiblings = find_previous_siblings # BS2 def find_parent(self, name=None, attrs={}, **kwargs): """Find the closest parent of this PageElement that matches the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ # NOTE: We can't use _find_one because findParents takes a different # set of arguments. r = None l = self.find_parents(name, attrs, 1, **kwargs) if l: r = l[0] return r findParent = find_parent # BS3 def find_parents(self, name=None, attrs={}, limit=None, **kwargs): """Find all parents of this PageElement that match the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. 
:param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_all(name, attrs, None, limit, self.parents, **kwargs) findParents = find_parents # BS3 fetchParents = find_parents # BS2 @property def next(self): """The PageElement, if any, that was parsed just after this one. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self.next_element @property def previous(self): """The PageElement, if any, that was parsed just before this one. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self.previous_element #These methods do the real heavy lifting. def _find_one(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _find_all(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if text is None and 'string' in kwargs: text = kwargs['string'] del kwargs['string'] if isinstance(name, SoupStrainer): strainer = name else: strainer = SoupStrainer(name, attrs, text, **kwargs) if text is None and not limit and not attrs and not kwargs: if name is True or name is None: # Optimization to find all tags. result = (element for element in generator if isinstance(element, Tag)) return ResultSet(strainer, result) elif isinstance(name, str): # Optimization to find all tags with a given name. if name.count(':') == 1: # This is a name with a prefix. If this is a namespace-aware document, # we need to match the local name against tag.name. If not, # we need to match the fully-qualified name against tag.name. prefix, local_name = name.split(':', 1) else: prefix = None local_name = name result = (element for element in generator if isinstance(element, Tag) and ( element.name == name ) or ( element.name == local_name and (prefix is None or element.prefix == prefix) ) ) return ResultSet(strainer, result) results = ResultSet(strainer) while True: try: i = next(generator) except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These generators can be used to navigate starting from both #NavigableStrings and Tags. @property def next_elements(self): """All PageElements that were parsed after this one. :yield: A sequence of PageElements. """ i = self.next_element while i is not None: yield i i = i.next_element @property def next_siblings(self): """All PageElements that are siblings of this one but were parsed later. :yield: A sequence of PageElements. """ i = self.next_sibling while i is not None: yield i i = i.next_sibling @property def previous_elements(self): """All PageElements that were parsed before this one. :yield: A sequence of PageElements. """ i = self.previous_element while i is not None: yield i i = i.previous_element @property def previous_siblings(self): """All PageElements that are siblings of this one but were parsed earlier. :yield: A sequence of PageElements. """ i = self.previous_sibling while i is not None: yield i i = i.previous_sibling @property def parents(self): """All PageElements that are parents of this PageElement. :yield: A sequence of PageElements. 
""" i = self.parent while i is not None: yield i i = i.parent @property def decomposed(self): """Check whether a PageElement has been decomposed. :rtype: bool """ return getattr(self, '_decomposed', False) or False # Old non-property versions of the generators, for backwards # compatibility with BS3. def nextGenerator(self): return self.next_elements def nextSiblingGenerator(self): return self.next_siblings def previousGenerator(self): return self.previous_elements def previousSiblingGenerator(self): return self.previous_siblings def parentGenerator(self): return self.parents class NavigableString(str, PageElement): """A Python Unicode string that is part of a parse tree. When Beautiful Soup parses the markup <b>penguin</b>, it will create a NavigableString for the string "penguin". """ PREFIX = '' SUFFIX = '' # We can't tell just by looking at a string whether it's contained # in an XML document or an HTML document. known_xml = None def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, str): u = str.__new__(cls, value) else: u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) u.setup() return u def __copy__(self): """A copy of a NavigableString has the same contents and class as the original, but it is not connected to the parse tree. """ return type(self)(self) def __getnewargs__(self): return (str(self),) def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError( "'%s' object has no attribute '%s'" % ( self.__class__.__name__, attr)) def output_ready(self, formatter="minimal"): """Run the string through the provided formatter. :param formatter: A Formatter object, or a string naming one of the standard formatters. """ output = self.format_string(self, formatter) return self.PREFIX + output + self.SUFFIX @property def name(self): """Since a NavigableString is not a Tag, it has no .name. This property is implemented so that code like this doesn't crash when run on a mixture of Tag and NavigableString objects: [x.name for x in tag.children] """ return None @name.setter def name(self, name): """Prevent NavigableString.name from ever being set.""" raise AttributeError("A NavigableString cannot be given a name.") def _all_strings(self, strip=False, types=PageElement.default): """Yield all strings of certain classes, possibly stripping them. This makes it easy for NavigableString to implement methods like get_text() as conveniences, creating a consistent text-extraction API across all PageElements. :param strip: If True, all strings will be stripped before being yielded. :param types: A tuple of NavigableString subclasses. If this NavigableString isn't one of those subclasses, the sequence will be empty. By default, the subclasses considered are NavigableString and CData objects. That means no comments, processing instructions, etc. :yield: A sequence that either contains this string, or is empty. """ if types is self.default: # This is kept in Tag because it's full of subclasses of # this class, which aren't defined until later in the file. 
types = Tag.DEFAULT_INTERESTING_STRING_TYPES # Do nothing if the caller is looking for specific types of # string, and we're of a different type. my_type = type(self) if types is not None: if isinstance(types, type): # Looking for a single type. if my_type is not types: return elif my_type not in types: # Looking for one of a list of types. return value = self if strip: value = value.strip() if len(value) > 0: yield value strings = property(_all_strings) class PreformattedString(NavigableString): """A NavigableString not subject to the normal formatting rules. This is an abstract class used for special kinds of strings such as comments (the Comment class) and CDATA blocks (the CData class). """ PREFIX = '' SUFFIX = '' def output_ready(self, formatter=None): """Make this string ready for output by adding any subclass-specific prefix or suffix. :param formatter: A Formatter object, or a string naming one of the standard formatters. The string will be passed into the Formatter, but only to trigger any side effects: the return value is ignored. :return: The string, with any subclass-specific prefix and suffix added on. """ if formatter is not None: ignore = self.format_string(self, formatter) return self.PREFIX + self + self.SUFFIX class CData(PreformattedString): """A CDATA block.""" PREFIX = '<![CDATA[' SUFFIX = ']]>' class ProcessingInstruction(PreformattedString): """A SGML processing instruction.""" PREFIX = '<?' SUFFIX = '>' class XMLProcessingInstruction(ProcessingInstruction): """An XML processing instruction.""" PREFIX = '<?' SUFFIX = '?>' class Comment(PreformattedString): """An HTML or XML comment.""" PREFIX = '<!--' SUFFIX = '-->' class Declaration(PreformattedString): """An XML declaration.""" PREFIX = '<?' SUFFIX = '?>' class Doctype(PreformattedString): """A document type declaration.""" @classmethod def for_name_and_ids(cls, name, pub_id, system_id): """Generate an appropriate document type declaration for a given public ID and system ID. :param name: The name of the document's root element, e.g. 'html'. :param pub_id: The Formal Public Identifier for this document type, e.g. '-//W3C//DTD XHTML 1.1//EN' :param system_id: The system identifier for this document type, e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd' :return: A Doctype. """ value = name or '' if pub_id is not None: value += ' PUBLIC "%s"' % pub_id if system_id is not None: value += ' "%s"' % system_id elif system_id is not None: value += ' SYSTEM "%s"' % system_id return Doctype(value) PREFIX = '<!DOCTYPE ' SUFFIX = '>\n' class Stylesheet(NavigableString): """A NavigableString representing an stylesheet (probably CSS). Used to distinguish embedded stylesheets from textual content. """ pass class Script(NavigableString): """A NavigableString representing an executable script (probably Javascript). Used to distinguish executable code from textual content. """ pass class TemplateString(NavigableString): """A NavigableString representing a string found inside an HTML template embedded in a larger document. Used to distinguish such strings from the main body of the document. """ pass class Tag(PageElement): """Represents an HTML or XML tag that is part of a parse tree, along with its attributes and contents. When Beautiful Soup parses the markup <b>penguin</b>, it will create a Tag object representing the <b> tag. 
""" def __init__(self, parser=None, builder=None, name=None, namespace=None, prefix=None, attrs=None, parent=None, previous=None, is_xml=None, sourceline=None, sourcepos=None, can_be_empty_element=None, cdata_list_attributes=None, preserve_whitespace_tags=None, interesting_string_types=None, ): """Basic constructor. :param parser: A BeautifulSoup object. :param builder: A TreeBuilder. :param name: The name of the tag. :param namespace: The URI of this Tag's XML namespace, if any. :param prefix: The prefix for this Tag's XML namespace, if any. :param attrs: A dictionary of this Tag's attribute values. :param parent: The PageElement to use as this Tag's parent. :param previous: The PageElement that was parsed immediately before this tag. :param is_xml: If True, this is an XML tag. Otherwise, this is an HTML tag. :param sourceline: The line number where this tag was found in its source document. :param sourcepos: The character position within `sourceline` where this tag was found. :param can_be_empty_element: If True, this tag should be represented as <tag/>. If False, this tag should be represented as <tag></tag>. :param cdata_list_attributes: A list of attributes whose values should be treated as CDATA if they ever show up on this tag. :param preserve_whitespace_tags: A list of tag names whose contents should have their whitespace preserved. :param interesting_string_types: This is a NavigableString subclass or a tuple of them. When iterating over this Tag's strings in methods like Tag.strings or Tag.get_text, these are the types of strings that are interesting enough to be considered. The default is to consider NavigableString and CData the only interesting string subtypes. """ if parser is None: self.parser_class = None else: # We don't actually store the parser object: that lets extracted # chunks be garbage-collected. self.parser_class = parser.__class__ if name is None: raise ValueError("No value provided for new tag's name.") self.name = name self.namespace = namespace self.prefix = prefix if ((not builder or builder.store_line_numbers) and (sourceline is not None or sourcepos is not None)): self.sourceline = sourceline self.sourcepos = sourcepos if attrs is None: attrs = {} elif attrs: if builder is not None and builder.cdata_list_attributes: attrs = builder._replace_cdata_list_attribute_values( self.name, attrs) else: attrs = dict(attrs) else: attrs = dict(attrs) # If possible, determine ahead of time whether this tag is an # XML tag. if builder: self.known_xml = builder.is_xml else: self.known_xml = is_xml self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False if builder is None: # In the absence of a TreeBuilder, use whatever values were # passed in here. They're probably None, unless this is a copy of some # other tag. self.can_be_empty_element = can_be_empty_element self.cdata_list_attributes = cdata_list_attributes self.preserve_whitespace_tags = preserve_whitespace_tags self.interesting_string_types = interesting_string_types else: # Set up any substitutions for this tag, such as the charset in a META tag. builder.set_up_substitutions(self) # Ask the TreeBuilder whether this tag might be an empty-element tag. self.can_be_empty_element = builder.can_be_empty_element(name) # Keep track of the list of attributes of this tag that # might need to be treated as a list. # # For performance reasons, we store the whole data structure # rather than asking the question of every tag. 
Asking would # require building a new data structure every time, and # (unlike can_be_empty_element), we almost never need # to check this. self.cdata_list_attributes = builder.cdata_list_attributes # Keep track of the names that might cause this tag to be treated as a # whitespace-preserved tag. self.preserve_whitespace_tags = builder.preserve_whitespace_tags if self.name in builder.string_containers: # This sort of tag uses a special string container # subclass for most of its strings. When we ask the self.interesting_string_types = builder.string_containers[self.name] else: self.interesting_string_types = self.DEFAULT_INTERESTING_STRING_TYPES parserClass = _alias("parser_class") # BS3 def __copy__(self): """A copy of a Tag is a new Tag, unconnected to the parse tree. Its contents are a copy of the old Tag's contents. """ clone = type(self)( None, self.builder, self.name, self.namespace, self.prefix, self.attrs, is_xml=self._is_xml, sourceline=self.sourceline, sourcepos=self.sourcepos, can_be_empty_element=self.can_be_empty_element, cdata_list_attributes=self.cdata_list_attributes, preserve_whitespace_tags=self.preserve_whitespace_tags ) for attr in ('can_be_empty_element', 'hidden'): setattr(clone, attr, getattr(self, attr)) for child in self.contents: clone.append(child.__copy__()) return clone @property def is_empty_element(self): """Is this tag an empty-element tag? (aka a self-closing tag) A tag that has contents is never an empty-element tag. A tag that has no contents may or may not be an empty-element tag. It depends on the builder used to create the tag. If the builder has a designated list of empty-element tags, then only a tag whose name shows up in that list is considered an empty-element tag. If the builder has no designated list of empty-element tags, then any tag with no contents is an empty-element tag. """ return len(self.contents) == 0 and self.can_be_empty_element isSelfClosing = is_empty_element # BS3 @property def string(self): """Convenience property to get the single string within this PageElement. TODO It might make sense to have NavigableString.string return itself. :return: If this element has a single string child, return value is that string. If this element has one child tag, return value is the 'string' attribute of the child tag, recursively. If this element is itself a string, has no children, or has more than one child, return value is None. """ if len(self.contents) != 1: return None child = self.contents[0] if isinstance(child, NavigableString): return child return child.string @string.setter def string(self, string): """Replace this PageElement's contents with `string`.""" self.clear() self.append(string.__class__(string)) DEFAULT_INTERESTING_STRING_TYPES = (NavigableString, CData) def _all_strings(self, strip=False, types=PageElement.default): """Yield all strings of certain classes, possibly stripping them. :param strip: If True, all strings will be stripped before being yielded. :param types: A tuple of NavigableString subclasses. Any strings of a subclass not found in this list will be ignored. By default, the subclasses considered are the ones found in self.interesting_string_types. If that's not specified, only NavigableString and CData objects will be considered. That means no comments, processing instructions, etc. :yield: A sequence of strings. 
""" if types is self.default: types = self.interesting_string_types for descendant in self.descendants: if (types is None and not isinstance(descendant, NavigableString)): continue descendant_type = type(descendant) if isinstance(types, type): if descendant_type is not types: # We're not interested in strings of this type. continue elif types is not None and descendant_type not in types: # We're not interested in strings of this type. continue if strip: descendant = descendant.strip() if len(descendant) == 0: continue yield descendant strings = property(_all_strings) def decompose(self): """Recursively destroys this PageElement and its children. This element will be removed from the tree and wiped out; so will everything beneath it. The behavior of a decomposed PageElement is undefined and you should never use one for anything, but if you need to _check_ whether an element has been decomposed, you can use the `decomposed` property. """ self.extract() i = self while i is not None: n = i.next_element i.__dict__.clear() i.contents = [] i._decomposed = True i = n def clear(self, decompose=False): """Wipe out all children of this PageElement by calling extract() on them. :param decompose: If this is True, decompose() (a more destructive method) will be called instead of extract(). """ if decompose: for element in self.contents[:]: if isinstance(element, Tag): element.decompose() else: element.extract() else: for element in self.contents[:]: element.extract() def smooth(self): """Smooth out this element's children by consolidating consecutive strings. This makes pretty-printed output look more natural following a lot of operations that modified the tree. """ # Mark the first position of every pair of children that need # to be consolidated. Do this rather than making a copy of # self.contents, since in most cases very few strings will be # affected. marked = [] for i, a in enumerate(self.contents): if isinstance(a, Tag): # Recursively smooth children. a.smooth() if i == len(self.contents)-1: # This is the last item in .contents, and it's not a # tag. There's no chance it needs any work. continue b = self.contents[i+1] if (isinstance(a, NavigableString) and isinstance(b, NavigableString) and not isinstance(a, PreformattedString) and not isinstance(b, PreformattedString) ): marked.append(i) # Go over the marked positions in reverse order, so that # removing items from .contents won't affect the remaining # positions. for i in reversed(marked): a = self.contents[i] b = self.contents[i+1] b.extract() n = NavigableString(a+b) a.replace_with(n) def index(self, element): """Find the index of a child by identity, not value. Avoids issues with tag.contents.index(element) getting the index of equal elements. :param element: Look for this PageElement in `self.contents`. """ for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self.attrs.get(key, default) def get_attribute_list(self, key, default=None): """The same as get(), but always returns a list. :param key: The attribute to look for. :param default: Use this value if the attribute is not present on this PageElement. :return: A list of values, probably containing only a single value. 
""" value = self.get(key, default) if not isinstance(value, list): value = [value] return value def has_attr(self, key): """Does this PageElement have an attribute with the given name?""" return key in self.attrs def __hash__(self): return str(self).__hash__() def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the Tag, and throws an exception if it's not there.""" return self.attrs[key] def __iter__(self): "Iterating over a Tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a Tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __bool__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self.attrs[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." self.attrs.pop(key, None) def __call__(self, *args, **kwargs): """Calling a Tag like a function is the same as calling its find_all() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return self.find_all(*args, **kwargs) def __getattr__(self, tag): """Calling tag.subtag is the same as calling tag.find(name="subtag")""" #print("Getattr %s.%s" % (self.__class__, tag)) if len(tag) > 3 and tag.endswith('Tag'): # BS3: soup.aTag -> "soup.find("a") tag_name = tag[:-3] warnings.warn( '.%(name)sTag is deprecated, use .find("%(name)s") instead. If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict( name=tag_name ) ) return self.find(tag_name) # We special case contents to avoid recursion. elif not tag.startswith("__") and not tag == "contents": return self.find(tag) raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__, tag)) def __eq__(self, other): """Returns true iff this Tag has the same name, the same attributes, and the same contents (recursively) as `other`.""" if self is other: return True if (not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other)): return False for i, my_child in enumerate(self.contents): if my_child != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this Tag is not identical to `other`, as defined in __eq__.""" return not self == other def __repr__(self, encoding="unicode-escape"): """Renders this PageElement as a string. :param encoding: The encoding to use (Python 2 only). :return: Under Python 2, a bytestring; under Python 3, a Unicode string. """ if PY3K: # "The return value must be a string object", i.e. Unicode return self.decode() else: # "The return value must be a string object", i.e. a bytestring. # By convention, the return value of __repr__ should also be # an ASCII string. return self.encode(encoding) def __unicode__(self): """Renders this PageElement as a Unicode string.""" return self.decode() def __str__(self): """Renders this PageElement as a generic string. :return: Under Python 2, a UTF-8 bytestring; under Python 3, a Unicode string. """ if PY3K: return self.decode() else: return self.encode() if PY3K: __str__ = __repr__ = __unicode__ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING, indent_level=None, formatter="minimal", errors="xmlcharrefreplace"): """Render a bytestring representation of this PageElement and its contents. 
:param encoding: The destination encoding. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param formatter: A Formatter object, or a string naming one of the standard formatters. :param errors: An error handling strategy such as 'xmlcharrefreplace'. This value is passed along into encode() and its value should be one of the constants defined by Python. :return: A bytestring. """ # Turn the data structure into Unicode, then encode the # Unicode. u = self.decode(indent_level, encoding, formatter) return u.encode(encoding, errors) def decode(self, indent_level=None, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Render a Unicode representation of this PageElement and its contents. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param eventual_encoding: The tag is destined to be encoded into this encoding. This method is _not_ responsible for performing that encoding. This information is passed in so that it can be substituted in if the document contains a <META> tag that mentions the document's encoding. :param formatter: A Formatter object, or a string naming one of the standard formatters. """ # First off, turn a non-Formatter `formatter` into a Formatter # object. This will stop the lookup from happening over and # over again. if not isinstance(formatter, Formatter): formatter = self.formatter_for_name(formatter) attributes = formatter.attributes(self) attrs = [] for key, val in attributes: if val is None: decoded = key else: if isinstance(val, list) or isinstance(val, tuple): val = ' '.join(val) elif not isinstance(val, str): val = str(val) elif ( isinstance(val, AttributeValueWithCharsetSubstitution) and eventual_encoding is not None ): val = val.encode(eventual_encoding) text = formatter.attribute_value(val) decoded = ( str(key) + '=' + formatter.quoted_attribute_value(text)) attrs.append(decoded) close = '' closeTag = '' prefix = '' if self.prefix: prefix = self.prefix + ":" if self.is_empty_element: close = formatter.void_element_close_prefix or '' else: closeTag = '</%s%s>' % (prefix, self.name) pretty_print = self._should_pretty_print(indent_level) space = '' indent_space = '' if indent_level is not None: indent_space = (' ' * (indent_level - 1)) if pretty_print: space = indent_space indent_contents = indent_level + 1 else: indent_contents = None contents = self.decode_contents( indent_contents, eventual_encoding, formatter ) if self.hidden: # This is the 'document root' object. s = contents else: s = [] attribute_string = '' if attrs: attribute_string = ' ' + ' '.join(attrs) if indent_level is not None: # Even if this particular tag is not pretty-printed, # we should indent up to the start of the tag. s.append(indent_space) s.append('<%s%s%s%s>' % ( prefix, self.name, attribute_string, close)) if pretty_print: s.append("\n") s.append(contents) if pretty_print and contents and contents[-1] != "\n": s.append("\n") if pretty_print and closeTag: s.append(space) s.append(closeTag) if indent_level is not None and closeTag and self.next_sibling: # Even if this particular tag is not pretty-printed, # we're now done with the tag, and we should add a # newline if appropriate. s.append("\n") s = ''.join(s) return s def _should_pretty_print(self, indent_level): """Should this tag be pretty-printed? Most of them should, but some (such as <pre> in HTML documents) should not. 
""" return ( indent_level is not None and ( not self.preserve_whitespace_tags or self.name not in self.preserve_whitespace_tags ) ) def prettify(self, encoding=None, formatter="minimal"): """Pretty-print this PageElement as a string. :param encoding: The eventual encoding of the string. If this is None, a Unicode string will be returned. :param formatter: A Formatter object, or a string naming one of the standard formatters. :return: A Unicode string (if encoding==None) or a bytestring (otherwise). """ if encoding is None: return self.decode(True, formatter=formatter) else: return self.encode(encoding, True, formatter=formatter) def decode_contents(self, indent_level=None, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Renders the contents of this tag as a Unicode string. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param eventual_encoding: The tag is destined to be encoded into this encoding. decode_contents() is _not_ responsible for performing that encoding. This information is passed in so that it can be substituted in if the document contains a <META> tag that mentions the document's encoding. :param formatter: A Formatter object, or a string naming one of the standard Formatters. """ # First off, turn a string formatter into a Formatter object. This # will stop the lookup from happening over and over again. if not isinstance(formatter, Formatter): formatter = self.formatter_for_name(formatter) pretty_print = (indent_level is not None) s = [] for c in self: text = None if isinstance(c, NavigableString): text = c.output_ready(formatter) elif isinstance(c, Tag): s.append(c.decode(indent_level, eventual_encoding, formatter)) preserve_whitespace = ( self.preserve_whitespace_tags and self.name in self.preserve_whitespace_tags ) if text and indent_level and not preserve_whitespace: text = text.strip() if text: if pretty_print and not preserve_whitespace: s.append(" " * (indent_level - 1)) s.append(text) if pretty_print and not preserve_whitespace: s.append("\n") return ''.join(s) def encode_contents( self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Renders the contents of this PageElement as a bytestring. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param eventual_encoding: The bytestring will be in this encoding. :param formatter: A Formatter object, or a string naming one of the standard Formatters. :return: A bytestring. """ contents = self.decode_contents(indent_level, encoding, formatter) return contents.encode(encoding) # Old method for BS3 compatibility def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Deprecated method for BS3 compatibility.""" if not prettyPrint: indentLevel = None return self.encode_contents( indent_level=indentLevel, encoding=encoding) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Look in the children of this PageElement and find the first PageElement that matches the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param recursive: If this is True, find() will perform a recursive search of this PageElement's children. 
Otherwise, only the direct children will be considered. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ r = None l = self.find_all(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find #BS2 def find_all(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Look in the children of this PageElement and find all PageElements that match the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param recursive: If this is True, find_all() will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ generator = self.descendants if not recursive: generator = self.children return self._find_all(name, attrs, text, limit, generator, **kwargs) findAll = find_all # BS3 findChildren = find_all # BS2 #Generator methods @property def children(self): """Iterate over all direct children of this PageElement. :yield: A sequence of PageElements. """ # return iter() to make the purpose of the method clear return iter(self.contents) # XXX This seems to be untested. @property def descendants(self): """Iterate over all children of this PageElement in document order (a depth-first, pre-order traversal). :yield: A sequence of PageElements. """ if not len(self.contents): return stopNode = self._last_descendant().next_element current = self.contents[0] while current is not stopNode: yield current current = current.next_element # CSS selector code def select_one(self, selector, namespaces=None, **kwargs): """Perform a CSS selection operation on the current element. :param selector: A CSS selector. :param namespaces: A dictionary mapping namespace prefixes used in the CSS selector to namespace URIs. By default, Beautiful Soup will use the prefixes it encountered while parsing the document. :param kwargs: Keyword arguments to be passed into SoupSieve's soupsieve.select() method. :return: A Tag. :rtype: bs4.element.Tag """ value = self.select(selector, namespaces, 1, **kwargs) if value: return value[0] return None def select(self, selector, namespaces=None, limit=None, **kwargs): """Perform a CSS selection operation on the current element. This uses the SoupSieve library. :param selector: A string containing a CSS selector. :param namespaces: A dictionary mapping namespace prefixes used in the CSS selector to namespace URIs. By default, Beautiful Soup will use the prefixes it encountered while parsing the document. :param limit: After finding this number of results, stop looking. :param kwargs: Keyword arguments to be passed into SoupSieve's soupsieve.select() method. :return: A ResultSet of Tags. :rtype: bs4.element.ResultSet """ if namespaces is None: namespaces = self._namespaces if limit is None: limit = 0 if soupsieve is None: raise NotImplementedError( "Cannot execute CSS selectors because the soupsieve package is not installed." ) results = soupsieve.select(selector, self, namespaces, limit, **kwargs) # We do this because it's more consistent and because # ResultSet.__getattr__ has a helpful error message.
return ResultSet(None, results) # Old names for backwards compatibility def childGenerator(self): """Deprecated generator.""" return self.children def recursiveChildGenerator(self): """Deprecated generator.""" return self.descendants def has_key(self, key): """Deprecated method. This was kind of misleading because has_key() (attributes) was different from __in__ (contents). has_key() is gone in Python 3, anyway. """ warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % ( key)) return self.has_attr(key) # Next, a couple classes to represent queries and their results. class SoupStrainer(object): """Encapsulates a number of ways of matching a markup element (tag or string). This is primarily used to underpin the find_* methods, but you can create one yourself and pass it in as `parse_only` to the `BeautifulSoup` constructor, to parse a subset of a large document. """ def __init__(self, name=None, attrs={}, text=None, **kwargs): """Constructor. The SoupStrainer constructor takes the same arguments passed into the find_* methods. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. """ self.name = self._normalize_search_value(name) if not isinstance(attrs, dict): # Treat a non-dict value for attrs as a search for the 'class' # attribute. kwargs['class'] = attrs attrs = None if 'class_' in kwargs: # Treat class_="foo" as a search for the 'class' # attribute, overriding any non-dict value for attrs. kwargs['class'] = kwargs['class_'] del kwargs['class_'] if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs normalized_attrs = {} for key, value in list(attrs.items()): normalized_attrs[key] = self._normalize_search_value(value) self.attrs = normalized_attrs self.text = self._normalize_search_value(text) def _normalize_search_value(self, value): # Leave it alone if it's a Unicode string, a callable, a # regular expression, a boolean, or None. if (isinstance(value, str) or isinstance(value, Callable) or hasattr(value, 'match') or isinstance(value, bool) or value is None): return value # If it's a bytestring, convert it to Unicode, treating it as UTF-8. if isinstance(value, bytes): return value.decode("utf8") # If it's listlike, convert it into a list of strings. if hasattr(value, '__iter__'): new_value = [] for v in value: if (hasattr(v, '__iter__') and not isinstance(v, bytes) and not isinstance(v, str)): # This is almost certainly the user's mistake. In the # interests of avoiding infinite loops, we'll let # it through as-is rather than doing a recursive call. new_value.append(v) else: new_value.append(self._normalize_search_value(v)) return new_value # Otherwise, convert it into a Unicode string. # The unicode(str()) thing is so this will do the same thing on Python 2 # and Python 3. return str(str(value)) def __str__(self): """A human-readable representation of this SoupStrainer.""" if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def search_tag(self, markup_name=None, markup_attrs={}): """Check whether a Tag with the given name and attributes would match this SoupStrainer. Used prospectively to decide whether to even bother creating a Tag object. :param markup_name: A tag name as found in some markup. :param markup_attrs: A dictionary of attributes as found in some markup. 
:return: True if the prospective tag would match this SoupStrainer; False otherwise. """ found = None markup = None if isinstance(markup_name, Tag): markup = markup_name markup_attrs = markup if isinstance(self.name, str): # Optimization for a very common case where the user is # searching for a tag with one specific name, and we're # looking at a tag with a different name. if markup and not markup.prefix and self.name != markup.name: return False call_function_with_tag_data = ( isinstance(self.name, Callable) and not isinstance(markup_name, Tag)) if ((not self.name) or call_function_with_tag_data or (markup and self._matches(markup, self.name)) or (not markup and self._matches(markup_name, self.name))): if call_function_with_tag_data: match = self.name(markup_name, markup_attrs) else: match = True markup_attr_map = None for attr, match_against in list(self.attrs.items()): if not markup_attr_map: if hasattr(markup_attrs, 'get'): markup_attr_map = markup_attrs else: markup_attr_map = {} for k, v in markup_attrs: markup_attr_map[k] = v attr_value = markup_attr_map.get(attr) if not self._matches(attr_value, match_against): match = False break if match: if markup: found = markup else: found = markup_name if found and self.text and not self._matches(found.string, self.text): found = None return found # For BS3 compatibility. searchTag = search_tag def search(self, markup): """Find all items in `markup` that match this SoupStrainer. Used by the core _find_all() method, which is ultimately called by all find_* methods. :param markup: A PageElement or a list of them. """ # print('looking for %s in %s' % (self, markup)) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text or self.name or self.attrs: found = self.search_tag(markup) # If it's text, make sure the text matches. elif isinstance(markup, NavigableString) or \ isinstance(markup, str): if not self.name and not self.attrs and self._matches(markup, self.text): found = markup else: raise Exception( "I don't know how to match against a %s" % markup.__class__) return found def _matches(self, markup, match_against, already_tried=None): # print(u"Matching %s against %s" % (markup, match_against)) result = False if isinstance(markup, list) or isinstance(markup, tuple): # This should only happen when searching a multi-valued attribute # like 'class'. for item in markup: if self._matches(item, match_against): return True # We didn't match any particular value of the multivalue # attribute, but maybe we match the attribute value when # considered as a string. if self._matches(' '.join(markup), match_against): return True return False if match_against is True: # True matches any non-None value. return markup is not None if isinstance(match_against, Callable): return match_against(markup) # Custom callables take the tag as an argument, but all # other ways of matching match the tag name as a string. original_markup = markup if isinstance(markup, Tag): markup = markup.name # Ensure that `markup` is either a Unicode string, or None. markup = self._normalize_search_value(markup) if markup is None: # None matches None, False, an empty string, an empty list, and so on. 
return not match_against if (hasattr(match_against, '__iter__') and not isinstance(match_against, str)): # We're asked to match against an iterable of items. # The markup must match at least one item in the # iterable. We'll try each one in turn. # # To avoid infinite recursion we need to keep track of # items we've already seen. if not already_tried: already_tried = set() for item in match_against: if item.__hash__: key = item else: key = id(item) if key in already_tried: continue else: already_tried.add(key) if self._matches(original_markup, item, already_tried): return True else: return False # Beyond this point we might need to run the test twice: once against # the tag's name and once against its prefixed name. match = False if not match and isinstance(match_against, str): # Exact string match match = markup == match_against if not match and hasattr(match_against, 'search'): # Regexp match return match_against.search(markup) if (not match and isinstance(original_markup, Tag) and original_markup.prefix): # Try the whole thing again with the prefixed tag name. return self._matches( original_markup.prefix + ':' + original_markup.name, match_against ) return match class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source, result=()): """Constructor. :param source: A SoupStrainer. :param result: A list of PageElements. """ super(ResultSet, self).__init__(result) self.source = source def __getattr__(self, key): """Raise a helpful exception to explain a common code fix.""" raise AttributeError( "ResultSet object has no attribute '%s'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?" % key )
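# --- Usage sketch (editor's addition, not part of the bs4 source). ---
# A hypothetical demonstration of the Tag and SoupStrainer machinery
# defined above: attribute access, find()/find_all(), and parse_only
# pruning. The markup and names are illustrative only; nothing here is
# called by the library itself.
def _example_usage():
    # Imported inside the function to avoid a circular import at load time.
    from bs4 import BeautifulSoup

    soup = BeautifulSoup(
        '<div id="d"><p class="x">one</p><p>two</p></div>', "html.parser"
    )
    div = soup.find("div")                 # first matching Tag
    assert div["id"] == "d"                # __getitem__ reads .attrs
    assert div.get("missing") is None      # get() falls back to the default
    assert [p.string for p in div.find_all("p")] == ["one", "two"]

    # A SoupStrainer passed as parse_only prunes the tree at parse time.
    pruned = BeautifulSoup(
        "<div><p>kept</p><span>dropped</span></div>",
        "html.parser",
        parse_only=SoupStrainer("p"),
    )
    assert pruned.find("span") is None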
85,238
Python
.py
1,897
34.232999
181
0.611399
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,485
diagnose.py
rembo10_headphones/lib/bs4/diagnose.py
"""Diagnostic functions, mainly for use when doing tech support.""" # Use of this source code is governed by the MIT license. __license__ = "MIT" import cProfile from io import StringIO from html.parser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry import os import pstats import random import tempfile import time import traceback import sys import cProfile def diagnose(data): """Diagnostic suite for isolating common problems. :param data: A string containing markup that needs to be explained. :return: None; diagnostics are printed to standard output. """ print(("Diagnostic running on Beautiful Soup %s" % __version__)) print(("Python version %s" % sys.version)) basic_parsers = ["html.parser", "html5lib", "lxml"] for name in basic_parsers: for builder in builder_registry.builders: if name in builder.features: break else: basic_parsers.remove(name) print(( "I noticed that %s is not installed. Installing it may help." % name)) if 'lxml' in basic_parsers: basic_parsers.append("lxml-xml") try: from lxml import etree print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)))) except ImportError as e: print( "lxml is not installed or couldn't be imported.") if 'html5lib' in basic_parsers: try: import html5lib print(("Found html5lib version %s" % html5lib.__version__)) except ImportError as e: print( "html5lib is not installed or couldn't be imported.") if hasattr(data, 'read'): data = data.read() elif data.startswith("http:") or data.startswith("https:"): print(('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data)) print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.") return else: try: if os.path.exists(data): print(('"%s" looks like a filename. Reading data from the file.' % data)) with open(data) as fp: data = fp.read() except ValueError: # This can happen on some platforms when the 'filename' is # too long. Assume it's data and not a filename. pass print("") for parser in basic_parsers: print(("Trying to parse your markup with %s" % parser)) success = False try: soup = BeautifulSoup(data, features=parser) success = True except Exception as e: print(("%s could not parse the markup." % parser)) traceback.print_exc() if success: print(("Here's what %s did with the markup:" % parser)) print((soup.prettify())) print(("-" * 80)) def lxml_trace(data, html=True, **kwargs): """Print out the lxml events that occur during parsing. This lets you see how lxml parses a document when no Beautiful Soup code is running. You can use this to determine whether an lxml-specific problem is in Beautiful Soup's lxml tree builders or in lxml itself. :param data: Some markup. :param html: If True, markup will be parsed with lxml's HTML parser. if False, lxml's XML parser will be used. """ from lxml import etree for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): print(("%s, %4s, %s" % (event, element.tag, element.text))) class AnnouncingParser(HTMLParser): """Subclass of HTMLParser that announces parse events, without doing anything else. You can use this to get a picture of how html.parser sees a given document. The easiest way to do this is to call `htmlparser_trace`. 
""" def _p(self, s): print(s) def handle_starttag(self, name, attrs): self._p("%s START" % name) def handle_endtag(self, name): self._p("%s END" % name) def handle_data(self, data): self._p("%s DATA" % data) def handle_charref(self, name): self._p("%s CHARREF" % name) def handle_entityref(self, name): self._p("%s ENTITYREF" % name) def handle_comment(self, data): self._p("%s COMMENT" % data) def handle_decl(self, data): self._p("%s DECL" % data) def unknown_decl(self, data): self._p("%s UNKNOWN-DECL" % data) def handle_pi(self, data): self._p("%s PI" % data) def htmlparser_trace(data): """Print out the HTMLParser events that occur during parsing. This lets you see how HTMLParser parses a document when no Beautiful Soup code is running. :param data: Some markup. """ parser = AnnouncingParser() parser.feed(data) _vowels = "aeiou" _consonants = "bcdfghjklmnpqrstvwxyz" def rword(length=5): "Generate a random word-like string." s = '' for i in range(length): if i % 2 == 0: t = _consonants else: t = _vowels s += random.choice(t) return s def rsentence(length=4): "Generate a random sentence-like string." return " ".join(rword(random.randint(4,9)) for i in range(length)) def rdoc(num_elements=1000): """Randomly generate an invalid HTML document.""" tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] elements = [] for i in range(num_elements): choice = random.randint(0,3) if choice == 0: # New tag. tag_name = random.choice(tag_names) elements.append("<%s>" % tag_name) elif choice == 1: elements.append(rsentence(random.randint(1,4))) elif choice == 2: # Close a tag. tag_name = random.choice(tag_names) elements.append("</%s>" % tag_name) return "<html>" + "\n".join(elements) + "</html>" def benchmark_parsers(num_elements=100000): """Very basic head-to-head performance benchmark.""" print(("Comparative parser benchmark on Beautiful Soup %s" % __version__)) data = rdoc(num_elements) print(("Generated a large invalid HTML document (%d bytes)." % len(data))) for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: success = False try: a = time.time() soup = BeautifulSoup(data, parser) b = time.time() success = True except Exception as e: print(("%s could not parse the markup." % parser)) traceback.print_exc() if success: print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a))) from lxml import etree a = time.time() etree.HTML(data) b = time.time() print(("Raw lxml parsed the markup in %.2fs." % (b-a))) import html5lib parser = html5lib.HTMLParser() a = time.time() parser.parse(data) b = time.time() print(("Raw html5lib parsed the markup in %.2fs." % (b-a))) def profile(num_elements=100000, parser="lxml"): """Use Python's profiler on a randomly generated document.""" filehandle = tempfile.NamedTemporaryFile() filename = filehandle.name data = rdoc(num_elements) vars = dict(bs4=bs4, data=data, parser=parser) cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) stats = pstats.Stats(filename) # stats.strip_dirs() stats.sort_stats("cumulative") stats.print_stats('_html5lib|bs4', 50) # If this file is run as a script, standard input is diagnosed. if __name__ == '__main__': diagnose(sys.stdin.read())
7,755
Python
.py
200
31.35
129
0.619784
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,486
__init__.py
rembo10_headphones/lib/bs4/__init__.py
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend". http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup uses a pluggable XML or HTML parser to parse a (possibly invalid) document into a tree representation. Beautiful Soup provides methods and Pythonic idioms that make it easy to navigate, search, and modify the parse tree. Beautiful Soup works with Python 3.5 and up. It works better if lxml and/or html5lib is installed. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ """ __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "4.10.0" __copyright__ = "Copyright (c) 2004-2021 Leonard Richardson" # Use of this source code is governed by the MIT license. __license__ = "MIT" __all__ = ['BeautifulSoup'] from collections import Counter import os import re import sys import traceback import warnings # The very first thing we do is give a useful error if someone is # running this code under Python 2. if sys.version_info.major < 3: raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.') from .builder import builder_registry, ParserRejectedMarkup from .dammit import UnicodeDammit from .element import ( CData, Comment, DEFAULT_OUTPUT_ENCODING, Declaration, Doctype, NavigableString, PageElement, ProcessingInstruction, PYTHON_SPECIFIC_ENCODINGS, ResultSet, Script, Stylesheet, SoupStrainer, Tag, TemplateString, ) # Define some custom warnings. class GuessedAtParserWarning(UserWarning): """The warning issued when BeautifulSoup has to guess what parser to use -- probably because no parser was specified in the constructor. """ class MarkupResemblesLocatorWarning(UserWarning): """The warning issued when BeautifulSoup is given 'markup' that actually looks like a resource locator -- a URL or a path to a file on disk. """ class BeautifulSoup(Tag): """A data structure representing a parsed HTML or XML document. Most of the methods you'll call on a BeautifulSoup object are inherited from PageElement or Tag. Internally, this class defines the basic interface called by the tree builders when converting an HTML/XML document into a data structure. The interface abstracts away the differences between parsers. To write a new tree builder, you'll need to understand these methods as a whole. These methods will be called by the BeautifulSoup constructor: * reset() * feed(markup) The tree builder may call these methods from its feed() implementation: * handle_starttag(name, attrs) # See note about return value * handle_endtag(name) * handle_data(data) # Appends to the current data node * endData(containerClass) # Ends the current data node No matter how complicated the underlying parser is, you should be able to build a tree using 'start tag' events, 'end tag' events, 'data' events, and "done with data" events. If you encounter an empty-element tag (aka a self-closing tag, like HTML's <br> tag), call handle_starttag and then handle_endtag. """ # Since BeautifulSoup subclasses Tag, it's possible to treat it as # a Tag with a .name. This name makes it clear the BeautifulSoup # object isn't a real markup tag. ROOT_TAG_NAME = '[document]' # If the end-user gives no indication which tree builder they # want, look for one with these features. 
DEFAULT_BUILDER_FEATURES = ['html', 'fast'] # A string containing all ASCII whitespace characters, used in # endData() to detect data chunks that seem 'empty'. ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n" def __init__(self, markup="", features=None, builder=None, parse_only=None, from_encoding=None, exclude_encodings=None, element_classes=None, **kwargs): """Constructor. :param markup: A string or a file-like object representing markup to be parsed. :param features: Desirable features of the parser to be used. This may be the name of a specific parser ("lxml", "lxml-xml", "html.parser", or "html5lib") or it may be the type of markup to be used ("html", "html5", "xml"). It's recommended that you name a specific parser, so that Beautiful Soup gives you the same results across platforms and virtual environments. :param builder: A TreeBuilder subclass to instantiate (or instance to use) instead of looking one up based on `features`. You only need to use this if you've implemented a custom TreeBuilder. :param parse_only: A SoupStrainer. Only parts of the document matching the SoupStrainer will be considered. This is useful when parsing part of a document that would otherwise be too large to fit into memory. :param from_encoding: A string indicating the encoding of the document to be parsed. Pass this in if Beautiful Soup is guessing wrongly about the document's encoding. :param exclude_encodings: A list of strings indicating encodings known to be wrong. Pass this in if you don't know the document's encoding but you know Beautiful Soup's guess is wrong. :param element_classes: A dictionary mapping BeautifulSoup classes like Tag and NavigableString, to other classes you'd like to be instantiated instead as the parse tree is built. This is useful for subclassing Tag or NavigableString to modify default behavior. :param kwargs: For backwards compatibility purposes, the constructor accepts certain keyword arguments used in Beautiful Soup 3. None of these arguments do anything in Beautiful Soup 4; they will result in a warning and then be ignored. Apart from this, any keyword arguments passed into the BeautifulSoup constructor are propagated to the TreeBuilder constructor. This makes it possible to configure a TreeBuilder by passing in arguments, not just by saying which one to use. """ if 'convertEntities' in kwargs: del kwargs['convertEntities'] warnings.warn( "BS4 does not respect the convertEntities argument to the " "BeautifulSoup constructor. Entities are always converted " "to Unicode characters.") if 'markupMassage' in kwargs: del kwargs['markupMassage'] warnings.warn( "BS4 does not respect the markupMassage argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for any necessary markup massage.") if 'smartQuotesTo' in kwargs: del kwargs['smartQuotesTo'] warnings.warn( "BS4 does not respect the smartQuotesTo argument to the " "BeautifulSoup constructor. 
Smart quotes are always converted " "to Unicode characters.") if 'selfClosingTags' in kwargs: del kwargs['selfClosingTags'] warnings.warn( "BS4 does not respect the selfClosingTags argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for understanding self-closing tags.") if 'isHTML' in kwargs: del kwargs['isHTML'] warnings.warn( "BS4 does not respect the isHTML argument to the " "BeautifulSoup constructor. Suggest you use " "features='lxml' for HTML and features='lxml-xml' for " "XML.") def deprecated_argument(old_name, new_name): if old_name in kwargs: warnings.warn( 'The "%s" argument to the BeautifulSoup constructor ' 'has been renamed to "%s."' % (old_name, new_name)) value = kwargs[old_name] del kwargs[old_name] return value return None parse_only = parse_only or deprecated_argument( "parseOnlyThese", "parse_only") from_encoding = from_encoding or deprecated_argument( "fromEncoding", "from_encoding") if from_encoding and isinstance(markup, str): warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.") from_encoding = None self.element_classes = element_classes or dict() # We need this information to track whether or not the builder # was specified well enough that we can omit the 'you need to # specify a parser' warning. original_builder = builder original_features = features if isinstance(builder, type): # A builder class was passed in; it needs to be instantiated. builder_class = builder builder = None elif builder is None: if isinstance(features, str): features = [features] if features is None or len(features) == 0: features = self.DEFAULT_BUILDER_FEATURES builder_class = builder_registry.lookup(*features) if builder_class is None: raise FeatureNotFound( "Couldn't find a tree builder with the features you " "requested: %s. Do you need to install a parser library?" % ",".join(features)) # At this point either we have a TreeBuilder instance in # builder, or we have a builder_class that we can instantiate # with the remaining **kwargs. if builder is None: builder = builder_class(**kwargs) if not original_builder and not ( original_features == builder.NAME or original_features in builder.ALTERNATE_NAMES ) and markup: # The user did not tell us which TreeBuilder to use, # and we had to guess. Issue a warning. if builder.is_xml: markup_type = "XML" else: markup_type = "HTML" # This code adapted from warnings.py so that we get the same line # of code as our warnings.warn() call gets, even if the answer is wrong # (as it may be in a multithreading situation). caller = None try: caller = sys._getframe(1) except ValueError: pass if caller: globals = caller.f_globals line_number = caller.f_lineno else: globals = sys.__dict__ line_number= 1 filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith((".pyc", ".pyo")): filename = filename[:-1] if filename: # If there is no filename at all, the user is most likely in a REPL, # and the warning is not necessary. values = dict( filename=filename, line_number=line_number, parser=builder.NAME, markup_type=markup_type ) warnings.warn( self.NO_PARSER_SPECIFIED_WARNING % values, GuessedAtParserWarning, stacklevel=2 ) else: if kwargs: warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. 
These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.") self.builder = builder self.is_xml = builder.is_xml self.known_xml = self.is_xml self._namespaces = dict() self.parse_only = parse_only self.builder.initialize_soup(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() elif len(markup) <= 256 and ( (isinstance(markup, bytes) and not b'<' in markup) or (isinstance(markup, str) and not '<' in markup) ): # Print out warnings for a couple beginner problems # involving passing non-markup to Beautiful Soup. # Beautiful Soup will still parse the input as markup, # just in case that's what the user really wants. if (isinstance(markup, str) and not os.path.supports_unicode_filenames): possible_filename = markup.encode("utf8") else: possible_filename = markup is_file = False is_directory = False try: is_file = os.path.exists(possible_filename) if is_file: is_directory = os.path.isdir(possible_filename) except Exception as e: # This is almost certainly a problem involving # characters not valid in filenames on this # system. Just let it go. pass if is_directory: warnings.warn( '"%s" looks like a directory name, not markup. You may' ' want to open a file found in this directory and pass' ' the filehandle into Beautiful Soup.' % ( self._decode_markup(markup) ), MarkupResemblesLocatorWarning ) elif is_file: warnings.warn( '"%s" looks like a filename, not markup. You should' ' probably open this file and pass the filehandle into' ' Beautiful Soup.' % self._decode_markup(markup), MarkupResemblesLocatorWarning ) self._check_markup_is_url(markup) rejections = [] success = False for (self.markup, self.original_encoding, self.declared_html_encoding, self.contains_replacement_characters) in ( self.builder.prepare_markup( markup, from_encoding, exclude_encodings=exclude_encodings)): self.reset() try: self._feed() success = True break except ParserRejectedMarkup as e: rejections.append(e) pass if not success: other_exceptions = [str(e) for e in rejections] raise ParserRejectedMarkup( "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions) ) # Clear out the markup and remove the builder's circular # reference to this object. self.markup = None self.builder.soup = None def __copy__(self): """Copy a BeautifulSoup object by converting the document to a string and parsing it again.""" copy = type(self)( self.encode('utf-8'), builder=self.builder, from_encoding='utf-8' ) # Although we encoded the tree to UTF-8, that may not have # been the encoding of the original markup. Set the copy's # .original_encoding to reflect the original object's # .original_encoding. copy.original_encoding = self.original_encoding return copy def __getstate__(self): # Frequently a tree builder can't be pickled. d = dict(self.__dict__) if 'builder' in d and not self.builder.picklable: d['builder'] = None return d @classmethod def _decode_markup(cls, markup): """Ensure `markup` is bytes so it's safe to send into warnings.warn. TODO: warnings.warn had this problem back in 2010 but it might not anymore. """ if isinstance(markup, bytes): decoded = markup.decode('utf-8', 'replace') else: decoded = markup return decoded @classmethod def _check_markup_is_url(cls, markup): """Error-handling method to raise a warning if incoming markup looks like a URL. :param markup: A string. 
""" if isinstance(markup, bytes): space = b' ' cant_start_with = (b"http:", b"https:") elif isinstance(markup, str): space = ' ' cant_start_with = ("http:", "https:") else: return if any(markup.startswith(prefix) for prefix in cant_start_with): if not space in markup: warnings.warn( '"%s" looks like a URL. Beautiful Soup is not an' ' HTTP client. You should probably use an HTTP client like' ' requests to get the document behind the URL, and feed' ' that document to Beautiful Soup.' % cls._decode_markup( markup ), MarkupResemblesLocatorWarning ) def _feed(self): """Internal method that parses previously set markup, creating a large number of Tag and NavigableString objects. """ # Convert the document to Unicode. self.builder.reset() self.builder.feed(self.markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def reset(self): """Reset this object to a state as though it had never parsed any markup. """ Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.current_data = [] self.currentTag = None self.tagStack = [] self.open_tag_counter = Counter() self.preserve_whitespace_tag_stack = [] self.string_container_stack = [] self.pushTag(self) def new_tag(self, name, namespace=None, nsprefix=None, attrs={}, sourceline=None, sourcepos=None, **kwattrs): """Create a new Tag associated with this BeautifulSoup object. :param name: The name of the new Tag. :param namespace: The URI of the new Tag's XML namespace, if any. :param prefix: The prefix for the new Tag's XML namespace, if any. :param attrs: A dictionary of this Tag's attribute values; can be used instead of `kwattrs` for attributes like 'class' that are reserved words in Python. :param sourceline: The line number where this tag was (purportedly) found in its source document. :param sourcepos: The character position within `sourceline` where this tag was (purportedly) found. :param kwattrs: Keyword arguments for the new Tag's attribute values. """ kwattrs.update(attrs) return self.element_classes.get(Tag, Tag)( None, self.builder, name, namespace, nsprefix, kwattrs, sourceline=sourceline, sourcepos=sourcepos ) def string_container(self, base_class=None): container = base_class or NavigableString # There may be a general override of NavigableString. container = self.element_classes.get( container, container ) # On top of that, we may be inside a tag that needs a special # container class. if self.string_container_stack and container is NavigableString: container = self.builder.string_containers.get( self.string_container_stack[-1].name, container ) return container def new_string(self, s, subclass=None): """Create a new NavigableString associated with this BeautifulSoup object. """ container = self.string_container(subclass) return container(s) def insert_before(self, *args): """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement it because there is nothing before or after it in the parse tree. """ raise NotImplementedError("BeautifulSoup objects don't support insert_before().") def insert_after(self, *args): """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement it because there is nothing before or after it in the parse tree. 
""" raise NotImplementedError("BeautifulSoup objects don't support insert_after().") def popTag(self): """Internal method called by _popToTag when a tag is closed.""" tag = self.tagStack.pop() if tag.name in self.open_tag_counter: self.open_tag_counter[tag.name] -= 1 if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: self.preserve_whitespace_tag_stack.pop() if self.string_container_stack and tag == self.string_container_stack[-1]: self.string_container_stack.pop() #print("Pop", tag.name) if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): """Internal method called by handle_starttag when a tag is opened.""" #print("Push", tag.name) if self.currentTag is not None: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] if tag.name != self.ROOT_TAG_NAME: self.open_tag_counter[tag.name] += 1 if tag.name in self.builder.preserve_whitespace_tags: self.preserve_whitespace_tag_stack.append(tag) if tag.name in self.builder.string_containers: self.string_container_stack.append(tag) def endData(self, containerClass=None): """Method called by the TreeBuilder when the end of a data segment occurs. """ if self.current_data: current_data = ''.join(self.current_data) # If whitespace is not preserved, and this string contains # nothing but ASCII spaces, replace it with a single space # or newline. if not self.preserve_whitespace_tag_stack: strippable = True for i in current_data: if i not in self.ASCII_SPACES: strippable = False break if strippable: if '\n' in current_data: current_data = '\n' else: current_data = ' ' # Reset the data collector. self.current_data = [] # Should we add this string to the tree at all? if self.parse_only and len(self.tagStack) <= 1 and \ (not self.parse_only.text or \ not self.parse_only.search(current_data)): return containerClass = self.string_container(containerClass) o = containerClass(current_data) self.object_was_parsed(o) def object_was_parsed(self, o, parent=None, most_recent_element=None): """Method called by the TreeBuilder to integrate an object into the parse tree.""" if parent is None: parent = self.currentTag if most_recent_element is not None: previous_element = most_recent_element else: previous_element = self._most_recent_element next_element = previous_sibling = next_sibling = None if isinstance(o, Tag): next_element = o.next_element next_sibling = o.next_sibling previous_sibling = o.previous_sibling if previous_element is None: previous_element = o.previous_element fix = parent.next_element is not None o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) self._most_recent_element = o parent.contents.append(o) # Check if we are inserting into an already parsed node. if fix: self._linkage_fixer(parent) def _linkage_fixer(self, el): """Make sure linkage of this fragment is sound.""" first = el.contents[0] child = el.contents[-1] descendant = child if child is first and el.parent is not None: # Parent should be linked to first child el.next_element = child # We are no longer linked to whatever this element is prev_el = child.previous_element if prev_el is not None and prev_el is not el: prev_el.next_element = None # First child should be linked to the parent, and no previous siblings. child.previous_element = el child.previous_sibling = None # We have no sibling as we've been appended as the last. 
child.next_sibling = None # This index is a tag, dig deeper for a "last descendant" if isinstance(child, Tag) and child.contents: descendant = child._last_descendant(False) # As the final step, link last descendant. It should be linked # to the parent's next sibling (if found), else walk up the chain # and find a parent with a sibling. It should have no next sibling. descendant.next_element = None descendant.next_sibling = None target = el while True: if target is None: break elif target.next_sibling is not None: descendant.next_element = target.next_sibling target.next_sibling.previous_element = child break target = target.parent def _popToTag(self, name, nsprefix=None, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If there are no open tags with the given name, nothing will be popped. :param name: Pop up to the most recent tag with this name. :param nsprefix: The namespace prefix that goes with `name`. :param inclusivePop: If this is false, pops the tag stack up to but *not* including the most recent instance of the given tag. """ #print("Popping to %s" % name) if name == self.ROOT_TAG_NAME: # The BeautifulSoup object itself can never be popped. return most_recently_popped = None stack_size = len(self.tagStack) for i in range(stack_size - 1, 0, -1): if not self.open_tag_counter.get(name): break t = self.tagStack[i] if (name == t.name and nsprefix == t.prefix): if inclusivePop: most_recently_popped = self.popTag() break most_recently_popped = self.popTag() return most_recently_popped def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None, sourcepos=None): """Called by the tree builder when a new tag is encountered. :param name: Name of the tag. :param nsprefix: Namespace prefix for the tag. :param attrs: A dictionary of attribute values. :param sourceline: The line number where this tag was found in its source document. :param sourcepos: The character position within `sourceline` where this tag was found. If this method returns None, the tag was rejected by an active SoupStrainer. You should proceed as if the tag had not occurred in the document. For instance, if this was a self-closing tag, don't call handle_endtag. """ # print("Start tag %s: %s" % (name, attrs)) self.endData() if (self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs))): return None tag = self.element_classes.get(Tag, Tag)( self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element, sourceline=sourceline, sourcepos=sourcepos ) if tag is None: return tag if self._most_recent_element is not None: self._most_recent_element.next_element = tag self._most_recent_element = tag self.pushTag(tag) return tag def handle_endtag(self, name, nsprefix=None): """Called by the tree builder when an ending tag is encountered. :param name: Name of the tag. :param nsprefix: Namespace prefix for the tag. """ #print("End tag: " + name) self.endData() self._popToTag(name, nsprefix) def handle_data(self, data): """Called by the tree builder when a chunk of textual data is encountered.""" self.current_data.append(data) def decode(self, pretty_print=False, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Returns a string or Unicode representation of the parse tree as an HTML or XML document. :param pretty_print: If this is True, indentation will be used to make the document more readable. :param eventual_encoding: The encoding of the final document.
            If this is None, the document will be a Unicode string.
        """
        if self.is_xml:
            # Print the XML declaration
            encoding_part = ''
            if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
                # This is a special Python encoding; it can't actually
                # go into an XML document because it means nothing
                # outside of Python.
                eventual_encoding = None
            if eventual_encoding is not None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = '<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = ''
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)

# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup

class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""

    def __init__(self, *args, **kwargs):
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)

class StopParsing(Exception):
    """Exception raised by a TreeBuilder if it's unable to continue parsing."""
    pass

class FeatureNotFound(ValueError):
    """Exception raised by the BeautifulSoup constructor if no parser with the
    requested features is found.
    """
    pass

#If this file is run as a script, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print((soup.prettify()))
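
# ---------------------------------------------------------------------------
# Editorial usage sketch, not part of the upstream module: it exercises only
# the decode() method defined above and assumes the optional lxml parser is
# installed so the markup is parsed as XML (is_xml=True). The function name
# _decode_example is hypothetical.
def _decode_example():
    soup = BeautifulSoup('<root><child>text</child></root>', 'xml')
    # eventual_encoding is interpolated into the XML declaration...
    assert soup.decode(eventual_encoding='utf-8').startswith(
        '<?xml version="1.0" encoding="utf-8"?>')
    # ...and dropped from the declaration when eventual_encoding is None.
    assert soup.decode(eventual_encoding=None).startswith(
        '<?xml version="1.0"?>')
    # pretty_print=True turns on indentation (indent_level starts at 0).
    return soup.decode(pretty_print=True)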
32,673
Python
.py
688
36.197674
536
0.614666
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,487
testing.py
rembo10_headphones/lib/bs4/testing.py
"""Helper classes for tests.""" import pickle import copy import functools import unittest from unittest import TestCase from bs4 import BeautifulSoup from bs4.element import ( CharsetMetaAttributeValue, Comment, ContentMetaAttributeValue, Doctype, SoupStrainer, ) from bs4.builder import HTMLParserTreeBuilder default_builder = HTMLParserTreeBuilder class SoupTest(unittest.TestCase): @property def default_builder(self): return default_builder() def soup(self, markup, **kwargs): """Build a Beautiful Soup object from markup.""" builder = kwargs.pop('builder', self.default_builder) return BeautifulSoup(markup, builder=builder, **kwargs) def document_for(self, markup): """Turn an HTML fragment into a document. The details depend on the builder. """ return self.default_builder.test_fragment_to_document(markup) def assertSoupEquals(self, to_parse, compare_parsed_to=None): builder = self.default_builder obj = BeautifulSoup(to_parse, builder=builder) if compare_parsed_to is None: compare_parsed_to = to_parse self.assertEqual(obj.decode(), self.document_for(compare_parsed_to)) def assertConnectedness(self, element): """Ensure that next_element and previous_element are properly set for all descendants of the given element. """ earlier = None for e in element.descendants: if earlier: self.assertEqual(e, earlier.next_element) self.assertEqual(earlier, e.previous_element) earlier = e class HTMLTreeBuilderSmokeTest(object): """A basic test of a treebuilder's competence. Any HTML treebuilder, present or future, should be able to pass these tests. With invalid markup, there's room for interpretation, and different parsers can handle it differently. But with the markup in these tests, there's not much room for interpretation. """ def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. tree = self.soup("<a><b>foo</a>") dumped = pickle.dumps(tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), tree.decode()) def assertDoctypeHandled(self, doctype_fragment): """Assert that a given doctype string is handled correctly.""" doctype_str, soup = self._document_with_doctype(doctype_fragment) # Make sure a Doctype object was created. doctype = soup.contents[0] self.assertEqual(doctype.__class__, Doctype) self.assertEqual(doctype, doctype_fragment) self.assertEqual(str(soup)[:len(doctype_str)], doctype_str) # Make sure that the doctype was correctly associated with the # parse tree and that the rest of the document parsed. 
self.assertEqual(soup.p.contents[0], 'foo') def _document_with_doctype(self, doctype_fragment): """Generate and parse a document with the given doctype.""" doctype = '<!DOCTYPE %s>' % doctype_fragment markup = doctype + '\n<p>foo</p>' soup = self.soup(markup) return doctype, soup def test_normal_doctypes(self): """Make sure normal, everyday HTML doctypes are handled correctly.""" self.assertDoctypeHandled("html") self.assertDoctypeHandled( 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"') def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_public_doctype_with_url(self): doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' self.assertDoctypeHandled(doctype) def test_system_doctype(self): self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') def test_namespaced_system_doctype(self): # We can handle a namespaced doctype with a system ID. self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') def test_namespaced_public_doctype(self): # Test a namespaced doctype with a public id. self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') def test_real_xhtml_document(self): """A real XHTML document should come out more or less the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8").replace(b"\n", b""), markup.replace(b"\n", b"")) def test_processing_instruction(self): markup = b"""<?PITarget PIContent?>""" soup = self.soup(markup) self.assertEqual(markup, soup.encode("utf8")) def test_deepcopy(self): """Make sure you can copy the tree builder. This is important because the builder is part of a BeautifulSoup object, and we want to be able to copy that. """ copy.deepcopy(self.default_builder) def test_p_tag_is_never_empty_element(self): """A <p> tag is never designated as an empty-element tag. Even if the markup shows it as an empty-element tag, it shouldn't be presented that way. """ soup = self.soup("<p/>") self.assertFalse(soup.p.is_empty_element) self.assertEqual(str(soup.p), "<p></p>") def test_unclosed_tags_get_closed(self): """A tag that's not closed by the end of the document should be closed. This applies to all tags except empty-element tags. """ self.assertSoupEquals("<p>", "<p></p>") self.assertSoupEquals("<b>", "<b></b>") self.assertSoupEquals("<br>", "<br/>") def test_br_is_always_empty_element_tag(self): """A <br> tag is designated as an empty-element tag. Some parsers treat <br></br> as one <br/> tag, some parsers as two tags, but it should always be an empty-element tag. """ soup = self.soup("<br></br>") self.assertTrue(soup.br.is_empty_element) self.assertEqual(str(soup.br), "<br/>") def test_nested_formatting_elements(self): self.assertSoupEquals("<em><em></em></em>") def test_double_head(self): html = '''<!DOCTYPE html> <html> <head> <title>Ordinary HEAD element test</title> </head> <script type="text/javascript"> alert("Help!"); </script> <body> Hello, world! </body> </html> ''' soup = self.soup(html) self.assertEqual("text/javascript", soup.find('script')['type']) def test_comment(self): # Comments are represented as Comment objects. 
markup = "<p>foo<!--foobar-->baz</p>" self.assertSoupEquals(markup) soup = self.soup(markup) comment = soup.find(text="foobar") self.assertEqual(comment.__class__, Comment) # The comment is properly integrated into the tree. foo = soup.find(text="foo") self.assertEqual(comment, foo.next_element) baz = soup.find(text="baz") self.assertEqual(comment, baz.previous_element) def test_preserved_whitespace_in_pre_and_textarea(self): """Whitespace must be preserved in <pre> and <textarea> tags.""" self.assertSoupEquals("<pre> </pre>") self.assertSoupEquals("<textarea> woo </textarea>") def test_nested_inline_elements(self): """Inline elements can be nested indefinitely.""" b_tag = "<b>Inside a B tag</b>" self.assertSoupEquals(b_tag) nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>" self.assertSoupEquals(nested_b_tag) double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>" self.assertSoupEquals(nested_b_tag) def test_nested_block_level_elements(self): """Block elements can be nested.""" soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>') blockquote = soup.blockquote self.assertEqual(blockquote.p.b.string, 'Foo') self.assertEqual(blockquote.b.string, 'Foo') def test_correctly_nested_tables(self): """One table can go inside another one.""" markup = ('<table id="1">' '<tr>' "<td>Here's another table:" '<table id="2">' '<tr><td>foo</td></tr>' '</table></td>') self.assertSoupEquals( markup, '<table id="1"><tr><td>Here\'s another table:' '<table id="2"><tr><td>foo</td></tr></table>' '</td></tr></table>') self.assertSoupEquals( "<table><thead><tr><td>Foo</td></tr></thead>" "<tbody><tr><td>Bar</td></tr></tbody>" "<tfoot><tr><td>Baz</td></tr></tfoot></table>") def test_deeply_nested_multivalued_attribute(self): # html5lib can set the attributes of the same tag many times # as it rearranges the tree. This has caused problems with # multivalued attributes. markup = '<table><div><div class="css"></div></div></table>' soup = self.soup(markup) self.assertEqual(["css"], soup.div.div['class']) def test_multivalued_attribute_on_html(self): # html5lib uses a different API to set the attributes ot the # <html> tag. This has caused problems with multivalued # attributes. 
markup = '<html class="a b"></html>' soup = self.soup(markup) self.assertEqual(["a", "b"], soup.html['class']) def test_angle_brackets_in_attribute_values_are_escaped(self): self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>') def test_entities_in_attributes_converted_to_unicode(self): expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>' self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect) def test_entities_in_text_converted_to_unicode(self): expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>' self.assertSoupEquals("<p>pi&#241;ata</p>", expect) self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect) def test_quot_entity_converted_to_quotation_mark(self): self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>", '<p>I said "good day!"</p>') def test_out_of_range_entity(self): expect = "\N{REPLACEMENT CHARACTER}" self.assertSoupEquals("&#10000000000000;", expect) self.assertSoupEquals("&#x10000000000000;", expect) self.assertSoupEquals("&#1000000000;", expect) def test_multipart_strings(self): "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." soup = self.soup("<html><h2>\nfoo</h2><p></p></html>") self.assertEqual("p", soup.h2.string.next_element.name) self.assertEqual("p", soup.p.name) self.assertConnectedness(soup) def test_head_tag_between_head_and_body(self): "Prevent recurrence of a bug in the html5lib treebuilder." content = """<html><head></head> <link></link> <body>foo</body> </html> """ soup = self.soup(content) self.assertNotEqual(None, soup.html.body) self.assertConnectedness(soup) def test_multiple_copies_of_a_tag(self): "Prevent recurrence of a bug in the html5lib treebuilder." content = """<!DOCTYPE html> <html> <body> <article id="a" > <div><a href="1"></div> <footer> <a href="2"></a> </footer> </article> </body> </html> """ soup = self.soup(content) self.assertConnectedness(soup.article) def test_basic_namespaces(self): """Parsers don't need to *understand* namespaces, but at the very least they should not choke on namespaces or lose data.""" markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>' soup = self.soup(markup) self.assertEqual(markup, soup.encode()) html = soup.html self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns']) self.assertEqual( 'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml']) self.assertEqual( 'http://www.w3.org/2000/svg', soup.html['xmlns:svg']) def test_multivalued_attribute_value_becomes_list(self): markup = b'<a class="foo bar">' soup = self.soup(markup) self.assertEqual(['foo', 'bar'], soup.a['class']) # # Generally speaking, tests below this point are more tests of # Beautiful Soup than tests of the tree builders. But parsers are # weird, so we run these tests separately for every tree builder # to detect any differences between them. # def test_can_parse_unicode_document(self): # A seemingly innocuous document... but it's in Unicode! And # it contains characters that can't be represented in the # encoding found in the declaration! The horror! 
markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>' soup = self.soup(markup) self.assertEqual('Sacr\xe9 bleu!', soup.body.string) def test_soupstrainer(self): """Parsers should be able to work with SoupStrainers.""" strainer = SoupStrainer("b") soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>", parse_only=strainer) self.assertEqual(soup.decode(), "<b>bold</b>") def test_single_quote_attribute_values_become_double_quotes(self): self.assertSoupEquals("<foo attr='bar'></foo>", '<foo attr="bar"></foo>') def test_attribute_values_with_nested_quotes_are_left_alone(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" self.assertSoupEquals(text) def test_attribute_values_with_double_nested_quotes_get_quoted(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" soup = self.soup(text) soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' self.assertSoupEquals( soup.foo.decode(), """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""") def test_ampersand_in_attribute_value_gets_escaped(self): self.assertSoupEquals('<this is="really messed up & stuff"></this>', '<this is="really messed up &amp; stuff"></this>') self.assertSoupEquals( '<a href="http://example.org?a=1&b=2;3">foo</a>', '<a href="http://example.org?a=1&amp;b=2;3">foo</a>') def test_escaped_ampersand_in_attribute_value_is_left_alone(self): self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>') def test_entities_in_strings_converted_during_parsing(self): # Both XML and HTML entities are converted to Unicode characters # during parsing. text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>" self.assertSoupEquals(text, expected) def test_smart_quotes_converted_on_the_way_in(self): # Microsoft smart quotes are converted to Unicode characters during # parsing. quote = b"<p>\x91Foo\x92</p>" soup = self.soup(quote) self.assertEqual( soup.p.string, "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") def test_non_breaking_spaces_converted_on_the_way_in(self): soup = self.soup("<a>&nbsp;&nbsp;</a>") self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2) def test_entities_converted_on_the_way_out(self): text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8") soup = self.soup(text) self.assertEqual(soup.p.encode("utf-8"), expected) def test_real_iso_latin_document(self): # Smoke test of interrelated functionality, using an # easy-to-understand document. # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>' # That's because we're going to encode it into ISO-Latin-1, and use # that to test. iso_latin_html = unicode_html.encode("iso-8859-1") # Parse the ISO-Latin-1 HTML. soup = self.soup(iso_latin_html) # Encode it to UTF-8. result = soup.encode("utf-8") # What do we expect the result to look like? Well, it would # look like unicode_html, except that the META tag would say # UTF-8 instead of ISO-Latin-1. expected = unicode_html.replace("ISO-Latin-1", "utf-8") # And, of course, it would be in UTF-8, not Unicode. expected = expected.encode("utf-8") # Ta-da! 
        self.assertEqual(result, expected)

    def test_real_shift_jis_document(self):
        # Smoke test to make sure the parser can handle a document in
        # Shift-JIS encoding, without choking.
        shift_jis_html = (
            b'<html><head></head><body><pre>'
            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
            b'</pre></body></html>')
        unicode_html = shift_jis_html.decode("shift-jis")
        soup = self.soup(unicode_html)

        # Make sure the parse tree is correctly encoded to various
        # encodings.
        self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
        self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))

    def test_real_hebrew_document(self):
        # A real-world test to make sure we can convert ISO-8859-8 (a
        # Hebrew encoding) to UTF-8.
        hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
        soup = self.soup(
            hebrew_document, from_encoding="iso8859-8")
        self.assertEqual(soup.original_encoding, 'iso8859-8')
        self.assertEqual(
            soup.encode('utf-8'),
            hebrew_document.decode("iso8859-8").encode("utf-8"))

    def test_meta_tag_reflects_current_encoding(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')

        # Here's a document incorporating that meta tag.
        shift_jis_html = (
            '<html><head>\n%s\n'
            '<meta http-equiv="Content-language" content="ja"/>'
            '</head><body>Shift-JIS markup goes here.') % meta_tag
        soup = self.soup(shift_jis_html)

        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
        content = parsed_meta['content']
        self.assertEqual('text/html; charset=x-sjis', content)

        # But that value is actually a ContentMetaAttributeValue object.
        self.assertTrue(isinstance(content, ContentMetaAttributeValue))

        # And it will take on a value that reflects its current
        # encoding.
        self.assertEqual('text/html; charset=utf8', content.encode("utf8"))

        # For the rest of the story, see TestSubstitutions in
        # test_tree.py.

    def test_html5_style_meta_tag_reflects_current_encoding(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta id="encoding" charset="x-sjis" />')

        # Here's a document incorporating that meta tag.
        shift_jis_html = (
            '<html><head>\n%s\n'
            '<meta http-equiv="Content-language" content="ja"/>'
            '</head><body>Shift-JIS markup goes here.') % meta_tag
        soup = self.soup(shift_jis_html)

        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', id="encoding")
        charset = parsed_meta['charset']
        self.assertEqual('x-sjis', charset)

        # But that value is actually a CharsetMetaAttributeValue object.
        self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))

        # And it will take on a value that reflects its current
        # encoding.
        self.assertEqual('utf8', charset.encode("utf8"))

    def test_tag_with_no_attributes_can_have_attributes_added(self):
        data = self.soup("<a>text</a>")
        data.a['foo'] = 'bar'
        self.assertEqual('<a foo="bar">text</a>', data.a.decode())

class XMLTreeBuilderSmokeTest(object):

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
tree = self.soup("<a><b>foo</a>") dumped = pickle.dumps(tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), tree.decode()) def test_docstring_generated(self): soup = self.soup("<root/>") self.assertEqual( soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>') def test_real_xhtml_document(self): """A real XHTML document should come out *exactly* the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8"), markup) def test_formatter_processes_script_tag_for_xml_documents(self): doc = """ <script type="text/javascript"> </script> """ soup = BeautifulSoup(doc, "lxml-xml") # lxml would have stripped this while parsing, but we can add # it later. soup.script.string = 'console.log("< < hey > > ");' encoded = soup.encode() self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded) def test_can_parse_unicode_document(self): markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>' soup = self.soup(markup) self.assertEqual('Sacr\xe9 bleu!', soup.root.string) def test_popping_namespaced_tag(self): markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>' soup = self.soup(markup) self.assertEqual( str(soup.rss), markup) def test_docstring_includes_correct_encoding(self): soup = self.soup("<root/>") self.assertEqual( soup.encode("latin1"), b'<?xml version="1.0" encoding="latin1"?>\n<root/>') def test_large_xml_document(self): """A large XML document should come out the same as it went in.""" markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>' + b'0' * (2**12) + b'</root>') soup = self.soup(markup) self.assertEqual(soup.encode("utf-8"), markup) def test_tags_are_empty_element_if_and_only_if_they_are_empty(self): self.assertSoupEquals("<p>", "<p/>") self.assertSoupEquals("<p>foo</p>") def test_namespaces_are_preserved(self): markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>' soup = self.soup(markup) root = soup.root self.assertEqual("http://example.com/", root['xmlns:a']) self.assertEqual("http://example.net/", root['xmlns:b']) def test_closing_namespaced_tag(self): markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>' soup = self.soup(markup) self.assertEqual(str(soup.p), markup) def test_namespaced_attributes(self): markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>' soup = self.soup(markup) self.assertEqual(str(soup.foo), markup) def test_namespaced_attributes_xml_namespace(self): markup = '<foo xml:lang="fr">bar</foo>' soup = self.soup(markup) self.assertEqual(str(soup.foo), markup) class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): """Smoke test for a tree builder that supports HTML5.""" def test_real_xhtml_document(self): # Since XHTML is not HTML5, HTML5 parsers are not tested to handle # XHTML documents in any particular way. 
pass def test_html_tags_have_namespace(self): markup = "<a>" soup = self.soup(markup) self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace) def test_svg_tags_have_namespace(self): markup = '<svg><circle/></svg>' soup = self.soup(markup) namespace = "http://www.w3.org/2000/svg" self.assertEqual(namespace, soup.svg.namespace) self.assertEqual(namespace, soup.circle.namespace) def test_mathml_tags_have_namespace(self): markup = '<math><msqrt>5</msqrt></math>' soup = self.soup(markup) namespace = 'http://www.w3.org/1998/Math/MathML' self.assertEqual(namespace, soup.math.namespace) self.assertEqual(namespace, soup.msqrt.namespace) def test_xml_declaration_becomes_comment(self): markup = '<?xml version="1.0" encoding="utf-8"?><html></html>' soup = self.soup(markup) self.assertTrue(isinstance(soup.contents[0], Comment)) self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') self.assertEqual("html", soup.contents[0].next_element.name) def skipIf(condition, reason): def nothing(test, *args, **kwargs): return None def decorator(test_item): if condition: return nothing else: return test_item return decorator
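
# ---------------------------------------------------------------------------
# Editorial sketch, not part of the upstream module, showing how the mixins
# above are meant to be consumed: a concrete test module subclasses SoupTest
# together with the relevant smoke test and points default_builder at the
# builder under test. The class name here is illustrative.
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):

    @property
    def default_builder(self):
        # HTMLParserTreeBuilder is already imported at the top of this module.
        return HTMLParserTreeBuilder()

    @skipIf(True, "Demonstrates the skipIf decorator defined above.")
    def test_example_of_skipping(self):
        pass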
27,272
Python
.py
558
40.482079
237
0.63049
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,488
dammit.py
rembo10_headphones/lib/bs4/dammit.py
# -*- coding: utf-8 -*- """Beautiful Soup bonus library: Unicode, Dammit This library converts a bytestream to Unicode through any means necessary. It is heavily based on code from Mark Pilgrim's Universal Feed Parser. It works best on XML and HTML, but it does not rewrite the XML or HTML to reflect a new encoding; that's the tree builder's job. """ # Use of this source code is governed by the MIT license. __license__ = "MIT" from html.entities import codepoint2name from collections import defaultdict import codecs import re import logging import string # Import a library to autodetect character encodings. chardet_type = None try: # First try the fast C implementation. # PyPI package: cchardet import cchardet def chardet_dammit(s): if isinstance(s, str): return None return cchardet.detect(s)['encoding'] except ImportError: try: # Fall back to the pure Python implementation # Debian package: python-chardet # PyPI package: chardet import chardet def chardet_dammit(s): if isinstance(s, str): return None return chardet.detect(s)['encoding'] #import chardet.constants #chardet.constants._debug = 1 except ImportError: # No chardet available. def chardet_dammit(s): return None # Available from http://cjkpython.i18n.org/. # # TODO: This doesn't work anymore and the closest thing, iconv_codecs, # is GPL-licensed. Check whether this is still necessary. try: import iconv_codec except ImportError: pass # Build bytestring and Unicode versions of regular expressions for finding # a declared encoding inside an XML or HTML document. xml_encoding = '^\\s*<\\?.*encoding=[\'"](.*?)[\'"].*\\?>' html_meta = '<\\s*meta[^>]+charset\\s*=\\s*["\']?([^>]*?)[ /;\'">]' encoding_res = dict() encoding_res[bytes] = { 'html' : re.compile(html_meta.encode("ascii"), re.I), 'xml' : re.compile(xml_encoding.encode("ascii"), re.I), } encoding_res[str] = { 'html' : re.compile(html_meta, re.I), 'xml' : re.compile(xml_encoding, re.I) } try: from html.entities import html5 except ImportError: # This is a copy of html.entities.html5 from Python 3.9. There's # no equivalent table in Python 2, so we'll just provide a copy # here. 
html5 = { 'Aacute': '\xc1', 'aacute': '\xe1', 'Aacute;': '\xc1', 'aacute;': '\xe1', 'Abreve;': '\u0102', 'abreve;': '\u0103', 'ac;': '\u223e', 'acd;': '\u223f', 'acE;': '\u223e\u0333', 'Acirc': '\xc2', 'acirc': '\xe2', 'Acirc;': '\xc2', 'acirc;': '\xe2', 'acute': '\xb4', 'acute;': '\xb4', 'Acy;': '\u0410', 'acy;': '\u0430', 'AElig': '\xc6', 'aelig': '\xe6', 'AElig;': '\xc6', 'aelig;': '\xe6', 'af;': '\u2061', 'Afr;': '\U0001d504', 'afr;': '\U0001d51e', 'Agrave': '\xc0', 'agrave': '\xe0', 'Agrave;': '\xc0', 'agrave;': '\xe0', 'alefsym;': '\u2135', 'aleph;': '\u2135', 'Alpha;': '\u0391', 'alpha;': '\u03b1', 'Amacr;': '\u0100', 'amacr;': '\u0101', 'amalg;': '\u2a3f', 'AMP': '&', 'amp': '&', 'AMP;': '&', 'amp;': '&', 'And;': '\u2a53', 'and;': '\u2227', 'andand;': '\u2a55', 'andd;': '\u2a5c', 'andslope;': '\u2a58', 'andv;': '\u2a5a', 'ang;': '\u2220', 'ange;': '\u29a4', 'angle;': '\u2220', 'angmsd;': '\u2221', 'angmsdaa;': '\u29a8', 'angmsdab;': '\u29a9', 'angmsdac;': '\u29aa', 'angmsdad;': '\u29ab', 'angmsdae;': '\u29ac', 'angmsdaf;': '\u29ad', 'angmsdag;': '\u29ae', 'angmsdah;': '\u29af', 'angrt;': '\u221f', 'angrtvb;': '\u22be', 'angrtvbd;': '\u299d', 'angsph;': '\u2222', 'angst;': '\xc5', 'angzarr;': '\u237c', 'Aogon;': '\u0104', 'aogon;': '\u0105', 'Aopf;': '\U0001d538', 'aopf;': '\U0001d552', 'ap;': '\u2248', 'apacir;': '\u2a6f', 'apE;': '\u2a70', 'ape;': '\u224a', 'apid;': '\u224b', 'apos;': "'", 'ApplyFunction;': '\u2061', 'approx;': '\u2248', 'approxeq;': '\u224a', 'Aring': '\xc5', 'aring': '\xe5', 'Aring;': '\xc5', 'aring;': '\xe5', 'Ascr;': '\U0001d49c', 'ascr;': '\U0001d4b6', 'Assign;': '\u2254', 'ast;': '*', 'asymp;': '\u2248', 'asympeq;': '\u224d', 'Atilde': '\xc3', 'atilde': '\xe3', 'Atilde;': '\xc3', 'atilde;': '\xe3', 'Auml': '\xc4', 'auml': '\xe4', 'Auml;': '\xc4', 'auml;': '\xe4', 'awconint;': '\u2233', 'awint;': '\u2a11', 'backcong;': '\u224c', 'backepsilon;': '\u03f6', 'backprime;': '\u2035', 'backsim;': '\u223d', 'backsimeq;': '\u22cd', 'Backslash;': '\u2216', 'Barv;': '\u2ae7', 'barvee;': '\u22bd', 'Barwed;': '\u2306', 'barwed;': '\u2305', 'barwedge;': '\u2305', 'bbrk;': '\u23b5', 'bbrktbrk;': '\u23b6', 'bcong;': '\u224c', 'Bcy;': '\u0411', 'bcy;': '\u0431', 'bdquo;': '\u201e', 'becaus;': '\u2235', 'Because;': '\u2235', 'because;': '\u2235', 'bemptyv;': '\u29b0', 'bepsi;': '\u03f6', 'bernou;': '\u212c', 'Bernoullis;': '\u212c', 'Beta;': '\u0392', 'beta;': '\u03b2', 'beth;': '\u2136', 'between;': '\u226c', 'Bfr;': '\U0001d505', 'bfr;': '\U0001d51f', 'bigcap;': '\u22c2', 'bigcirc;': '\u25ef', 'bigcup;': '\u22c3', 'bigodot;': '\u2a00', 'bigoplus;': '\u2a01', 'bigotimes;': '\u2a02', 'bigsqcup;': '\u2a06', 'bigstar;': '\u2605', 'bigtriangledown;': '\u25bd', 'bigtriangleup;': '\u25b3', 'biguplus;': '\u2a04', 'bigvee;': '\u22c1', 'bigwedge;': '\u22c0', 'bkarow;': '\u290d', 'blacklozenge;': '\u29eb', 'blacksquare;': '\u25aa', 'blacktriangle;': '\u25b4', 'blacktriangledown;': '\u25be', 'blacktriangleleft;': '\u25c2', 'blacktriangleright;': '\u25b8', 'blank;': '\u2423', 'blk12;': '\u2592', 'blk14;': '\u2591', 'blk34;': '\u2593', 'block;': '\u2588', 'bne;': '=\u20e5', 'bnequiv;': '\u2261\u20e5', 'bNot;': '\u2aed', 'bnot;': '\u2310', 'Bopf;': '\U0001d539', 'bopf;': '\U0001d553', 'bot;': '\u22a5', 'bottom;': '\u22a5', 'bowtie;': '\u22c8', 'boxbox;': '\u29c9', 'boxDL;': '\u2557', 'boxDl;': '\u2556', 'boxdL;': '\u2555', 'boxdl;': '\u2510', 'boxDR;': '\u2554', 'boxDr;': '\u2553', 'boxdR;': '\u2552', 'boxdr;': '\u250c', 'boxH;': '\u2550', 'boxh;': '\u2500', 'boxHD;': '\u2566', 'boxHd;': 
'\u2564', 'boxhD;': '\u2565', 'boxhd;': '\u252c', 'boxHU;': '\u2569', 'boxHu;': '\u2567', 'boxhU;': '\u2568', 'boxhu;': '\u2534', 'boxminus;': '\u229f', 'boxplus;': '\u229e', 'boxtimes;': '\u22a0', 'boxUL;': '\u255d', 'boxUl;': '\u255c', 'boxuL;': '\u255b', 'boxul;': '\u2518', 'boxUR;': '\u255a', 'boxUr;': '\u2559', 'boxuR;': '\u2558', 'boxur;': '\u2514', 'boxV;': '\u2551', 'boxv;': '\u2502', 'boxVH;': '\u256c', 'boxVh;': '\u256b', 'boxvH;': '\u256a', 'boxvh;': '\u253c', 'boxVL;': '\u2563', 'boxVl;': '\u2562', 'boxvL;': '\u2561', 'boxvl;': '\u2524', 'boxVR;': '\u2560', 'boxVr;': '\u255f', 'boxvR;': '\u255e', 'boxvr;': '\u251c', 'bprime;': '\u2035', 'Breve;': '\u02d8', 'breve;': '\u02d8', 'brvbar': '\xa6', 'brvbar;': '\xa6', 'Bscr;': '\u212c', 'bscr;': '\U0001d4b7', 'bsemi;': '\u204f', 'bsim;': '\u223d', 'bsime;': '\u22cd', 'bsol;': '\\', 'bsolb;': '\u29c5', 'bsolhsub;': '\u27c8', 'bull;': '\u2022', 'bullet;': '\u2022', 'bump;': '\u224e', 'bumpE;': '\u2aae', 'bumpe;': '\u224f', 'Bumpeq;': '\u224e', 'bumpeq;': '\u224f', 'Cacute;': '\u0106', 'cacute;': '\u0107', 'Cap;': '\u22d2', 'cap;': '\u2229', 'capand;': '\u2a44', 'capbrcup;': '\u2a49', 'capcap;': '\u2a4b', 'capcup;': '\u2a47', 'capdot;': '\u2a40', 'CapitalDifferentialD;': '\u2145', 'caps;': '\u2229\ufe00', 'caret;': '\u2041', 'caron;': '\u02c7', 'Cayleys;': '\u212d', 'ccaps;': '\u2a4d', 'Ccaron;': '\u010c', 'ccaron;': '\u010d', 'Ccedil': '\xc7', 'ccedil': '\xe7', 'Ccedil;': '\xc7', 'ccedil;': '\xe7', 'Ccirc;': '\u0108', 'ccirc;': '\u0109', 'Cconint;': '\u2230', 'ccups;': '\u2a4c', 'ccupssm;': '\u2a50', 'Cdot;': '\u010a', 'cdot;': '\u010b', 'cedil': '\xb8', 'cedil;': '\xb8', 'Cedilla;': '\xb8', 'cemptyv;': '\u29b2', 'cent': '\xa2', 'cent;': '\xa2', 'CenterDot;': '\xb7', 'centerdot;': '\xb7', 'Cfr;': '\u212d', 'cfr;': '\U0001d520', 'CHcy;': '\u0427', 'chcy;': '\u0447', 'check;': '\u2713', 'checkmark;': '\u2713', 'Chi;': '\u03a7', 'chi;': '\u03c7', 'cir;': '\u25cb', 'circ;': '\u02c6', 'circeq;': '\u2257', 'circlearrowleft;': '\u21ba', 'circlearrowright;': '\u21bb', 'circledast;': '\u229b', 'circledcirc;': '\u229a', 'circleddash;': '\u229d', 'CircleDot;': '\u2299', 'circledR;': '\xae', 'circledS;': '\u24c8', 'CircleMinus;': '\u2296', 'CirclePlus;': '\u2295', 'CircleTimes;': '\u2297', 'cirE;': '\u29c3', 'cire;': '\u2257', 'cirfnint;': '\u2a10', 'cirmid;': '\u2aef', 'cirscir;': '\u29c2', 'ClockwiseContourIntegral;': '\u2232', 'CloseCurlyDoubleQuote;': '\u201d', 'CloseCurlyQuote;': '\u2019', 'clubs;': '\u2663', 'clubsuit;': '\u2663', 'Colon;': '\u2237', 'colon;': ':', 'Colone;': '\u2a74', 'colone;': '\u2254', 'coloneq;': '\u2254', 'comma;': ',', 'commat;': '@', 'comp;': '\u2201', 'compfn;': '\u2218', 'complement;': '\u2201', 'complexes;': '\u2102', 'cong;': '\u2245', 'congdot;': '\u2a6d', 'Congruent;': '\u2261', 'Conint;': '\u222f', 'conint;': '\u222e', 'ContourIntegral;': '\u222e', 'Copf;': '\u2102', 'copf;': '\U0001d554', 'coprod;': '\u2210', 'Coproduct;': '\u2210', 'COPY': '\xa9', 'copy': '\xa9', 'COPY;': '\xa9', 'copy;': '\xa9', 'copysr;': '\u2117', 'CounterClockwiseContourIntegral;': '\u2233', 'crarr;': '\u21b5', 'Cross;': '\u2a2f', 'cross;': '\u2717', 'Cscr;': '\U0001d49e', 'cscr;': '\U0001d4b8', 'csub;': '\u2acf', 'csube;': '\u2ad1', 'csup;': '\u2ad0', 'csupe;': '\u2ad2', 'ctdot;': '\u22ef', 'cudarrl;': '\u2938', 'cudarrr;': '\u2935', 'cuepr;': '\u22de', 'cuesc;': '\u22df', 'cularr;': '\u21b6', 'cularrp;': '\u293d', 'Cup;': '\u22d3', 'cup;': '\u222a', 'cupbrcap;': '\u2a48', 'CupCap;': '\u224d', 'cupcap;': '\u2a46', 'cupcup;': '\u2a4a', 
'cupdot;': '\u228d', 'cupor;': '\u2a45', 'cups;': '\u222a\ufe00', 'curarr;': '\u21b7', 'curarrm;': '\u293c', 'curlyeqprec;': '\u22de', 'curlyeqsucc;': '\u22df', 'curlyvee;': '\u22ce', 'curlywedge;': '\u22cf', 'curren': '\xa4', 'curren;': '\xa4', 'curvearrowleft;': '\u21b6', 'curvearrowright;': '\u21b7', 'cuvee;': '\u22ce', 'cuwed;': '\u22cf', 'cwconint;': '\u2232', 'cwint;': '\u2231', 'cylcty;': '\u232d', 'Dagger;': '\u2021', 'dagger;': '\u2020', 'daleth;': '\u2138', 'Darr;': '\u21a1', 'dArr;': '\u21d3', 'darr;': '\u2193', 'dash;': '\u2010', 'Dashv;': '\u2ae4', 'dashv;': '\u22a3', 'dbkarow;': '\u290f', 'dblac;': '\u02dd', 'Dcaron;': '\u010e', 'dcaron;': '\u010f', 'Dcy;': '\u0414', 'dcy;': '\u0434', 'DD;': '\u2145', 'dd;': '\u2146', 'ddagger;': '\u2021', 'ddarr;': '\u21ca', 'DDotrahd;': '\u2911', 'ddotseq;': '\u2a77', 'deg': '\xb0', 'deg;': '\xb0', 'Del;': '\u2207', 'Delta;': '\u0394', 'delta;': '\u03b4', 'demptyv;': '\u29b1', 'dfisht;': '\u297f', 'Dfr;': '\U0001d507', 'dfr;': '\U0001d521', 'dHar;': '\u2965', 'dharl;': '\u21c3', 'dharr;': '\u21c2', 'DiacriticalAcute;': '\xb4', 'DiacriticalDot;': '\u02d9', 'DiacriticalDoubleAcute;': '\u02dd', 'DiacriticalGrave;': '`', 'DiacriticalTilde;': '\u02dc', 'diam;': '\u22c4', 'Diamond;': '\u22c4', 'diamond;': '\u22c4', 'diamondsuit;': '\u2666', 'diams;': '\u2666', 'die;': '\xa8', 'DifferentialD;': '\u2146', 'digamma;': '\u03dd', 'disin;': '\u22f2', 'div;': '\xf7', 'divide': '\xf7', 'divide;': '\xf7', 'divideontimes;': '\u22c7', 'divonx;': '\u22c7', 'DJcy;': '\u0402', 'djcy;': '\u0452', 'dlcorn;': '\u231e', 'dlcrop;': '\u230d', 'dollar;': '$', 'Dopf;': '\U0001d53b', 'dopf;': '\U0001d555', 'Dot;': '\xa8', 'dot;': '\u02d9', 'DotDot;': '\u20dc', 'doteq;': '\u2250', 'doteqdot;': '\u2251', 'DotEqual;': '\u2250', 'dotminus;': '\u2238', 'dotplus;': '\u2214', 'dotsquare;': '\u22a1', 'doublebarwedge;': '\u2306', 'DoubleContourIntegral;': '\u222f', 'DoubleDot;': '\xa8', 'DoubleDownArrow;': '\u21d3', 'DoubleLeftArrow;': '\u21d0', 'DoubleLeftRightArrow;': '\u21d4', 'DoubleLeftTee;': '\u2ae4', 'DoubleLongLeftArrow;': '\u27f8', 'DoubleLongLeftRightArrow;': '\u27fa', 'DoubleLongRightArrow;': '\u27f9', 'DoubleRightArrow;': '\u21d2', 'DoubleRightTee;': '\u22a8', 'DoubleUpArrow;': '\u21d1', 'DoubleUpDownArrow;': '\u21d5', 'DoubleVerticalBar;': '\u2225', 'DownArrow;': '\u2193', 'Downarrow;': '\u21d3', 'downarrow;': '\u2193', 'DownArrowBar;': '\u2913', 'DownArrowUpArrow;': '\u21f5', 'DownBreve;': '\u0311', 'downdownarrows;': '\u21ca', 'downharpoonleft;': '\u21c3', 'downharpoonright;': '\u21c2', 'DownLeftRightVector;': '\u2950', 'DownLeftTeeVector;': '\u295e', 'DownLeftVector;': '\u21bd', 'DownLeftVectorBar;': '\u2956', 'DownRightTeeVector;': '\u295f', 'DownRightVector;': '\u21c1', 'DownRightVectorBar;': '\u2957', 'DownTee;': '\u22a4', 'DownTeeArrow;': '\u21a7', 'drbkarow;': '\u2910', 'drcorn;': '\u231f', 'drcrop;': '\u230c', 'Dscr;': '\U0001d49f', 'dscr;': '\U0001d4b9', 'DScy;': '\u0405', 'dscy;': '\u0455', 'dsol;': '\u29f6', 'Dstrok;': '\u0110', 'dstrok;': '\u0111', 'dtdot;': '\u22f1', 'dtri;': '\u25bf', 'dtrif;': '\u25be', 'duarr;': '\u21f5', 'duhar;': '\u296f', 'dwangle;': '\u29a6', 'DZcy;': '\u040f', 'dzcy;': '\u045f', 'dzigrarr;': '\u27ff', 'Eacute': '\xc9', 'eacute': '\xe9', 'Eacute;': '\xc9', 'eacute;': '\xe9', 'easter;': '\u2a6e', 'Ecaron;': '\u011a', 'ecaron;': '\u011b', 'ecir;': '\u2256', 'Ecirc': '\xca', 'ecirc': '\xea', 'Ecirc;': '\xca', 'ecirc;': '\xea', 'ecolon;': '\u2255', 'Ecy;': '\u042d', 'ecy;': '\u044d', 'eDDot;': '\u2a77', 'Edot;': '\u0116', 'eDot;': 
'\u2251', 'edot;': '\u0117', 'ee;': '\u2147', 'efDot;': '\u2252', 'Efr;': '\U0001d508', 'efr;': '\U0001d522', 'eg;': '\u2a9a', 'Egrave': '\xc8', 'egrave': '\xe8', 'Egrave;': '\xc8', 'egrave;': '\xe8', 'egs;': '\u2a96', 'egsdot;': '\u2a98', 'el;': '\u2a99', 'Element;': '\u2208', 'elinters;': '\u23e7', 'ell;': '\u2113', 'els;': '\u2a95', 'elsdot;': '\u2a97', 'Emacr;': '\u0112', 'emacr;': '\u0113', 'empty;': '\u2205', 'emptyset;': '\u2205', 'EmptySmallSquare;': '\u25fb', 'emptyv;': '\u2205', 'EmptyVerySmallSquare;': '\u25ab', 'emsp13;': '\u2004', 'emsp14;': '\u2005', 'emsp;': '\u2003', 'ENG;': '\u014a', 'eng;': '\u014b', 'ensp;': '\u2002', 'Eogon;': '\u0118', 'eogon;': '\u0119', 'Eopf;': '\U0001d53c', 'eopf;': '\U0001d556', 'epar;': '\u22d5', 'eparsl;': '\u29e3', 'eplus;': '\u2a71', 'epsi;': '\u03b5', 'Epsilon;': '\u0395', 'epsilon;': '\u03b5', 'epsiv;': '\u03f5', 'eqcirc;': '\u2256', 'eqcolon;': '\u2255', 'eqsim;': '\u2242', 'eqslantgtr;': '\u2a96', 'eqslantless;': '\u2a95', 'Equal;': '\u2a75', 'equals;': '=', 'EqualTilde;': '\u2242', 'equest;': '\u225f', 'Equilibrium;': '\u21cc', 'equiv;': '\u2261', 'equivDD;': '\u2a78', 'eqvparsl;': '\u29e5', 'erarr;': '\u2971', 'erDot;': '\u2253', 'Escr;': '\u2130', 'escr;': '\u212f', 'esdot;': '\u2250', 'Esim;': '\u2a73', 'esim;': '\u2242', 'Eta;': '\u0397', 'eta;': '\u03b7', 'ETH': '\xd0', 'eth': '\xf0', 'ETH;': '\xd0', 'eth;': '\xf0', 'Euml': '\xcb', 'euml': '\xeb', 'Euml;': '\xcb', 'euml;': '\xeb', 'euro;': '\u20ac', 'excl;': '!', 'exist;': '\u2203', 'Exists;': '\u2203', 'expectation;': '\u2130', 'ExponentialE;': '\u2147', 'exponentiale;': '\u2147', 'fallingdotseq;': '\u2252', 'Fcy;': '\u0424', 'fcy;': '\u0444', 'female;': '\u2640', 'ffilig;': '\ufb03', 'fflig;': '\ufb00', 'ffllig;': '\ufb04', 'Ffr;': '\U0001d509', 'ffr;': '\U0001d523', 'filig;': '\ufb01', 'FilledSmallSquare;': '\u25fc', 'FilledVerySmallSquare;': '\u25aa', 'fjlig;': 'fj', 'flat;': '\u266d', 'fllig;': '\ufb02', 'fltns;': '\u25b1', 'fnof;': '\u0192', 'Fopf;': '\U0001d53d', 'fopf;': '\U0001d557', 'ForAll;': '\u2200', 'forall;': '\u2200', 'fork;': '\u22d4', 'forkv;': '\u2ad9', 'Fouriertrf;': '\u2131', 'fpartint;': '\u2a0d', 'frac12': '\xbd', 'frac12;': '\xbd', 'frac13;': '\u2153', 'frac14': '\xbc', 'frac14;': '\xbc', 'frac15;': '\u2155', 'frac16;': '\u2159', 'frac18;': '\u215b', 'frac23;': '\u2154', 'frac25;': '\u2156', 'frac34': '\xbe', 'frac34;': '\xbe', 'frac35;': '\u2157', 'frac38;': '\u215c', 'frac45;': '\u2158', 'frac56;': '\u215a', 'frac58;': '\u215d', 'frac78;': '\u215e', 'frasl;': '\u2044', 'frown;': '\u2322', 'Fscr;': '\u2131', 'fscr;': '\U0001d4bb', 'gacute;': '\u01f5', 'Gamma;': '\u0393', 'gamma;': '\u03b3', 'Gammad;': '\u03dc', 'gammad;': '\u03dd', 'gap;': '\u2a86', 'Gbreve;': '\u011e', 'gbreve;': '\u011f', 'Gcedil;': '\u0122', 'Gcirc;': '\u011c', 'gcirc;': '\u011d', 'Gcy;': '\u0413', 'gcy;': '\u0433', 'Gdot;': '\u0120', 'gdot;': '\u0121', 'gE;': '\u2267', 'ge;': '\u2265', 'gEl;': '\u2a8c', 'gel;': '\u22db', 'geq;': '\u2265', 'geqq;': '\u2267', 'geqslant;': '\u2a7e', 'ges;': '\u2a7e', 'gescc;': '\u2aa9', 'gesdot;': '\u2a80', 'gesdoto;': '\u2a82', 'gesdotol;': '\u2a84', 'gesl;': '\u22db\ufe00', 'gesles;': '\u2a94', 'Gfr;': '\U0001d50a', 'gfr;': '\U0001d524', 'Gg;': '\u22d9', 'gg;': '\u226b', 'ggg;': '\u22d9', 'gimel;': '\u2137', 'GJcy;': '\u0403', 'gjcy;': '\u0453', 'gl;': '\u2277', 'gla;': '\u2aa5', 'glE;': '\u2a92', 'glj;': '\u2aa4', 'gnap;': '\u2a8a', 'gnapprox;': '\u2a8a', 'gnE;': '\u2269', 'gne;': '\u2a88', 'gneq;': '\u2a88', 'gneqq;': '\u2269', 'gnsim;': '\u22e7', 
'Gopf;': '\U0001d53e', 'gopf;': '\U0001d558', 'grave;': '`', 'GreaterEqual;': '\u2265', 'GreaterEqualLess;': '\u22db', 'GreaterFullEqual;': '\u2267', 'GreaterGreater;': '\u2aa2', 'GreaterLess;': '\u2277', 'GreaterSlantEqual;': '\u2a7e', 'GreaterTilde;': '\u2273', 'Gscr;': '\U0001d4a2', 'gscr;': '\u210a', 'gsim;': '\u2273', 'gsime;': '\u2a8e', 'gsiml;': '\u2a90', 'GT': '>', 'gt': '>', 'GT;': '>', 'Gt;': '\u226b', 'gt;': '>', 'gtcc;': '\u2aa7', 'gtcir;': '\u2a7a', 'gtdot;': '\u22d7', 'gtlPar;': '\u2995', 'gtquest;': '\u2a7c', 'gtrapprox;': '\u2a86', 'gtrarr;': '\u2978', 'gtrdot;': '\u22d7', 'gtreqless;': '\u22db', 'gtreqqless;': '\u2a8c', 'gtrless;': '\u2277', 'gtrsim;': '\u2273', 'gvertneqq;': '\u2269\ufe00', 'gvnE;': '\u2269\ufe00', 'Hacek;': '\u02c7', 'hairsp;': '\u200a', 'half;': '\xbd', 'hamilt;': '\u210b', 'HARDcy;': '\u042a', 'hardcy;': '\u044a', 'hArr;': '\u21d4', 'harr;': '\u2194', 'harrcir;': '\u2948', 'harrw;': '\u21ad', 'Hat;': '^', 'hbar;': '\u210f', 'Hcirc;': '\u0124', 'hcirc;': '\u0125', 'hearts;': '\u2665', 'heartsuit;': '\u2665', 'hellip;': '\u2026', 'hercon;': '\u22b9', 'Hfr;': '\u210c', 'hfr;': '\U0001d525', 'HilbertSpace;': '\u210b', 'hksearow;': '\u2925', 'hkswarow;': '\u2926', 'hoarr;': '\u21ff', 'homtht;': '\u223b', 'hookleftarrow;': '\u21a9', 'hookrightarrow;': '\u21aa', 'Hopf;': '\u210d', 'hopf;': '\U0001d559', 'horbar;': '\u2015', 'HorizontalLine;': '\u2500', 'Hscr;': '\u210b', 'hscr;': '\U0001d4bd', 'hslash;': '\u210f', 'Hstrok;': '\u0126', 'hstrok;': '\u0127', 'HumpDownHump;': '\u224e', 'HumpEqual;': '\u224f', 'hybull;': '\u2043', 'hyphen;': '\u2010', 'Iacute': '\xcd', 'iacute': '\xed', 'Iacute;': '\xcd', 'iacute;': '\xed', 'ic;': '\u2063', 'Icirc': '\xce', 'icirc': '\xee', 'Icirc;': '\xce', 'icirc;': '\xee', 'Icy;': '\u0418', 'icy;': '\u0438', 'Idot;': '\u0130', 'IEcy;': '\u0415', 'iecy;': '\u0435', 'iexcl': '\xa1', 'iexcl;': '\xa1', 'iff;': '\u21d4', 'Ifr;': '\u2111', 'ifr;': '\U0001d526', 'Igrave': '\xcc', 'igrave': '\xec', 'Igrave;': '\xcc', 'igrave;': '\xec', 'ii;': '\u2148', 'iiiint;': '\u2a0c', 'iiint;': '\u222d', 'iinfin;': '\u29dc', 'iiota;': '\u2129', 'IJlig;': '\u0132', 'ijlig;': '\u0133', 'Im;': '\u2111', 'Imacr;': '\u012a', 'imacr;': '\u012b', 'image;': '\u2111', 'ImaginaryI;': '\u2148', 'imagline;': '\u2110', 'imagpart;': '\u2111', 'imath;': '\u0131', 'imof;': '\u22b7', 'imped;': '\u01b5', 'Implies;': '\u21d2', 'in;': '\u2208', 'incare;': '\u2105', 'infin;': '\u221e', 'infintie;': '\u29dd', 'inodot;': '\u0131', 'Int;': '\u222c', 'int;': '\u222b', 'intcal;': '\u22ba', 'integers;': '\u2124', 'Integral;': '\u222b', 'intercal;': '\u22ba', 'Intersection;': '\u22c2', 'intlarhk;': '\u2a17', 'intprod;': '\u2a3c', 'InvisibleComma;': '\u2063', 'InvisibleTimes;': '\u2062', 'IOcy;': '\u0401', 'iocy;': '\u0451', 'Iogon;': '\u012e', 'iogon;': '\u012f', 'Iopf;': '\U0001d540', 'iopf;': '\U0001d55a', 'Iota;': '\u0399', 'iota;': '\u03b9', 'iprod;': '\u2a3c', 'iquest': '\xbf', 'iquest;': '\xbf', 'Iscr;': '\u2110', 'iscr;': '\U0001d4be', 'isin;': '\u2208', 'isindot;': '\u22f5', 'isinE;': '\u22f9', 'isins;': '\u22f4', 'isinsv;': '\u22f3', 'isinv;': '\u2208', 'it;': '\u2062', 'Itilde;': '\u0128', 'itilde;': '\u0129', 'Iukcy;': '\u0406', 'iukcy;': '\u0456', 'Iuml': '\xcf', 'iuml': '\xef', 'Iuml;': '\xcf', 'iuml;': '\xef', 'Jcirc;': '\u0134', 'jcirc;': '\u0135', 'Jcy;': '\u0419', 'jcy;': '\u0439', 'Jfr;': '\U0001d50d', 'jfr;': '\U0001d527', 'jmath;': '\u0237', 'Jopf;': '\U0001d541', 'jopf;': '\U0001d55b', 'Jscr;': '\U0001d4a5', 'jscr;': '\U0001d4bf', 'Jsercy;': '\u0408', 
'jsercy;': '\u0458', 'Jukcy;': '\u0404', 'jukcy;': '\u0454', 'Kappa;': '\u039a', 'kappa;': '\u03ba', 'kappav;': '\u03f0', 'Kcedil;': '\u0136', 'kcedil;': '\u0137', 'Kcy;': '\u041a', 'kcy;': '\u043a', 'Kfr;': '\U0001d50e', 'kfr;': '\U0001d528', 'kgreen;': '\u0138', 'KHcy;': '\u0425', 'khcy;': '\u0445', 'KJcy;': '\u040c', 'kjcy;': '\u045c', 'Kopf;': '\U0001d542', 'kopf;': '\U0001d55c', 'Kscr;': '\U0001d4a6', 'kscr;': '\U0001d4c0', 'lAarr;': '\u21da', 'Lacute;': '\u0139', 'lacute;': '\u013a', 'laemptyv;': '\u29b4', 'lagran;': '\u2112', 'Lambda;': '\u039b', 'lambda;': '\u03bb', 'Lang;': '\u27ea', 'lang;': '\u27e8', 'langd;': '\u2991', 'langle;': '\u27e8', 'lap;': '\u2a85', 'Laplacetrf;': '\u2112', 'laquo': '\xab', 'laquo;': '\xab', 'Larr;': '\u219e', 'lArr;': '\u21d0', 'larr;': '\u2190', 'larrb;': '\u21e4', 'larrbfs;': '\u291f', 'larrfs;': '\u291d', 'larrhk;': '\u21a9', 'larrlp;': '\u21ab', 'larrpl;': '\u2939', 'larrsim;': '\u2973', 'larrtl;': '\u21a2', 'lat;': '\u2aab', 'lAtail;': '\u291b', 'latail;': '\u2919', 'late;': '\u2aad', 'lates;': '\u2aad\ufe00', 'lBarr;': '\u290e', 'lbarr;': '\u290c', 'lbbrk;': '\u2772', 'lbrace;': '{', 'lbrack;': '[', 'lbrke;': '\u298b', 'lbrksld;': '\u298f', 'lbrkslu;': '\u298d', 'Lcaron;': '\u013d', 'lcaron;': '\u013e', 'Lcedil;': '\u013b', 'lcedil;': '\u013c', 'lceil;': '\u2308', 'lcub;': '{', 'Lcy;': '\u041b', 'lcy;': '\u043b', 'ldca;': '\u2936', 'ldquo;': '\u201c', 'ldquor;': '\u201e', 'ldrdhar;': '\u2967', 'ldrushar;': '\u294b', 'ldsh;': '\u21b2', 'lE;': '\u2266', 'le;': '\u2264', 'LeftAngleBracket;': '\u27e8', 'LeftArrow;': '\u2190', 'Leftarrow;': '\u21d0', 'leftarrow;': '\u2190', 'LeftArrowBar;': '\u21e4', 'LeftArrowRightArrow;': '\u21c6', 'leftarrowtail;': '\u21a2', 'LeftCeiling;': '\u2308', 'LeftDoubleBracket;': '\u27e6', 'LeftDownTeeVector;': '\u2961', 'LeftDownVector;': '\u21c3', 'LeftDownVectorBar;': '\u2959', 'LeftFloor;': '\u230a', 'leftharpoondown;': '\u21bd', 'leftharpoonup;': '\u21bc', 'leftleftarrows;': '\u21c7', 'LeftRightArrow;': '\u2194', 'Leftrightarrow;': '\u21d4', 'leftrightarrow;': '\u2194', 'leftrightarrows;': '\u21c6', 'leftrightharpoons;': '\u21cb', 'leftrightsquigarrow;': '\u21ad', 'LeftRightVector;': '\u294e', 'LeftTee;': '\u22a3', 'LeftTeeArrow;': '\u21a4', 'LeftTeeVector;': '\u295a', 'leftthreetimes;': '\u22cb', 'LeftTriangle;': '\u22b2', 'LeftTriangleBar;': '\u29cf', 'LeftTriangleEqual;': '\u22b4', 'LeftUpDownVector;': '\u2951', 'LeftUpTeeVector;': '\u2960', 'LeftUpVector;': '\u21bf', 'LeftUpVectorBar;': '\u2958', 'LeftVector;': '\u21bc', 'LeftVectorBar;': '\u2952', 'lEg;': '\u2a8b', 'leg;': '\u22da', 'leq;': '\u2264', 'leqq;': '\u2266', 'leqslant;': '\u2a7d', 'les;': '\u2a7d', 'lescc;': '\u2aa8', 'lesdot;': '\u2a7f', 'lesdoto;': '\u2a81', 'lesdotor;': '\u2a83', 'lesg;': '\u22da\ufe00', 'lesges;': '\u2a93', 'lessapprox;': '\u2a85', 'lessdot;': '\u22d6', 'lesseqgtr;': '\u22da', 'lesseqqgtr;': '\u2a8b', 'LessEqualGreater;': '\u22da', 'LessFullEqual;': '\u2266', 'LessGreater;': '\u2276', 'lessgtr;': '\u2276', 'LessLess;': '\u2aa1', 'lesssim;': '\u2272', 'LessSlantEqual;': '\u2a7d', 'LessTilde;': '\u2272', 'lfisht;': '\u297c', 'lfloor;': '\u230a', 'Lfr;': '\U0001d50f', 'lfr;': '\U0001d529', 'lg;': '\u2276', 'lgE;': '\u2a91', 'lHar;': '\u2962', 'lhard;': '\u21bd', 'lharu;': '\u21bc', 'lharul;': '\u296a', 'lhblk;': '\u2584', 'LJcy;': '\u0409', 'ljcy;': '\u0459', 'Ll;': '\u22d8', 'll;': '\u226a', 'llarr;': '\u21c7', 'llcorner;': '\u231e', 'Lleftarrow;': '\u21da', 'llhard;': '\u296b', 'lltri;': '\u25fa', 'Lmidot;': '\u013f', 'lmidot;': 
'\u0140', 'lmoust;': '\u23b0', 'lmoustache;': '\u23b0', 'lnap;': '\u2a89', 'lnapprox;': '\u2a89', 'lnE;': '\u2268', 'lne;': '\u2a87', 'lneq;': '\u2a87', 'lneqq;': '\u2268', 'lnsim;': '\u22e6', 'loang;': '\u27ec', 'loarr;': '\u21fd', 'lobrk;': '\u27e6', 'LongLeftArrow;': '\u27f5', 'Longleftarrow;': '\u27f8', 'longleftarrow;': '\u27f5', 'LongLeftRightArrow;': '\u27f7', 'Longleftrightarrow;': '\u27fa', 'longleftrightarrow;': '\u27f7', 'longmapsto;': '\u27fc', 'LongRightArrow;': '\u27f6', 'Longrightarrow;': '\u27f9', 'longrightarrow;': '\u27f6', 'looparrowleft;': '\u21ab', 'looparrowright;': '\u21ac', 'lopar;': '\u2985', 'Lopf;': '\U0001d543', 'lopf;': '\U0001d55d', 'loplus;': '\u2a2d', 'lotimes;': '\u2a34', 'lowast;': '\u2217', 'lowbar;': '_', 'LowerLeftArrow;': '\u2199', 'LowerRightArrow;': '\u2198', 'loz;': '\u25ca', 'lozenge;': '\u25ca', 'lozf;': '\u29eb', 'lpar;': '(', 'lparlt;': '\u2993', 'lrarr;': '\u21c6', 'lrcorner;': '\u231f', 'lrhar;': '\u21cb', 'lrhard;': '\u296d', 'lrm;': '\u200e', 'lrtri;': '\u22bf', 'lsaquo;': '\u2039', 'Lscr;': '\u2112', 'lscr;': '\U0001d4c1', 'Lsh;': '\u21b0', 'lsh;': '\u21b0', 'lsim;': '\u2272', 'lsime;': '\u2a8d', 'lsimg;': '\u2a8f', 'lsqb;': '[', 'lsquo;': '\u2018', 'lsquor;': '\u201a', 'Lstrok;': '\u0141', 'lstrok;': '\u0142', 'LT': '<', 'lt': '<', 'LT;': '<', 'Lt;': '\u226a', 'lt;': '<', 'ltcc;': '\u2aa6', 'ltcir;': '\u2a79', 'ltdot;': '\u22d6', 'lthree;': '\u22cb', 'ltimes;': '\u22c9', 'ltlarr;': '\u2976', 'ltquest;': '\u2a7b', 'ltri;': '\u25c3', 'ltrie;': '\u22b4', 'ltrif;': '\u25c2', 'ltrPar;': '\u2996', 'lurdshar;': '\u294a', 'luruhar;': '\u2966', 'lvertneqq;': '\u2268\ufe00', 'lvnE;': '\u2268\ufe00', 'macr': '\xaf', 'macr;': '\xaf', 'male;': '\u2642', 'malt;': '\u2720', 'maltese;': '\u2720', 'Map;': '\u2905', 'map;': '\u21a6', 'mapsto;': '\u21a6', 'mapstodown;': '\u21a7', 'mapstoleft;': '\u21a4', 'mapstoup;': '\u21a5', 'marker;': '\u25ae', 'mcomma;': '\u2a29', 'Mcy;': '\u041c', 'mcy;': '\u043c', 'mdash;': '\u2014', 'mDDot;': '\u223a', 'measuredangle;': '\u2221', 'MediumSpace;': '\u205f', 'Mellintrf;': '\u2133', 'Mfr;': '\U0001d510', 'mfr;': '\U0001d52a', 'mho;': '\u2127', 'micro': '\xb5', 'micro;': '\xb5', 'mid;': '\u2223', 'midast;': '*', 'midcir;': '\u2af0', 'middot': '\xb7', 'middot;': '\xb7', 'minus;': '\u2212', 'minusb;': '\u229f', 'minusd;': '\u2238', 'minusdu;': '\u2a2a', 'MinusPlus;': '\u2213', 'mlcp;': '\u2adb', 'mldr;': '\u2026', 'mnplus;': '\u2213', 'models;': '\u22a7', 'Mopf;': '\U0001d544', 'mopf;': '\U0001d55e', 'mp;': '\u2213', 'Mscr;': '\u2133', 'mscr;': '\U0001d4c2', 'mstpos;': '\u223e', 'Mu;': '\u039c', 'mu;': '\u03bc', 'multimap;': '\u22b8', 'mumap;': '\u22b8', 'nabla;': '\u2207', 'Nacute;': '\u0143', 'nacute;': '\u0144', 'nang;': '\u2220\u20d2', 'nap;': '\u2249', 'napE;': '\u2a70\u0338', 'napid;': '\u224b\u0338', 'napos;': '\u0149', 'napprox;': '\u2249', 'natur;': '\u266e', 'natural;': '\u266e', 'naturals;': '\u2115', 'nbsp': '\xa0', 'nbsp;': '\xa0', 'nbump;': '\u224e\u0338', 'nbumpe;': '\u224f\u0338', 'ncap;': '\u2a43', 'Ncaron;': '\u0147', 'ncaron;': '\u0148', 'Ncedil;': '\u0145', 'ncedil;': '\u0146', 'ncong;': '\u2247', 'ncongdot;': '\u2a6d\u0338', 'ncup;': '\u2a42', 'Ncy;': '\u041d', 'ncy;': '\u043d', 'ndash;': '\u2013', 'ne;': '\u2260', 'nearhk;': '\u2924', 'neArr;': '\u21d7', 'nearr;': '\u2197', 'nearrow;': '\u2197', 'nedot;': '\u2250\u0338', 'NegativeMediumSpace;': '\u200b', 'NegativeThickSpace;': '\u200b', 'NegativeThinSpace;': '\u200b', 'NegativeVeryThinSpace;': '\u200b', 'nequiv;': '\u2262', 'nesear;': '\u2928', 
'nesim;': '\u2242\u0338', 'NestedGreaterGreater;': '\u226b', 'NestedLessLess;': '\u226a', 'NewLine;': '\n', 'nexist;': '\u2204', 'nexists;': '\u2204', 'Nfr;': '\U0001d511', 'nfr;': '\U0001d52b', 'ngE;': '\u2267\u0338', 'nge;': '\u2271', 'ngeq;': '\u2271', 'ngeqq;': '\u2267\u0338', 'ngeqslant;': '\u2a7e\u0338', 'nges;': '\u2a7e\u0338', 'nGg;': '\u22d9\u0338', 'ngsim;': '\u2275', 'nGt;': '\u226b\u20d2', 'ngt;': '\u226f', 'ngtr;': '\u226f', 'nGtv;': '\u226b\u0338', 'nhArr;': '\u21ce', 'nharr;': '\u21ae', 'nhpar;': '\u2af2', 'ni;': '\u220b', 'nis;': '\u22fc', 'nisd;': '\u22fa', 'niv;': '\u220b', 'NJcy;': '\u040a', 'njcy;': '\u045a', 'nlArr;': '\u21cd', 'nlarr;': '\u219a', 'nldr;': '\u2025', 'nlE;': '\u2266\u0338', 'nle;': '\u2270', 'nLeftarrow;': '\u21cd', 'nleftarrow;': '\u219a', 'nLeftrightarrow;': '\u21ce', 'nleftrightarrow;': '\u21ae', 'nleq;': '\u2270', 'nleqq;': '\u2266\u0338', 'nleqslant;': '\u2a7d\u0338', 'nles;': '\u2a7d\u0338', 'nless;': '\u226e', 'nLl;': '\u22d8\u0338', 'nlsim;': '\u2274', 'nLt;': '\u226a\u20d2', 'nlt;': '\u226e', 'nltri;': '\u22ea', 'nltrie;': '\u22ec', 'nLtv;': '\u226a\u0338', 'nmid;': '\u2224', 'NoBreak;': '\u2060', 'NonBreakingSpace;': '\xa0', 'Nopf;': '\u2115', 'nopf;': '\U0001d55f', 'not': '\xac', 'Not;': '\u2aec', 'not;': '\xac', 'NotCongruent;': '\u2262', 'NotCupCap;': '\u226d', 'NotDoubleVerticalBar;': '\u2226', 'NotElement;': '\u2209', 'NotEqual;': '\u2260', 'NotEqualTilde;': '\u2242\u0338', 'NotExists;': '\u2204', 'NotGreater;': '\u226f', 'NotGreaterEqual;': '\u2271', 'NotGreaterFullEqual;': '\u2267\u0338', 'NotGreaterGreater;': '\u226b\u0338', 'NotGreaterLess;': '\u2279', 'NotGreaterSlantEqual;': '\u2a7e\u0338', 'NotGreaterTilde;': '\u2275', 'NotHumpDownHump;': '\u224e\u0338', 'NotHumpEqual;': '\u224f\u0338', 'notin;': '\u2209', 'notindot;': '\u22f5\u0338', 'notinE;': '\u22f9\u0338', 'notinva;': '\u2209', 'notinvb;': '\u22f7', 'notinvc;': '\u22f6', 'NotLeftTriangle;': '\u22ea', 'NotLeftTriangleBar;': '\u29cf\u0338', 'NotLeftTriangleEqual;': '\u22ec', 'NotLess;': '\u226e', 'NotLessEqual;': '\u2270', 'NotLessGreater;': '\u2278', 'NotLessLess;': '\u226a\u0338', 'NotLessSlantEqual;': '\u2a7d\u0338', 'NotLessTilde;': '\u2274', 'NotNestedGreaterGreater;': '\u2aa2\u0338', 'NotNestedLessLess;': '\u2aa1\u0338', 'notni;': '\u220c', 'notniva;': '\u220c', 'notnivb;': '\u22fe', 'notnivc;': '\u22fd', 'NotPrecedes;': '\u2280', 'NotPrecedesEqual;': '\u2aaf\u0338', 'NotPrecedesSlantEqual;': '\u22e0', 'NotReverseElement;': '\u220c', 'NotRightTriangle;': '\u22eb', 'NotRightTriangleBar;': '\u29d0\u0338', 'NotRightTriangleEqual;': '\u22ed', 'NotSquareSubset;': '\u228f\u0338', 'NotSquareSubsetEqual;': '\u22e2', 'NotSquareSuperset;': '\u2290\u0338', 'NotSquareSupersetEqual;': '\u22e3', 'NotSubset;': '\u2282\u20d2', 'NotSubsetEqual;': '\u2288', 'NotSucceeds;': '\u2281', 'NotSucceedsEqual;': '\u2ab0\u0338', 'NotSucceedsSlantEqual;': '\u22e1', 'NotSucceedsTilde;': '\u227f\u0338', 'NotSuperset;': '\u2283\u20d2', 'NotSupersetEqual;': '\u2289', 'NotTilde;': '\u2241', 'NotTildeEqual;': '\u2244', 'NotTildeFullEqual;': '\u2247', 'NotTildeTilde;': '\u2249', 'NotVerticalBar;': '\u2224', 'npar;': '\u2226', 'nparallel;': '\u2226', 'nparsl;': '\u2afd\u20e5', 'npart;': '\u2202\u0338', 'npolint;': '\u2a14', 'npr;': '\u2280', 'nprcue;': '\u22e0', 'npre;': '\u2aaf\u0338', 'nprec;': '\u2280', 'npreceq;': '\u2aaf\u0338', 'nrArr;': '\u21cf', 'nrarr;': '\u219b', 'nrarrc;': '\u2933\u0338', 'nrarrw;': '\u219d\u0338', 'nRightarrow;': '\u21cf', 'nrightarrow;': '\u219b', 'nrtri;': '\u22eb', 'nrtrie;': 
'\u22ed', 'nsc;': '\u2281', 'nsccue;': '\u22e1', 'nsce;': '\u2ab0\u0338', 'Nscr;': '\U0001d4a9', 'nscr;': '\U0001d4c3', 'nshortmid;': '\u2224', 'nshortparallel;': '\u2226', 'nsim;': '\u2241', 'nsime;': '\u2244', 'nsimeq;': '\u2244', 'nsmid;': '\u2224', 'nspar;': '\u2226', 'nsqsube;': '\u22e2', 'nsqsupe;': '\u22e3', 'nsub;': '\u2284', 'nsubE;': '\u2ac5\u0338', 'nsube;': '\u2288', 'nsubset;': '\u2282\u20d2', 'nsubseteq;': '\u2288', 'nsubseteqq;': '\u2ac5\u0338', 'nsucc;': '\u2281', 'nsucceq;': '\u2ab0\u0338', 'nsup;': '\u2285', 'nsupE;': '\u2ac6\u0338', 'nsupe;': '\u2289', 'nsupset;': '\u2283\u20d2', 'nsupseteq;': '\u2289', 'nsupseteqq;': '\u2ac6\u0338', 'ntgl;': '\u2279', 'Ntilde': '\xd1', 'ntilde': '\xf1', 'Ntilde;': '\xd1', 'ntilde;': '\xf1', 'ntlg;': '\u2278', 'ntriangleleft;': '\u22ea', 'ntrianglelefteq;': '\u22ec', 'ntriangleright;': '\u22eb', 'ntrianglerighteq;': '\u22ed', 'Nu;': '\u039d', 'nu;': '\u03bd', 'num;': '#', 'numero;': '\u2116', 'numsp;': '\u2007', 'nvap;': '\u224d\u20d2', 'nVDash;': '\u22af', 'nVdash;': '\u22ae', 'nvDash;': '\u22ad', 'nvdash;': '\u22ac', 'nvge;': '\u2265\u20d2', 'nvgt;': '>\u20d2', 'nvHarr;': '\u2904', 'nvinfin;': '\u29de', 'nvlArr;': '\u2902', 'nvle;': '\u2264\u20d2', 'nvlt;': '<\u20d2', 'nvltrie;': '\u22b4\u20d2', 'nvrArr;': '\u2903', 'nvrtrie;': '\u22b5\u20d2', 'nvsim;': '\u223c\u20d2', 'nwarhk;': '\u2923', 'nwArr;': '\u21d6', 'nwarr;': '\u2196', 'nwarrow;': '\u2196', 'nwnear;': '\u2927', 'Oacute': '\xd3', 'oacute': '\xf3', 'Oacute;': '\xd3', 'oacute;': '\xf3', 'oast;': '\u229b', 'ocir;': '\u229a', 'Ocirc': '\xd4', 'ocirc': '\xf4', 'Ocirc;': '\xd4', 'ocirc;': '\xf4', 'Ocy;': '\u041e', 'ocy;': '\u043e', 'odash;': '\u229d', 'Odblac;': '\u0150', 'odblac;': '\u0151', 'odiv;': '\u2a38', 'odot;': '\u2299', 'odsold;': '\u29bc', 'OElig;': '\u0152', 'oelig;': '\u0153', 'ofcir;': '\u29bf', 'Ofr;': '\U0001d512', 'ofr;': '\U0001d52c', 'ogon;': '\u02db', 'Ograve': '\xd2', 'ograve': '\xf2', 'Ograve;': '\xd2', 'ograve;': '\xf2', 'ogt;': '\u29c1', 'ohbar;': '\u29b5', 'ohm;': '\u03a9', 'oint;': '\u222e', 'olarr;': '\u21ba', 'olcir;': '\u29be', 'olcross;': '\u29bb', 'oline;': '\u203e', 'olt;': '\u29c0', 'Omacr;': '\u014c', 'omacr;': '\u014d', 'Omega;': '\u03a9', 'omega;': '\u03c9', 'Omicron;': '\u039f', 'omicron;': '\u03bf', 'omid;': '\u29b6', 'ominus;': '\u2296', 'Oopf;': '\U0001d546', 'oopf;': '\U0001d560', 'opar;': '\u29b7', 'OpenCurlyDoubleQuote;': '\u201c', 'OpenCurlyQuote;': '\u2018', 'operp;': '\u29b9', 'oplus;': '\u2295', 'Or;': '\u2a54', 'or;': '\u2228', 'orarr;': '\u21bb', 'ord;': '\u2a5d', 'order;': '\u2134', 'orderof;': '\u2134', 'ordf': '\xaa', 'ordf;': '\xaa', 'ordm': '\xba', 'ordm;': '\xba', 'origof;': '\u22b6', 'oror;': '\u2a56', 'orslope;': '\u2a57', 'orv;': '\u2a5b', 'oS;': '\u24c8', 'Oscr;': '\U0001d4aa', 'oscr;': '\u2134', 'Oslash': '\xd8', 'oslash': '\xf8', 'Oslash;': '\xd8', 'oslash;': '\xf8', 'osol;': '\u2298', 'Otilde': '\xd5', 'otilde': '\xf5', 'Otilde;': '\xd5', 'otilde;': '\xf5', 'Otimes;': '\u2a37', 'otimes;': '\u2297', 'otimesas;': '\u2a36', 'Ouml': '\xd6', 'ouml': '\xf6', 'Ouml;': '\xd6', 'ouml;': '\xf6', 'ovbar;': '\u233d', 'OverBar;': '\u203e', 'OverBrace;': '\u23de', 'OverBracket;': '\u23b4', 'OverParenthesis;': '\u23dc', 'par;': '\u2225', 'para': '\xb6', 'para;': '\xb6', 'parallel;': '\u2225', 'parsim;': '\u2af3', 'parsl;': '\u2afd', 'part;': '\u2202', 'PartialD;': '\u2202', 'Pcy;': '\u041f', 'pcy;': '\u043f', 'percnt;': '%', 'period;': '.', 'permil;': '\u2030', 'perp;': '\u22a5', 'pertenk;': '\u2031', 'Pfr;': '\U0001d513', 'pfr;': 
'\U0001d52d', 'Phi;': '\u03a6', 'phi;': '\u03c6', 'phiv;': '\u03d5', 'phmmat;': '\u2133', 'phone;': '\u260e', 'Pi;': '\u03a0', 'pi;': '\u03c0', 'pitchfork;': '\u22d4', 'piv;': '\u03d6', 'planck;': '\u210f', 'planckh;': '\u210e', 'plankv;': '\u210f', 'plus;': '+', 'plusacir;': '\u2a23', 'plusb;': '\u229e', 'pluscir;': '\u2a22', 'plusdo;': '\u2214', 'plusdu;': '\u2a25', 'pluse;': '\u2a72', 'PlusMinus;': '\xb1', 'plusmn': '\xb1', 'plusmn;': '\xb1', 'plussim;': '\u2a26', 'plustwo;': '\u2a27', 'pm;': '\xb1', 'Poincareplane;': '\u210c', 'pointint;': '\u2a15', 'Popf;': '\u2119', 'popf;': '\U0001d561', 'pound': '\xa3', 'pound;': '\xa3', 'Pr;': '\u2abb', 'pr;': '\u227a', 'prap;': '\u2ab7', 'prcue;': '\u227c', 'prE;': '\u2ab3', 'pre;': '\u2aaf', 'prec;': '\u227a', 'precapprox;': '\u2ab7', 'preccurlyeq;': '\u227c', 'Precedes;': '\u227a', 'PrecedesEqual;': '\u2aaf', 'PrecedesSlantEqual;': '\u227c', 'PrecedesTilde;': '\u227e', 'preceq;': '\u2aaf', 'precnapprox;': '\u2ab9', 'precneqq;': '\u2ab5', 'precnsim;': '\u22e8', 'precsim;': '\u227e', 'Prime;': '\u2033', 'prime;': '\u2032', 'primes;': '\u2119', 'prnap;': '\u2ab9', 'prnE;': '\u2ab5', 'prnsim;': '\u22e8', 'prod;': '\u220f', 'Product;': '\u220f', 'profalar;': '\u232e', 'profline;': '\u2312', 'profsurf;': '\u2313', 'prop;': '\u221d', 'Proportion;': '\u2237', 'Proportional;': '\u221d', 'propto;': '\u221d', 'prsim;': '\u227e', 'prurel;': '\u22b0', 'Pscr;': '\U0001d4ab', 'pscr;': '\U0001d4c5', 'Psi;': '\u03a8', 'psi;': '\u03c8', 'puncsp;': '\u2008', 'Qfr;': '\U0001d514', 'qfr;': '\U0001d52e', 'qint;': '\u2a0c', 'Qopf;': '\u211a', 'qopf;': '\U0001d562', 'qprime;': '\u2057', 'Qscr;': '\U0001d4ac', 'qscr;': '\U0001d4c6', 'quaternions;': '\u210d', 'quatint;': '\u2a16', 'quest;': '?', 'questeq;': '\u225f', 'QUOT': '"', 'quot': '"', 'QUOT;': '"', 'quot;': '"', 'rAarr;': '\u21db', 'race;': '\u223d\u0331', 'Racute;': '\u0154', 'racute;': '\u0155', 'radic;': '\u221a', 'raemptyv;': '\u29b3', 'Rang;': '\u27eb', 'rang;': '\u27e9', 'rangd;': '\u2992', 'range;': '\u29a5', 'rangle;': '\u27e9', 'raquo': '\xbb', 'raquo;': '\xbb', 'Rarr;': '\u21a0', 'rArr;': '\u21d2', 'rarr;': '\u2192', 'rarrap;': '\u2975', 'rarrb;': '\u21e5', 'rarrbfs;': '\u2920', 'rarrc;': '\u2933', 'rarrfs;': '\u291e', 'rarrhk;': '\u21aa', 'rarrlp;': '\u21ac', 'rarrpl;': '\u2945', 'rarrsim;': '\u2974', 'Rarrtl;': '\u2916', 'rarrtl;': '\u21a3', 'rarrw;': '\u219d', 'rAtail;': '\u291c', 'ratail;': '\u291a', 'ratio;': '\u2236', 'rationals;': '\u211a', 'RBarr;': '\u2910', 'rBarr;': '\u290f', 'rbarr;': '\u290d', 'rbbrk;': '\u2773', 'rbrace;': '}', 'rbrack;': ']', 'rbrke;': '\u298c', 'rbrksld;': '\u298e', 'rbrkslu;': '\u2990', 'Rcaron;': '\u0158', 'rcaron;': '\u0159', 'Rcedil;': '\u0156', 'rcedil;': '\u0157', 'rceil;': '\u2309', 'rcub;': '}', 'Rcy;': '\u0420', 'rcy;': '\u0440', 'rdca;': '\u2937', 'rdldhar;': '\u2969', 'rdquo;': '\u201d', 'rdquor;': '\u201d', 'rdsh;': '\u21b3', 'Re;': '\u211c', 'real;': '\u211c', 'realine;': '\u211b', 'realpart;': '\u211c', 'reals;': '\u211d', 'rect;': '\u25ad', 'REG': '\xae', 'reg': '\xae', 'REG;': '\xae', 'reg;': '\xae', 'ReverseElement;': '\u220b', 'ReverseEquilibrium;': '\u21cb', 'ReverseUpEquilibrium;': '\u296f', 'rfisht;': '\u297d', 'rfloor;': '\u230b', 'Rfr;': '\u211c', 'rfr;': '\U0001d52f', 'rHar;': '\u2964', 'rhard;': '\u21c1', 'rharu;': '\u21c0', 'rharul;': '\u296c', 'Rho;': '\u03a1', 'rho;': '\u03c1', 'rhov;': '\u03f1', 'RightAngleBracket;': '\u27e9', 'RightArrow;': '\u2192', 'Rightarrow;': '\u21d2', 'rightarrow;': '\u2192', 'RightArrowBar;': '\u21e5', 
'RightArrowLeftArrow;': '\u21c4', 'rightarrowtail;': '\u21a3', 'RightCeiling;': '\u2309', 'RightDoubleBracket;': '\u27e7', 'RightDownTeeVector;': '\u295d', 'RightDownVector;': '\u21c2', 'RightDownVectorBar;': '\u2955', 'RightFloor;': '\u230b', 'rightharpoondown;': '\u21c1', 'rightharpoonup;': '\u21c0', 'rightleftarrows;': '\u21c4', 'rightleftharpoons;': '\u21cc', 'rightrightarrows;': '\u21c9', 'rightsquigarrow;': '\u219d', 'RightTee;': '\u22a2', 'RightTeeArrow;': '\u21a6', 'RightTeeVector;': '\u295b', 'rightthreetimes;': '\u22cc', 'RightTriangle;': '\u22b3', 'RightTriangleBar;': '\u29d0', 'RightTriangleEqual;': '\u22b5', 'RightUpDownVector;': '\u294f', 'RightUpTeeVector;': '\u295c', 'RightUpVector;': '\u21be', 'RightUpVectorBar;': '\u2954', 'RightVector;': '\u21c0', 'RightVectorBar;': '\u2953', 'ring;': '\u02da', 'risingdotseq;': '\u2253', 'rlarr;': '\u21c4', 'rlhar;': '\u21cc', 'rlm;': '\u200f', 'rmoust;': '\u23b1', 'rmoustache;': '\u23b1', 'rnmid;': '\u2aee', 'roang;': '\u27ed', 'roarr;': '\u21fe', 'robrk;': '\u27e7', 'ropar;': '\u2986', 'Ropf;': '\u211d', 'ropf;': '\U0001d563', 'roplus;': '\u2a2e', 'rotimes;': '\u2a35', 'RoundImplies;': '\u2970', 'rpar;': ')', 'rpargt;': '\u2994', 'rppolint;': '\u2a12', 'rrarr;': '\u21c9', 'Rrightarrow;': '\u21db', 'rsaquo;': '\u203a', 'Rscr;': '\u211b', 'rscr;': '\U0001d4c7', 'Rsh;': '\u21b1', 'rsh;': '\u21b1', 'rsqb;': ']', 'rsquo;': '\u2019', 'rsquor;': '\u2019', 'rthree;': '\u22cc', 'rtimes;': '\u22ca', 'rtri;': '\u25b9', 'rtrie;': '\u22b5', 'rtrif;': '\u25b8', 'rtriltri;': '\u29ce', 'RuleDelayed;': '\u29f4', 'ruluhar;': '\u2968', 'rx;': '\u211e', 'Sacute;': '\u015a', 'sacute;': '\u015b', 'sbquo;': '\u201a', 'Sc;': '\u2abc', 'sc;': '\u227b', 'scap;': '\u2ab8', 'Scaron;': '\u0160', 'scaron;': '\u0161', 'sccue;': '\u227d', 'scE;': '\u2ab4', 'sce;': '\u2ab0', 'Scedil;': '\u015e', 'scedil;': '\u015f', 'Scirc;': '\u015c', 'scirc;': '\u015d', 'scnap;': '\u2aba', 'scnE;': '\u2ab6', 'scnsim;': '\u22e9', 'scpolint;': '\u2a13', 'scsim;': '\u227f', 'Scy;': '\u0421', 'scy;': '\u0441', 'sdot;': '\u22c5', 'sdotb;': '\u22a1', 'sdote;': '\u2a66', 'searhk;': '\u2925', 'seArr;': '\u21d8', 'searr;': '\u2198', 'searrow;': '\u2198', 'sect': '\xa7', 'sect;': '\xa7', 'semi;': ';', 'seswar;': '\u2929', 'setminus;': '\u2216', 'setmn;': '\u2216', 'sext;': '\u2736', 'Sfr;': '\U0001d516', 'sfr;': '\U0001d530', 'sfrown;': '\u2322', 'sharp;': '\u266f', 'SHCHcy;': '\u0429', 'shchcy;': '\u0449', 'SHcy;': '\u0428', 'shcy;': '\u0448', 'ShortDownArrow;': '\u2193', 'ShortLeftArrow;': '\u2190', 'shortmid;': '\u2223', 'shortparallel;': '\u2225', 'ShortRightArrow;': '\u2192', 'ShortUpArrow;': '\u2191', 'shy': '\xad', 'shy;': '\xad', 'Sigma;': '\u03a3', 'sigma;': '\u03c3', 'sigmaf;': '\u03c2', 'sigmav;': '\u03c2', 'sim;': '\u223c', 'simdot;': '\u2a6a', 'sime;': '\u2243', 'simeq;': '\u2243', 'simg;': '\u2a9e', 'simgE;': '\u2aa0', 'siml;': '\u2a9d', 'simlE;': '\u2a9f', 'simne;': '\u2246', 'simplus;': '\u2a24', 'simrarr;': '\u2972', 'slarr;': '\u2190', 'SmallCircle;': '\u2218', 'smallsetminus;': '\u2216', 'smashp;': '\u2a33', 'smeparsl;': '\u29e4', 'smid;': '\u2223', 'smile;': '\u2323', 'smt;': '\u2aaa', 'smte;': '\u2aac', 'smtes;': '\u2aac\ufe00', 'SOFTcy;': '\u042c', 'softcy;': '\u044c', 'sol;': '/', 'solb;': '\u29c4', 'solbar;': '\u233f', 'Sopf;': '\U0001d54a', 'sopf;': '\U0001d564', 'spades;': '\u2660', 'spadesuit;': '\u2660', 'spar;': '\u2225', 'sqcap;': '\u2293', 'sqcaps;': '\u2293\ufe00', 'sqcup;': '\u2294', 'sqcups;': '\u2294\ufe00', 'Sqrt;': '\u221a', 'sqsub;': '\u228f', 'sqsube;': 
'\u2291', 'sqsubset;': '\u228f', 'sqsubseteq;': '\u2291', 'sqsup;': '\u2290', 'sqsupe;': '\u2292', 'sqsupset;': '\u2290', 'sqsupseteq;': '\u2292', 'squ;': '\u25a1', 'Square;': '\u25a1', 'square;': '\u25a1', 'SquareIntersection;': '\u2293', 'SquareSubset;': '\u228f', 'SquareSubsetEqual;': '\u2291', 'SquareSuperset;': '\u2290', 'SquareSupersetEqual;': '\u2292', 'SquareUnion;': '\u2294', 'squarf;': '\u25aa', 'squf;': '\u25aa', 'srarr;': '\u2192', 'Sscr;': '\U0001d4ae', 'sscr;': '\U0001d4c8', 'ssetmn;': '\u2216', 'ssmile;': '\u2323', 'sstarf;': '\u22c6', 'Star;': '\u22c6', 'star;': '\u2606', 'starf;': '\u2605', 'straightepsilon;': '\u03f5', 'straightphi;': '\u03d5', 'strns;': '\xaf', 'Sub;': '\u22d0', 'sub;': '\u2282', 'subdot;': '\u2abd', 'subE;': '\u2ac5', 'sube;': '\u2286', 'subedot;': '\u2ac3', 'submult;': '\u2ac1', 'subnE;': '\u2acb', 'subne;': '\u228a', 'subplus;': '\u2abf', 'subrarr;': '\u2979', 'Subset;': '\u22d0', 'subset;': '\u2282', 'subseteq;': '\u2286', 'subseteqq;': '\u2ac5', 'SubsetEqual;': '\u2286', 'subsetneq;': '\u228a', 'subsetneqq;': '\u2acb', 'subsim;': '\u2ac7', 'subsub;': '\u2ad5', 'subsup;': '\u2ad3', 'succ;': '\u227b', 'succapprox;': '\u2ab8', 'succcurlyeq;': '\u227d', 'Succeeds;': '\u227b', 'SucceedsEqual;': '\u2ab0', 'SucceedsSlantEqual;': '\u227d', 'SucceedsTilde;': '\u227f', 'succeq;': '\u2ab0', 'succnapprox;': '\u2aba', 'succneqq;': '\u2ab6', 'succnsim;': '\u22e9', 'succsim;': '\u227f', 'SuchThat;': '\u220b', 'Sum;': '\u2211', 'sum;': '\u2211', 'sung;': '\u266a', 'sup1': '\xb9', 'sup1;': '\xb9', 'sup2': '\xb2', 'sup2;': '\xb2', 'sup3': '\xb3', 'sup3;': '\xb3', 'Sup;': '\u22d1', 'sup;': '\u2283', 'supdot;': '\u2abe', 'supdsub;': '\u2ad8', 'supE;': '\u2ac6', 'supe;': '\u2287', 'supedot;': '\u2ac4', 'Superset;': '\u2283', 'SupersetEqual;': '\u2287', 'suphsol;': '\u27c9', 'suphsub;': '\u2ad7', 'suplarr;': '\u297b', 'supmult;': '\u2ac2', 'supnE;': '\u2acc', 'supne;': '\u228b', 'supplus;': '\u2ac0', 'Supset;': '\u22d1', 'supset;': '\u2283', 'supseteq;': '\u2287', 'supseteqq;': '\u2ac6', 'supsetneq;': '\u228b', 'supsetneqq;': '\u2acc', 'supsim;': '\u2ac8', 'supsub;': '\u2ad4', 'supsup;': '\u2ad6', 'swarhk;': '\u2926', 'swArr;': '\u21d9', 'swarr;': '\u2199', 'swarrow;': '\u2199', 'swnwar;': '\u292a', 'szlig': '\xdf', 'szlig;': '\xdf', 'Tab;': '\t', 'target;': '\u2316', 'Tau;': '\u03a4', 'tau;': '\u03c4', 'tbrk;': '\u23b4', 'Tcaron;': '\u0164', 'tcaron;': '\u0165', 'Tcedil;': '\u0162', 'tcedil;': '\u0163', 'Tcy;': '\u0422', 'tcy;': '\u0442', 'tdot;': '\u20db', 'telrec;': '\u2315', 'Tfr;': '\U0001d517', 'tfr;': '\U0001d531', 'there4;': '\u2234', 'Therefore;': '\u2234', 'therefore;': '\u2234', 'Theta;': '\u0398', 'theta;': '\u03b8', 'thetasym;': '\u03d1', 'thetav;': '\u03d1', 'thickapprox;': '\u2248', 'thicksim;': '\u223c', 'ThickSpace;': '\u205f\u200a', 'thinsp;': '\u2009', 'ThinSpace;': '\u2009', 'thkap;': '\u2248', 'thksim;': '\u223c', 'THORN': '\xde', 'thorn': '\xfe', 'THORN;': '\xde', 'thorn;': '\xfe', 'Tilde;': '\u223c', 'tilde;': '\u02dc', 'TildeEqual;': '\u2243', 'TildeFullEqual;': '\u2245', 'TildeTilde;': '\u2248', 'times': '\xd7', 'times;': '\xd7', 'timesb;': '\u22a0', 'timesbar;': '\u2a31', 'timesd;': '\u2a30', 'tint;': '\u222d', 'toea;': '\u2928', 'top;': '\u22a4', 'topbot;': '\u2336', 'topcir;': '\u2af1', 'Topf;': '\U0001d54b', 'topf;': '\U0001d565', 'topfork;': '\u2ada', 'tosa;': '\u2929', 'tprime;': '\u2034', 'TRADE;': '\u2122', 'trade;': '\u2122', 'triangle;': '\u25b5', 'triangledown;': '\u25bf', 'triangleleft;': '\u25c3', 'trianglelefteq;': '\u22b4', 
'triangleq;': '\u225c', 'triangleright;': '\u25b9', 'trianglerighteq;': '\u22b5', 'tridot;': '\u25ec', 'trie;': '\u225c', 'triminus;': '\u2a3a', 'TripleDot;': '\u20db', 'triplus;': '\u2a39', 'trisb;': '\u29cd', 'tritime;': '\u2a3b', 'trpezium;': '\u23e2', 'Tscr;': '\U0001d4af', 'tscr;': '\U0001d4c9', 'TScy;': '\u0426', 'tscy;': '\u0446', 'TSHcy;': '\u040b', 'tshcy;': '\u045b', 'Tstrok;': '\u0166', 'tstrok;': '\u0167', 'twixt;': '\u226c', 'twoheadleftarrow;': '\u219e', 'twoheadrightarrow;': '\u21a0', 'Uacute': '\xda', 'uacute': '\xfa', 'Uacute;': '\xda', 'uacute;': '\xfa', 'Uarr;': '\u219f', 'uArr;': '\u21d1', 'uarr;': '\u2191', 'Uarrocir;': '\u2949', 'Ubrcy;': '\u040e', 'ubrcy;': '\u045e', 'Ubreve;': '\u016c', 'ubreve;': '\u016d', 'Ucirc': '\xdb', 'ucirc': '\xfb', 'Ucirc;': '\xdb', 'ucirc;': '\xfb', 'Ucy;': '\u0423', 'ucy;': '\u0443', 'udarr;': '\u21c5', 'Udblac;': '\u0170', 'udblac;': '\u0171', 'udhar;': '\u296e', 'ufisht;': '\u297e', 'Ufr;': '\U0001d518', 'ufr;': '\U0001d532', 'Ugrave': '\xd9', 'ugrave': '\xf9', 'Ugrave;': '\xd9', 'ugrave;': '\xf9', 'uHar;': '\u2963', 'uharl;': '\u21bf', 'uharr;': '\u21be', 'uhblk;': '\u2580', 'ulcorn;': '\u231c', 'ulcorner;': '\u231c', 'ulcrop;': '\u230f', 'ultri;': '\u25f8', 'Umacr;': '\u016a', 'umacr;': '\u016b', 'uml': '\xa8', 'uml;': '\xa8', 'UnderBar;': '_', 'UnderBrace;': '\u23df', 'UnderBracket;': '\u23b5', 'UnderParenthesis;': '\u23dd', 'Union;': '\u22c3', 'UnionPlus;': '\u228e', 'Uogon;': '\u0172', 'uogon;': '\u0173', 'Uopf;': '\U0001d54c', 'uopf;': '\U0001d566', 'UpArrow;': '\u2191', 'Uparrow;': '\u21d1', 'uparrow;': '\u2191', 'UpArrowBar;': '\u2912', 'UpArrowDownArrow;': '\u21c5', 'UpDownArrow;': '\u2195', 'Updownarrow;': '\u21d5', 'updownarrow;': '\u2195', 'UpEquilibrium;': '\u296e', 'upharpoonleft;': '\u21bf', 'upharpoonright;': '\u21be', 'uplus;': '\u228e', 'UpperLeftArrow;': '\u2196', 'UpperRightArrow;': '\u2197', 'Upsi;': '\u03d2', 'upsi;': '\u03c5', 'upsih;': '\u03d2', 'Upsilon;': '\u03a5', 'upsilon;': '\u03c5', 'UpTee;': '\u22a5', 'UpTeeArrow;': '\u21a5', 'upuparrows;': '\u21c8', 'urcorn;': '\u231d', 'urcorner;': '\u231d', 'urcrop;': '\u230e', 'Uring;': '\u016e', 'uring;': '\u016f', 'urtri;': '\u25f9', 'Uscr;': '\U0001d4b0', 'uscr;': '\U0001d4ca', 'utdot;': '\u22f0', 'Utilde;': '\u0168', 'utilde;': '\u0169', 'utri;': '\u25b5', 'utrif;': '\u25b4', 'uuarr;': '\u21c8', 'Uuml': '\xdc', 'uuml': '\xfc', 'Uuml;': '\xdc', 'uuml;': '\xfc', 'uwangle;': '\u29a7', 'vangrt;': '\u299c', 'varepsilon;': '\u03f5', 'varkappa;': '\u03f0', 'varnothing;': '\u2205', 'varphi;': '\u03d5', 'varpi;': '\u03d6', 'varpropto;': '\u221d', 'vArr;': '\u21d5', 'varr;': '\u2195', 'varrho;': '\u03f1', 'varsigma;': '\u03c2', 'varsubsetneq;': '\u228a\ufe00', 'varsubsetneqq;': '\u2acb\ufe00', 'varsupsetneq;': '\u228b\ufe00', 'varsupsetneqq;': '\u2acc\ufe00', 'vartheta;': '\u03d1', 'vartriangleleft;': '\u22b2', 'vartriangleright;': '\u22b3', 'Vbar;': '\u2aeb', 'vBar;': '\u2ae8', 'vBarv;': '\u2ae9', 'Vcy;': '\u0412', 'vcy;': '\u0432', 'VDash;': '\u22ab', 'Vdash;': '\u22a9', 'vDash;': '\u22a8', 'vdash;': '\u22a2', 'Vdashl;': '\u2ae6', 'Vee;': '\u22c1', 'vee;': '\u2228', 'veebar;': '\u22bb', 'veeeq;': '\u225a', 'vellip;': '\u22ee', 'Verbar;': '\u2016', 'verbar;': '|', 'Vert;': '\u2016', 'vert;': '|', 'VerticalBar;': '\u2223', 'VerticalLine;': '|', 'VerticalSeparator;': '\u2758', 'VerticalTilde;': '\u2240', 'VeryThinSpace;': '\u200a', 'Vfr;': '\U0001d519', 'vfr;': '\U0001d533', 'vltri;': '\u22b2', 'vnsub;': '\u2282\u20d2', 'vnsup;': '\u2283\u20d2', 'Vopf;': '\U0001d54d', 
'vopf;': '\U0001d567', 'vprop;': '\u221d', 'vrtri;': '\u22b3', 'Vscr;': '\U0001d4b1', 'vscr;': '\U0001d4cb', 'vsubnE;': '\u2acb\ufe00', 'vsubne;': '\u228a\ufe00', 'vsupnE;': '\u2acc\ufe00', 'vsupne;': '\u228b\ufe00', 'Vvdash;': '\u22aa', 'vzigzag;': '\u299a', 'Wcirc;': '\u0174', 'wcirc;': '\u0175', 'wedbar;': '\u2a5f', 'Wedge;': '\u22c0', 'wedge;': '\u2227', 'wedgeq;': '\u2259', 'weierp;': '\u2118', 'Wfr;': '\U0001d51a', 'wfr;': '\U0001d534', 'Wopf;': '\U0001d54e', 'wopf;': '\U0001d568', 'wp;': '\u2118', 'wr;': '\u2240', 'wreath;': '\u2240', 'Wscr;': '\U0001d4b2', 'wscr;': '\U0001d4cc', 'xcap;': '\u22c2', 'xcirc;': '\u25ef', 'xcup;': '\u22c3', 'xdtri;': '\u25bd', 'Xfr;': '\U0001d51b', 'xfr;': '\U0001d535', 'xhArr;': '\u27fa', 'xharr;': '\u27f7', 'Xi;': '\u039e', 'xi;': '\u03be', 'xlArr;': '\u27f8', 'xlarr;': '\u27f5', 'xmap;': '\u27fc', 'xnis;': '\u22fb', 'xodot;': '\u2a00', 'Xopf;': '\U0001d54f', 'xopf;': '\U0001d569', 'xoplus;': '\u2a01', 'xotime;': '\u2a02', 'xrArr;': '\u27f9', 'xrarr;': '\u27f6', 'Xscr;': '\U0001d4b3', 'xscr;': '\U0001d4cd', 'xsqcup;': '\u2a06', 'xuplus;': '\u2a04', 'xutri;': '\u25b3', 'xvee;': '\u22c1', 'xwedge;': '\u22c0', 'Yacute': '\xdd', 'yacute': '\xfd', 'Yacute;': '\xdd', 'yacute;': '\xfd', 'YAcy;': '\u042f', 'yacy;': '\u044f', 'Ycirc;': '\u0176', 'ycirc;': '\u0177', 'Ycy;': '\u042b', 'ycy;': '\u044b', 'yen': '\xa5', 'yen;': '\xa5', 'Yfr;': '\U0001d51c', 'yfr;': '\U0001d536', 'YIcy;': '\u0407', 'yicy;': '\u0457', 'Yopf;': '\U0001d550', 'yopf;': '\U0001d56a', 'Yscr;': '\U0001d4b4', 'yscr;': '\U0001d4ce', 'YUcy;': '\u042e', 'yucy;': '\u044e', 'yuml': '\xff', 'Yuml;': '\u0178', 'yuml;': '\xff', 'Zacute;': '\u0179', 'zacute;': '\u017a', 'Zcaron;': '\u017d', 'zcaron;': '\u017e', 'Zcy;': '\u0417', 'zcy;': '\u0437', 'Zdot;': '\u017b', 'zdot;': '\u017c', 'zeetrf;': '\u2128', 'ZeroWidthSpace;': '\u200b', 'Zeta;': '\u0396', 'zeta;': '\u03b6', 'Zfr;': '\u2128', 'zfr;': '\U0001d537', 'ZHcy;': '\u0416', 'zhcy;': '\u0436', 'zigrarr;': '\u21dd', 'Zopf;': '\u2124', 'zopf;': '\U0001d56b', 'Zscr;': '\U0001d4b5', 'zscr;': '\U0001d4cf', 'zwj;': '\u200d', 'zwnj;': '\u200c', } class EntitySubstitution(object): """The ability to substitute XML or HTML entities for certain characters.""" def _populate_class_variables(): """Initialize variables used by this class to manage the plethora of HTML5 named entities. This function returns a 3-tuple containing two dictionaries and a regular expression: unicode_to_name - A mapping of Unicode strings like "⦨" to entity names like "angmsdaa". When a single Unicode string has multiple entity names, we try to choose the most commonly-used name. name_to_unicode: A mapping of entity names like "angmsdaa" to Unicode strings like "⦨". named_entity_re: A regular expression matching (almost) any Unicode string that corresponds to an HTML5 named entity. """ unicode_to_name = {} name_to_unicode = {} short_entities = set() long_entities_by_first_character = defaultdict(set) for name_with_semicolon, character in sorted(html5.items()): # "It is intentional, for legacy compatibility, that many # code points have multiple character reference names. For # example, some appear both with and without the trailing # semicolon, or with different capitalizations." # - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references # # The parsers are in charge of handling (or not) character # references with no trailing semicolon, so we remove the # semicolon whenever it appears. 
            if name_with_semicolon.endswith(';'):
                name = name_with_semicolon[:-1]
            else:
                name = name_with_semicolon

            # When parsing HTML, we want to recognize any known named
            # entity and convert it to a sequence of Unicode
            # characters.
            if name not in name_to_unicode:
                name_to_unicode[name] = character

            # When _generating_ HTML, we want to recognize special
            # character sequences that _could_ be converted to named
            # entities.
            unicode_to_name[character] = name

            # We also need to build a regular expression that lets us
            # _find_ those characters in output strings so we can
            # replace them.
            #
            # This is tricky, for two reasons.

            if (len(character) == 1 and ord(character) < 128
                    and character not in '<>&'):
                # First, it would be annoying to turn single ASCII
                # characters like | into named entities like
                # &verbar;. The exceptions are <>&, which we _must_
                # turn into named entities to produce valid HTML.
                continue

            if len(character) > 1 and all(ord(x) < 128 for x in character):
                # We also do not want to turn _combinations_ of ASCII
                # characters like 'fj' into named entities like
                # '&fjlig;', though that's more debatable.
                continue

            # Second, some named entities have a Unicode value that's
            # a subset of the Unicode value for some _other_ named
            # entity. As an example, '\u2267' is &GreaterFullEqual;,
            # but '\u2267\u0338' is &NotGreaterFullEqual;. Our regular
            # expression needs to match the first two characters of
            # "\u2267\u0338foo", but only the first character of
            # "\u2267foo".
            #
            # In this step, we build two sets of characters that
            # _eventually_ need to go into the regular expression. But
            # we won't know exactly what the regular expression needs
            # to look like until we've gone through the entire list of
            # named entities.
            if len(character) == 1:
                short_entities.add(character)
            else:
                long_entities_by_first_character[character[0]].add(character)

        # Now that we've been through the entire list of entities, we
        # can create a regular expression that matches any of them.
        particles = set()
        for short in short_entities:
            long_versions = long_entities_by_first_character[short]
            if not long_versions:
                particles.add(short)
            else:
                ignore = "".join([x[1] for x in long_versions])
                # This finds, e.g. \u2267 but only if it is _not_
                # followed by \u0338.
                particles.add("%s(?![%s])" % (short, ignore))

        for long_entities in list(long_entities_by_first_character.values()):
            for long_entity in long_entities:
                particles.add(long_entity)

        re_definition = "(%s)" % "|".join(particles)

        # If an entity shows up in both html5 and codepoint2name, it's
        # likely that HTML5 gives it several different names, such as
        # 'rsquo' and 'rsquor'. When converting Unicode characters to
        # named entities, the codepoint2name name should take
        # precedence where possible, since that's the more easily
        # recognizable one.
        for codepoint, name in list(codepoint2name.items()):
            character = chr(codepoint)
            unicode_to_name[character] = name

        return unicode_to_name, name_to_unicode, re.compile(re_definition)

    (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
     CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()

    CHARACTER_TO_XML_ENTITY = {
        "'": "apos",
        '"': "quot",
        "&": "amp",
        "<": "lt",
        ">": "gt",
    }

    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)"
                                           ")")
    AMPERSAND_OR_BRACKET = re.compile("([<>&])")

    @classmethod
    def _substitute_html_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate HTML entity for a special character string."""
        entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
        return "&%s;" % entity

    @classmethod
    def _substitute_xml_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate XML entity for a special character string."""
        entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
        return "&%s;" % entity

    @classmethod
    def quoted_attribute_value(cls, value):
        """Make a value into a quoted XML attribute, possibly escaping it.

        Most strings will be quoted using double quotes.

         Bob's Bar -> "Bob's Bar"

        If a string contains double quotes, it will be quoted using
        single quotes.

         Welcome to "my bar" -> 'Welcome to "my bar"'

        If a string contains both single and double quotes, the
        double quotes will be escaped, and the string will be quoted
        using double quotes.

         Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;"
        """
        quote_with = '"'
        if '"' in value:
            if "'" in value:
                # The string contains both single and double
                # quotes. Turn the double quotes into
                # entities. We quote the double quotes rather than
                # the single quotes because the entity name is
                # "&quot;" whether this is HTML or XML. If we
                # quoted the single quotes, we'd have to decide
                # between &apos; and &squot;.
                replace_with = "&quot;"
                value = value.replace('"', replace_with)
            else:
                # There are double quotes but no single quotes.
                # We can use single quotes to quote the attribute.
                quote_with = "'"
        return quote_with + value + quote_with

    @classmethod
    def substitute_xml(cls, value, make_quoted_attribute=False):
        """Substitute XML entities for special XML characters.

        :param value: A string to be substituted. The less-than sign
          will become &lt;, the greater-than sign will become &gt;,
          and any ampersands will become &amp;. If you want ampersands
          that appear to be part of an entity definition to be left
          alone, use substitute_xml_containing_entities() instead.

        :param make_quoted_attribute: If True, then the string will be
         quoted, as befits an attribute value.
        """
        # Escape angle brackets and ampersands.
        value = cls.AMPERSAND_OR_BRACKET.sub(
            cls._substitute_xml_entity, value)

        if make_quoted_attribute:
            value = cls.quoted_attribute_value(value)
        return value

    @classmethod
    def substitute_xml_containing_entities(
            cls, value, make_quoted_attribute=False):
        """Substitute XML entities for special XML characters.

        :param value: A string to be substituted. The less-than sign will
          become &lt;, the greater-than sign will become &gt;, and any
          ampersands that are not part of an entity definition will
          become &amp;.

        :param make_quoted_attribute: If True, then the string will be
         quoted, as befits an attribute value.
        """
        # Escape angle brackets, and ampersands that aren't part of
        # entities.
        value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
            cls._substitute_xml_entity, value)
        if make_quoted_attribute:
            value = cls.quoted_attribute_value(value)
        return value

    @classmethod
    def substitute_html(cls, s):
        """Replace certain Unicode characters with named HTML entities.

        This differs from data.encode(encoding, 'xmlcharrefreplace')
        in that the goal is to make the result more readable (to those
        with ASCII displays) rather than to recover from
        errors. There's absolutely nothing wrong with a UTF-8 string
        containing a LATIN SMALL LETTER E WITH ACUTE, but replacing
        that character with "&eacute;" will make it more readable to
        some people.

        :param s: A Unicode string.
        """
        return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
            cls._substitute_html_entity, s)


class EncodingDetector:
    """Suggests a number of possible encodings for a bytestring.

    Order of precedence:

    1. Encodings you specifically tell EncodingDetector to try first
    (the known_definite_encodings argument to the constructor).

    2. An encoding determined by sniffing the document's byte-order mark.

    3. Encodings you specifically tell EncodingDetector to try if
    byte-order mark sniffing fails (the user_encodings argument to the
    constructor).

    4. An encoding declared within the bytestring itself, either in an
    XML declaration (if the bytestring is to be interpreted as an XML
    document), or in a <meta> tag (if the bytestring is to be
    interpreted as an HTML document.)

    5. An encoding detected through textual analysis by chardet,
    cchardet, or a similar external library.

    6. UTF-8.

    7. Windows-1252.
    """
    def __init__(self, markup, known_definite_encodings=None,
                 is_html=False, exclude_encodings=None,
                 user_encodings=None, override_encodings=None):
        """Constructor.

        :param markup: Some markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
            of `markup`, these encodings will be tried first, in
            order. In HTML terms, this corresponds to the "known
            definite encoding" step defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
            `known_definite_encodings` have been tried and failed, and
            after an attempt to sniff the encoding by looking at a
            byte order mark has failed. In HTML terms, this
            corresponds to the step "user has explicitly instructed
            the user agent to override the document's character
            encoding", defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
            known_definite_encodings. Any encodings here will be tried
            immediately after the encodings in
            known_definite_encodings.

        :param is_html: If True, this markup is considered to be
            HTML. Otherwise it's assumed to be XML.

        :param exclude_encodings: These encodings will not be tried,
            even if they otherwise would be.
        """
        self.known_definite_encodings = list(known_definite_encodings or [])
        if override_encodings:
            self.known_definite_encodings += override_encodings
        self.user_encodings = user_encodings or []
        exclude_encodings = exclude_encodings or []
        self.exclude_encodings = set([x.lower() for x in exclude_encodings])
        self.chardet_encoding = None
        self.is_html = is_html
        self.declared_encoding = None

        # First order of business: strip a byte-order mark.
        self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)

    def _usable(self, encoding, tried):
        """Should we even bother to try this encoding?

        :param encoding: Name of an encoding.
        :param tried: Encodings that have already been tried. This
            will be modified as a side effect.
        """
        if encoding is not None:
            encoding = encoding.lower()
            if encoding in self.exclude_encodings:
                return False
            if encoding not in tried:
                tried.add(encoding)
                return True
        return False

    @property
    def encodings(self):
        """Yield a number of encodings that might work for this markup.

        :yield: A sequence of strings.
        """
        tried = set()

        # First, try the known definite encodings
        for e in self.known_definite_encodings:
            if self._usable(e, tried):
                yield e

        # Did the document originally start with a byte-order mark
        # that indicated its encoding?
        if self._usable(self.sniffed_encoding, tried):
            yield self.sniffed_encoding

        # Sniffing the byte-order mark did nothing; try the user
        # encodings.
        for e in self.user_encodings:
            if self._usable(e, tried):
                yield e

        # Look within the document for an XML or HTML encoding
        # declaration.
        if self.declared_encoding is None:
            self.declared_encoding = self.find_declared_encoding(
                self.markup, self.is_html)
        if self._usable(self.declared_encoding, tried):
            yield self.declared_encoding

        # Use third-party character set detection to guess at the
        # encoding.
        if self.chardet_encoding is None:
            self.chardet_encoding = chardet_dammit(self.markup)
        if self._usable(self.chardet_encoding, tried):
            yield self.chardet_encoding

        # As a last-ditch effort, try utf-8 and windows-1252.
        for e in ('utf-8', 'windows-1252'):
            if self._usable(e, tried):
                yield e

    @classmethod
    def strip_byte_order_mark(cls, data):
        """If a byte-order mark is present, strip it and return the
        encoding it implies.

        :param data: Some markup.
        :return: A 2-tuple (modified data, implied encoding)
        """
        encoding = None
        if isinstance(data, str):
            # Unicode data cannot have a byte-order mark.
            return data, encoding
        if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
                and (data[2:4] != b'\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
                and (data[2:4] != b'\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == b'\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == b'\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == b'\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        return data, encoding

    @classmethod
    def find_declared_encoding(cls, markup, is_html=False,
                               search_entire_document=False):
        """Given a document, tries to find its declared encoding.

        An XML encoding is declared at the beginning of the document.

        An HTML encoding is declared in a <meta> tag, hopefully near the
        beginning of the document.

        :param markup: Some markup.
        :param is_html: If True, this markup is considered to be
            HTML. Otherwise it's assumed to be XML.
        :param search_entire_document: Since an encoding is supposed
            to be declared near the beginning of the document, most of
            the time it's only necessary to search a few kilobytes of
            data. Set this to True to force this method to search the
            entire document.
        """
        if search_entire_document:
            xml_endpos = html_endpos = len(markup)
        else:
            xml_endpos = 1024
            html_endpos = max(2048, int(len(markup) * 0.05))

        if isinstance(markup, bytes):
            res = encoding_res[bytes]
        else:
            res = encoding_res[str]

        xml_re = res['xml']
        html_re = res['html']
        declared_encoding = None
        declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
        if not declared_encoding_match and is_html:
            declared_encoding_match = html_re.search(markup, endpos=html_endpos)
        if declared_encoding_match is not None:
            declared_encoding = declared_encoding_match.groups()[0]
        if declared_encoding:
            if isinstance(declared_encoding, bytes):
                declared_encoding = declared_encoding.decode('ascii', 'replace')
            return declared_encoding.lower()
        return None


class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = {"macintosh": "mac-roman",
                       "x-sjis": "shift-jis"}

    ENCODINGS_WITH_SMART_QUOTES = [
        "windows-1252",
        "iso-8859-1",
        "iso-8859-2",
    ]

    def __init__(self, markup, known_definite_encodings=[],
                 smart_quotes_to=None, is_html=False, exclude_encodings=[],
                 user_encodings=None, override_encodings=None):
        """Constructor.

        :param markup: A bytestring representing markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
            of `markup`, these encodings will be tried first, in
            order. In HTML terms, this corresponds to the "known
            definite encoding" step defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
            `known_definite_encodings` have been tried and failed, and
            after an attempt to sniff the encoding by looking at a
            byte order mark has failed. In HTML terms, this
            corresponds to the step "user has explicitly instructed
            the user agent to override the document's character
            encoding", defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
            known_definite_encodings. Any encodings here will be tried
            immediately after the encodings in
            known_definite_encodings.

        :param smart_quotes_to: By default, Microsoft smart quotes will,
            like all other characters, be converted to Unicode
            characters. Setting this to 'ascii' will convert them to
            ASCII quotes instead. Setting it to 'xml' will convert them
            to XML entity references, and setting it to 'html' will
            convert them to HTML entity references.

        :param is_html: If True, this markup is considered to be
            HTML. Otherwise it's assumed to be XML.

        :param exclude_encodings: These encodings will not be considered,
            even if the sniffing code thinks they might make sense.
        """
        self.smart_quotes_to = smart_quotes_to
        self.tried_encodings = []
        self.contains_replacement_characters = False
        self.is_html = is_html
        self.log = logging.getLogger(__name__)
        self.detector = EncodingDetector(
            markup, known_definite_encodings, is_html, exclude_encodings,
            user_encodings, override_encodings
        )

        # Short-circuit if the data is in Unicode to begin with.
if isinstance(markup, str) or markup == '': self.markup = markup self.unicode_markup = str(markup) self.original_encoding = None return # The encoding detector may have stripped a byte-order mark. # Use the stripped markup from this point on. self.markup = self.detector.markup u = None for encoding in self.detector.encodings: markup = self.detector.markup u = self._convert_from(encoding) if u is not None: break if not u: # None of the encodings worked. As an absolute last resort, # try them again with character replacement. for encoding in self.detector.encodings: if encoding != "ascii": u = self._convert_from(encoding, "replace") if u is not None: self.log.warning( "Some characters could not be decoded, and were " "replaced with REPLACEMENT CHARACTER." ) self.contains_replacement_characters = True break # If none of that worked, we could at this point force it to # ASCII, but that would destroy so much data that I think # giving up is better. self.unicode_markup = u if not u: self.original_encoding = None def _sub_ms_char(self, match): """Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.""" orig = match.group(1) if self.smart_quotes_to == 'ascii': sub = self.MS_CHARS_TO_ASCII.get(orig).encode() else: sub = self.MS_CHARS.get(orig) if type(sub) == tuple: if self.smart_quotes_to == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub def _convert_from(self, proposed, errors="strict"): """Attempt to convert the markup to the proposed encoding. :param proposed: The name of a character encoding. """ proposed = self.find_codec(proposed) if not proposed or (proposed, errors) in self.tried_encodings: return None self.tried_encodings.append((proposed, errors)) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if (self.smart_quotes_to is not None and proposed in self.ENCODINGS_WITH_SMART_QUOTES): smart_quotes_re = b"([\x80-\x9f])" smart_quotes_compiled = re.compile(smart_quotes_re) markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) try: #print("Trying to convert document to %s (errors=%s)" % ( # proposed, errors)) u = self._to_unicode(markup, proposed, errors) self.markup = u self.original_encoding = proposed except Exception as e: #print("That didn't work!") #print(e) return None #print("Correct encoding: %s" % proposed) return self.markup def _to_unicode(self, data, encoding, errors="strict"): """Given a string and its encoding, decodes the string into Unicode. :param encoding: The name of an encoding. """ return str(data, encoding, errors) @property def declared_html_encoding(self): """If the markup is an HTML document, returns the encoding declared _within_ the document. """ if not self.is_html: return None return self.detector.declared_encoding def find_codec(self, charset): """Convert the name of a character set to a codec name. :param charset: The name of a character set. :return: The name of a codec. 
""" value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) or (charset and self._codec(charset.replace("-", ""))) or (charset and self._codec(charset.replace("-", "_"))) or (charset and charset.lower()) or charset ) if value: return value.lower() return None def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. MS_CHARS = {b'\x80': ('euro', '20AC'), b'\x81': ' ', b'\x82': ('sbquo', '201A'), b'\x83': ('fnof', '192'), b'\x84': ('bdquo', '201E'), b'\x85': ('hellip', '2026'), b'\x86': ('dagger', '2020'), b'\x87': ('Dagger', '2021'), b'\x88': ('circ', '2C6'), b'\x89': ('permil', '2030'), b'\x8A': ('Scaron', '160'), b'\x8B': ('lsaquo', '2039'), b'\x8C': ('OElig', '152'), b'\x8D': '?', b'\x8E': ('#x17D', '17D'), b'\x8F': '?', b'\x90': '?', b'\x91': ('lsquo', '2018'), b'\x92': ('rsquo', '2019'), b'\x93': ('ldquo', '201C'), b'\x94': ('rdquo', '201D'), b'\x95': ('bull', '2022'), b'\x96': ('ndash', '2013'), b'\x97': ('mdash', '2014'), b'\x98': ('tilde', '2DC'), b'\x99': ('trade', '2122'), b'\x9a': ('scaron', '161'), b'\x9b': ('rsaquo', '203A'), b'\x9c': ('oelig', '153'), b'\x9d': '?', b'\x9e': ('#x17E', '17E'), b'\x9f': ('Yuml', ''),} # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains # horrors like stripping diacritical marks to turn á into a, but also # contains non-horrors like turning “ into ". MS_CHARS_TO_ASCII = { b'\x80' : 'EUR', b'\x81' : ' ', b'\x82' : ',', b'\x83' : 'f', b'\x84' : ',,', b'\x85' : '...', b'\x86' : '+', b'\x87' : '++', b'\x88' : '^', b'\x89' : '%', b'\x8a' : 'S', b'\x8b' : '<', b'\x8c' : 'OE', b'\x8d' : '?', b'\x8e' : 'Z', b'\x8f' : '?', b'\x90' : '?', b'\x91' : "'", b'\x92' : "'", b'\x93' : '"', b'\x94' : '"', b'\x95' : '*', b'\x96' : '-', b'\x97' : '--', b'\x98' : '~', b'\x99' : '(TM)', b'\x9a' : 's', b'\x9b' : '>', b'\x9c' : 'oe', b'\x9d' : '?', b'\x9e' : 'z', b'\x9f' : 'Y', b'\xa0' : ' ', b'\xa1' : '!', b'\xa2' : 'c', b'\xa3' : 'GBP', b'\xa4' : '$', #This approximation is especially parochial--this is the #generic currency symbol. 
b'\xa5' : 'YEN', b'\xa6' : '|', b'\xa7' : 'S', b'\xa8' : '..', b'\xa9' : '', b'\xaa' : '(th)', b'\xab' : '<<', b'\xac' : '!', b'\xad' : ' ', b'\xae' : '(R)', b'\xaf' : '-', b'\xb0' : 'o', b'\xb1' : '+-', b'\xb2' : '2', b'\xb3' : '3', b'\xb4' : ("'", 'acute'), b'\xb5' : 'u', b'\xb6' : 'P', b'\xb7' : '*', b'\xb8' : ',', b'\xb9' : '1', b'\xba' : '(th)', b'\xbb' : '>>', b'\xbc' : '1/4', b'\xbd' : '1/2', b'\xbe' : '3/4', b'\xbf' : '?', b'\xc0' : 'A', b'\xc1' : 'A', b'\xc2' : 'A', b'\xc3' : 'A', b'\xc4' : 'A', b'\xc5' : 'A', b'\xc6' : 'AE', b'\xc7' : 'C', b'\xc8' : 'E', b'\xc9' : 'E', b'\xca' : 'E', b'\xcb' : 'E', b'\xcc' : 'I', b'\xcd' : 'I', b'\xce' : 'I', b'\xcf' : 'I', b'\xd0' : 'D', b'\xd1' : 'N', b'\xd2' : 'O', b'\xd3' : 'O', b'\xd4' : 'O', b'\xd5' : 'O', b'\xd6' : 'O', b'\xd7' : '*', b'\xd8' : 'O', b'\xd9' : 'U', b'\xda' : 'U', b'\xdb' : 'U', b'\xdc' : 'U', b'\xdd' : 'Y', b'\xde' : 'b', b'\xdf' : 'B', b'\xe0' : 'a', b'\xe1' : 'a', b'\xe2' : 'a', b'\xe3' : 'a', b'\xe4' : 'a', b'\xe5' : 'a', b'\xe6' : 'ae', b'\xe7' : 'c', b'\xe8' : 'e', b'\xe9' : 'e', b'\xea' : 'e', b'\xeb' : 'e', b'\xec' : 'i', b'\xed' : 'i', b'\xee' : 'i', b'\xef' : 'i', b'\xf0' : 'o', b'\xf1' : 'n', b'\xf2' : 'o', b'\xf3' : 'o', b'\xf4' : 'o', b'\xf5' : 'o', b'\xf6' : 'o', b'\xf7' : '/', b'\xf8' : 'o', b'\xf9' : 'u', b'\xfa' : 'u', b'\xfb' : 'u', b'\xfc' : 'u', b'\xfd' : 'y', b'\xfe' : 'b', b'\xff' : 'y', } # A map used when removing rogue Windows-1252/ISO-8859-1 # characters in otherwise UTF-8 documents. # # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in # Windows-1252. WINDOWS_1252_TO_UTF8 = { 0x80 : b'\xe2\x82\xac', # € 0x82 : b'\xe2\x80\x9a', # ‚ 0x83 : b'\xc6\x92', # ƒ 0x84 : b'\xe2\x80\x9e', # „ 0x85 : b'\xe2\x80\xa6', # … 0x86 : b'\xe2\x80\xa0', # † 0x87 : b'\xe2\x80\xa1', # ‡ 0x88 : b'\xcb\x86', # ˆ 0x89 : b'\xe2\x80\xb0', # ‰ 0x8a : b'\xc5\xa0', # Š 0x8b : b'\xe2\x80\xb9', # ‹ 0x8c : b'\xc5\x92', # Œ 0x8e : b'\xc5\xbd', # Ž 0x91 : b'\xe2\x80\x98', # ‘ 0x92 : b'\xe2\x80\x99', # ’ 0x93 : b'\xe2\x80\x9c', # “ 0x94 : b'\xe2\x80\x9d', # ” 0x95 : b'\xe2\x80\xa2', # • 0x96 : b'\xe2\x80\x93', # – 0x97 : b'\xe2\x80\x94', # — 0x98 : b'\xcb\x9c', # ˜ 0x99 : b'\xe2\x84\xa2', # ™ 0x9a : b'\xc5\xa1', # š 0x9b : b'\xe2\x80\xba', # › 0x9c : b'\xc5\x93', # œ 0x9e : b'\xc5\xbe', # ž 0x9f : b'\xc5\xb8', # Ÿ 0xa0 : b'\xc2\xa0', #   0xa1 : b'\xc2\xa1', # ¡ 0xa2 : b'\xc2\xa2', # ¢ 0xa3 : b'\xc2\xa3', # £ 0xa4 : b'\xc2\xa4', # ¤ 0xa5 : b'\xc2\xa5', # ¥ 0xa6 : b'\xc2\xa6', # ¦ 0xa7 : b'\xc2\xa7', # § 0xa8 : b'\xc2\xa8', # ¨ 0xa9 : b'\xc2\xa9', # © 0xaa : b'\xc2\xaa', # ª 0xab : b'\xc2\xab', # « 0xac : b'\xc2\xac', # ¬ 0xad : b'\xc2\xad', # ­ 0xae : b'\xc2\xae', # ® 0xaf : b'\xc2\xaf', # ¯ 0xb0 : b'\xc2\xb0', # ° 0xb1 : b'\xc2\xb1', # ± 0xb2 : b'\xc2\xb2', # ² 0xb3 : b'\xc2\xb3', # ³ 0xb4 : b'\xc2\xb4', # ´ 0xb5 : b'\xc2\xb5', # µ 0xb6 : b'\xc2\xb6', # ¶ 0xb7 : b'\xc2\xb7', # · 0xb8 : b'\xc2\xb8', # ¸ 0xb9 : b'\xc2\xb9', # ¹ 0xba : b'\xc2\xba', # º 0xbb : b'\xc2\xbb', # » 0xbc : b'\xc2\xbc', # ¼ 0xbd : b'\xc2\xbd', # ½ 0xbe : b'\xc2\xbe', # ¾ 0xbf : b'\xc2\xbf', # ¿ 0xc0 : b'\xc3\x80', # À 0xc1 : b'\xc3\x81', # Á 0xc2 : b'\xc3\x82', #  0xc3 : b'\xc3\x83', # à 0xc4 : b'\xc3\x84', # Ä 0xc5 : b'\xc3\x85', # Å 0xc6 : b'\xc3\x86', # Æ 0xc7 : b'\xc3\x87', # Ç 0xc8 : b'\xc3\x88', # È 0xc9 : b'\xc3\x89', # É 0xca : b'\xc3\x8a', # Ê 0xcb : b'\xc3\x8b', # Ë 0xcc : b'\xc3\x8c', # Ì 0xcd : b'\xc3\x8d', # Í 0xce : b'\xc3\x8e', # Î 0xcf : b'\xc3\x8f', # Ï 0xd0 : b'\xc3\x90', # Ð 0xd1 : b'\xc3\x91', # Ñ 0xd2 : b'\xc3\x92', # Ò 0xd3 : 
b'\xc3\x93', # Ó
        0xd4 : b'\xc3\x94', # Ô
        0xd5 : b'\xc3\x95', # Õ
        0xd6 : b'\xc3\x96', # Ö
        0xd7 : b'\xc3\x97', # ×
        0xd8 : b'\xc3\x98', # Ø
        0xd9 : b'\xc3\x99', # Ù
        0xda : b'\xc3\x9a', # Ú
        0xdb : b'\xc3\x9b', # Û
        0xdc : b'\xc3\x9c', # Ü
        0xdd : b'\xc3\x9d', # Ý
        0xde : b'\xc3\x9e', # Þ
        0xdf : b'\xc3\x9f', # ß
        0xe0 : b'\xc3\xa0', # à
        0xe1 : b'\xc3\xa1', # á
        0xe2 : b'\xc3\xa2', # â
        0xe3 : b'\xc3\xa3', # ã
        0xe4 : b'\xc3\xa4', # ä
        0xe5 : b'\xc3\xa5', # å
        0xe6 : b'\xc3\xa6', # æ
        0xe7 : b'\xc3\xa7', # ç
        0xe8 : b'\xc3\xa8', # è
        0xe9 : b'\xc3\xa9', # é
        0xea : b'\xc3\xaa', # ê
        0xeb : b'\xc3\xab', # ë
        0xec : b'\xc3\xac', # ì
        0xed : b'\xc3\xad', # í
        0xee : b'\xc3\xae', # î
        0xef : b'\xc3\xaf', # ï
        0xf0 : b'\xc3\xb0', # ð
        0xf1 : b'\xc3\xb1', # ñ
        0xf2 : b'\xc3\xb2', # ò
        0xf3 : b'\xc3\xb3', # ó
        0xf4 : b'\xc3\xb4', # ô
        0xf5 : b'\xc3\xb5', # õ
        0xf6 : b'\xc3\xb6', # ö
        0xf7 : b'\xc3\xb7', # ÷
        0xf8 : b'\xc3\xb8', # ø
        0xf9 : b'\xc3\xb9', # ù
        0xfa : b'\xc3\xba', # ú
        0xfb : b'\xc3\xbb', # û
        0xfc : b'\xc3\xbc', # ü
        0xfd : b'\xc3\xbd', # ý
        0xfe : b'\xc3\xbe', # þ
        }

    MULTIBYTE_MARKERS_AND_SIZES = [
        (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
        (0xe0, 0xef, 3), # 3-byte characters start with E0-EF
        (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
    ]

    FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
    LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]

    @classmethod
    def detwingle(cls, in_bytes, main_encoding="utf8",
                  embedded_encoding="windows-1252"):
        """Fix characters from one encoding embedded in some other encoding.

        Currently the only situation supported is Windows-1252 (or its
        subset ISO-8859-1), embedded in UTF-8.

        :param in_bytes: A bytestring that you suspect contains
            characters from multiple encodings. Note that this _must_
            be a bytestring. If you've already converted the document
            to Unicode, you're too late.
        :param main_encoding: The primary encoding of `in_bytes`.
        :param embedded_encoding: The encoding that was used to embed
            characters in the main document.
        :return: A bytestring in which `embedded_encoding`
            characters have been converted to their `main_encoding`
            equivalents.
        """
        if embedded_encoding.replace('_', '-').lower() not in (
                'windows-1252', 'windows_1252'):
            raise NotImplementedError(
                "Windows-1252 and ISO-8859-1 are the only currently supported "
                "embedded encodings.")

        if main_encoding.lower() not in ('utf8', 'utf-8'):
            raise NotImplementedError(
                "UTF-8 is the only currently supported main encoding.")

        byte_chunks = []

        chunk_start = 0
        pos = 0
        while pos < len(in_bytes):
            byte = in_bytes[pos]
            if not isinstance(byte, int):
                # Python 2.x
                byte = ord(byte)
            if (byte >= cls.FIRST_MULTIBYTE_MARKER
                    and byte <= cls.LAST_MULTIBYTE_MARKER):
                # This is the start of a UTF-8 multibyte character. Skip
                # to the end.
                for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
                    if byte >= start and byte <= end:
                        pos += size
                        break
            elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
                # We found a Windows-1252 character!
                # Save the string up to this point as a chunk.
                byte_chunks.append(in_bytes[chunk_start:pos])

                # Now translate the Windows-1252 character into UTF-8
                # and add it as another, one-byte chunk.
                byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
                pos += 1
                chunk_start = pos
            else:
                # Go on to the next character.
                pos += 1
        if chunk_start == 0:
            # The string is unchanged.
            return in_bytes
        else:
            # Store the final chunk.
            byte_chunks.append(in_bytes[chunk_start:])
        return b''.join(byte_chunks)
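The classes above are easiest to understand from the caller's side. What follows is a minimal usage sketch added for illustration; it is not part of the original module, it assumes this file is importable as bs4.dammit, and the byte strings are invented examples.

if __name__ == '__main__':
    from bs4.dammit import EntitySubstitution, UnicodeDammit

    # UnicodeDammit drives the EncodingDetector.encodings generator:
    # BOM sniffing, declared encodings, chardet (if installed), then
    # the utf-8 and windows-1252 fallbacks.
    dammit = UnicodeDammit(b'Sacr\xc3\xa9 bleu!')
    print(dammit.unicode_markup)     # 'Sacré bleu!'
    print(dammit.original_encoding)  # most likely 'utf-8'

    # detwingle() repairs Windows-1252 bytes (here \x93 and \x94,
    # smart quotes) embedded in an otherwise UTF-8 document, using
    # the WINDOWS_1252_TO_UTF8 table defined above.
    mixed = b'\x93mixed quotes\x94 in a UTF-8 document'
    print(UnicodeDammit.detwingle(mixed).decode('utf8'))

    # EntitySubstitution escapes markup-significant characters.
    print(EntitySubstitution.substitute_xml('AT&T <rocks>'))
    # AT&amp;T &lt;rocks&gt;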
98,709
Python
.py
3,212
24.036737
112
0.503556
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,489
_htmlparser.py
rembo10_headphones/lib/bs4/builder/_htmlparser.py
# encoding: utf-8 """Use the HTMLParser library to parse HTML files that aren't too bad.""" # Use of this source code is governed by the MIT license. __license__ = "MIT" __all__ = [ 'HTMLParserTreeBuilder', ] from html.parser import HTMLParser try: from html.parser import HTMLParseError except ImportError as e: # HTMLParseError is removed in Python 3.5. Since it can never be # thrown in 3.5, we can just define our own class as a placeholder. class HTMLParseError(Exception): pass import sys import warnings # Starting in Python 3.2, the HTMLParser constructor takes a 'strict' # argument, which we'd like to set to False. Unfortunately, # http://bugs.python.org/issue13273 makes strict=True a better bet # before Python 3.2.3. # # At the end of this file, we monkeypatch HTMLParser so that # strict=True works well on Python 3.2.2. major, minor, release = sys.version_info[:3] CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3 CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3 CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4 from bs4.element import ( CData, Comment, Declaration, Doctype, ProcessingInstruction, ) from bs4.dammit import EntitySubstitution, UnicodeDammit from bs4.builder import ( HTML, HTMLTreeBuilder, STRICT, ) HTMLPARSER = 'html.parser' class BeautifulSoupHTMLParser(HTMLParser): """A subclass of the Python standard library's HTMLParser class, which listens for HTMLParser events and translates them into calls to Beautiful Soup's tree construction API. """ # Strategies for handling duplicate attributes IGNORE = 'ignore' REPLACE = 'replace' def __init__(self, *args, **kwargs): """Constructor. :param on_duplicate_attribute: A strategy for what to do if a tag includes the same attribute more than once. Accepted values are: REPLACE (replace earlier values with later ones, the default), IGNORE (keep the earliest value encountered), or a callable. A callable must take three arguments: the dictionary of attributes already processed, the name of the duplicate attribute, and the most recent value encountered. """ self.on_duplicate_attribute = kwargs.pop( 'on_duplicate_attribute', self.REPLACE ) HTMLParser.__init__(self, *args, **kwargs) # Keep a list of empty-element tags that were encountered # without an explicit closing tag. If we encounter a closing tag # of this type, we'll associate it with one of those entries. # # This isn't a stack because we don't care about the # order. It's a list of closing tags we've already handled and # will ignore, assuming they ever show up. self.already_closed_empty_element = [] def error(self, msg): """In Python 3, HTMLParser subclasses must implement error(), although this requirement doesn't appear to be documented. In Python 2, HTMLParser implements error() by raising an exception, which we don't want to do. In any event, this method is called only on very strange markup and our best strategy is to pretend it didn't happen and keep going. """ warnings.warn(msg) def handle_startendtag(self, name, attrs): """Handle an incoming empty-element tag. This is only called when the markup looks like <tag/>. :param name: Name of the tag. :param attrs: Dictionary of the tag's attributes. """ # is_startend() tells handle_starttag not to close the tag # just because its name matches a known empty-element tag. We # know that this is an empty-element tag and we want to call # handle_endtag ourselves. 
        tag = self.handle_starttag(name, attrs, handle_empty_element=False)
        self.handle_endtag(name)

    def handle_starttag(self, name, attrs, handle_empty_element=True):
        """Handle an opening tag, e.g. '<tag>'

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        :param handle_empty_element: True if this tag is known to be
            an empty-element tag (i.e. there is not expected to be any
            closing tag).
        """
        # XXX namespace
        attr_dict = {}
        for key, value in attrs:
            # Change None attribute values to the empty string
            # for consistency with the other tree builders.
            if value is None:
                value = ''
            if key in attr_dict:
                # A single attribute shows up multiple times in this
                # tag. How to handle it depends on the
                # on_duplicate_attribute setting.
                on_dupe = self.on_duplicate_attribute
                if on_dupe == self.IGNORE:
                    pass
                elif on_dupe in (None, self.REPLACE):
                    attr_dict[key] = value
                else:
                    on_dupe(attr_dict, key, value)
            else:
                attr_dict[key] = value
                attrvalue = '""'
        #print("START", name)
        sourceline, sourcepos = self.getpos()
        tag = self.soup.handle_starttag(
            name, None, None, attr_dict, sourceline=sourceline,
            sourcepos=sourcepos
        )
        if tag and tag.is_empty_element and handle_empty_element:
            # Unlike other parsers, html.parser doesn't send separate end tag
            # events for empty-element tags. (It's handled in
            # handle_startendtag, but only if the original markup looked like
            # <tag/>.)
            #
            # So we need to call handle_endtag() ourselves. Since we
            # know the start event is identical to the end event, we
            # don't want handle_endtag() to cross off any previous end
            # events for tags of this name.
            self.handle_endtag(name, check_already_closed=False)

            # But we might encounter an explicit closing tag for this tag
            # later on. If so, we want to ignore it.
            self.already_closed_empty_element.append(name)

    def handle_endtag(self, name, check_already_closed=True):
        """Handle a closing tag, e.g. '</tag>'

        :param name: A tag name.
        :param check_already_closed: True if this tag is expected to
           be the closing portion of an empty-element tag,
           e.g. '<tag></tag>'.
        """
        #print("END", name)
        if check_already_closed and name in self.already_closed_empty_element:
            # This is a redundant end tag for an empty-element tag.
            # We've already called handle_endtag() for it, so just
            # check it off the list.
            #print("ALREADY CLOSED", name)
            self.already_closed_empty_element.remove(name)
        else:
            self.soup.handle_endtag(name)

    def handle_data(self, data):
        """Handle some textual data that shows up between tags."""
        self.soup.handle_data(data)

    def handle_charref(self, name):
        """Handle a numeric character reference by converting it to the
        corresponding Unicode character and treating it as textual
        data.

        :param name: Character number, possibly in hexadecimal.
        """
        # XXX workaround for a bug in HTMLParser. Remove this once
        # it's fixed in all supported versions.
        # http://bugs.python.org/issue13633
        if name.startswith('x'):
            real_name = int(name.lstrip('x'), 16)
        elif name.startswith('X'):
            real_name = int(name.lstrip('X'), 16)
        else:
            real_name = int(name)

        data = None
        if real_name < 256:
            # HTML numeric entities are supposed to reference Unicode
            # code points, but sometimes they reference code points in
            # some other encoding (ahem, Windows-1252). E.g. &#147;
            # instead of &#8220; for LEFT DOUBLE QUOTATION MARK. This
            # code tries to detect this situation and compensate.
            for encoding in (self.soup.original_encoding, 'windows-1252'):
                if not encoding:
                    continue
                try:
                    data = bytearray([real_name]).decode(encoding)
                except UnicodeDecodeError as e:
                    pass
        if not data:
            try:
                data = chr(real_name)
            except (ValueError, OverflowError) as e:
                pass
        data = data or "\N{REPLACEMENT CHARACTER}"
        self.handle_data(data)

    def handle_entityref(self, name):
        """Handle a named entity reference by converting it to the
        corresponding Unicode character(s) and treating it as textual
        data.

        :param name: Name of the entity reference.
        """
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # If this were XML, it would be ambiguous whether "&foo"
            # was a character entity reference with a missing
            # semicolon or the literal string "&foo". Since this is
            # HTML, we have a complete list of all character entity
            # references, and this one wasn't found, so assume it's
            # the literal string "&foo".
            data = "&%s" % name
        self.handle_data(data)

    def handle_comment(self, data):
        """Handle an HTML comment.

        :param data: The text of the comment.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data):
        """Handle a DOCTYPE declaration.

        :param data: The text of the declaration.
        """
        self.soup.endData()
        data = data[len("DOCTYPE "):]
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data):
        """Handle a declaration of unknown type -- probably a CDATA block.

        :param data: The text of the declaration.
        """
        if data.upper().startswith('CDATA['):
            cls = CData
            data = data[len('CDATA['):]
        else:
            cls = Declaration
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(cls)

    def handle_pi(self, data):
        """Handle a processing instruction.

        :param data: The text of the instruction.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(ProcessingInstruction)


class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """A Beautiful Soup `TreeBuilder` that uses the `HTMLParser` parser,
    found in the Python standard library.
    """
    is_xml = False
    picklable = True
    NAME = HTMLPARSER
    features = [NAME, HTML, STRICT]

    # The html.parser knows which line number and position in the
    # original file is the source of an element.
    TRACKS_LINE_NUMBERS = True

    def __init__(self, parser_args=None, parser_kwargs=None, **kwargs):
        """Constructor.

        :param parser_args: Positional arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param parser_kwargs: Keyword arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param kwargs: Keyword arguments for the superclass constructor.
        """
        # Some keyword arguments will be pulled out of kwargs and placed
        # into parser_kwargs.
        extra_parser_kwargs = dict()
        for arg in ('on_duplicate_attribute',):
            if arg in kwargs:
                value = kwargs.pop(arg)
                extra_parser_kwargs[arg] = value
        super(HTMLParserTreeBuilder, self).__init__(**kwargs)
        parser_args = parser_args or []
        parser_kwargs = parser_kwargs or {}
        parser_kwargs.update(extra_parser_kwargs)
        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
            parser_kwargs['strict'] = False
        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
            parser_kwargs['convert_charrefs'] = False
        self.parser_args = (parser_args, parser_kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None,
                       exclude_encodings=None):
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
:param user_specified_encoding: The user asked to try this encoding. :param document_declared_encoding: The markup itself claims to be in this encoding. :param exclude_encodings: The user asked _not_ to try any of these encodings. :yield: A series of 4-tuples: (markup, encoding, declared encoding, has undergone character replacement) Each 4-tuple represents a strategy for converting the document to Unicode and parsing it. Each strategy will be tried in turn. """ if isinstance(markup, str): # Parse Unicode as-is. yield (markup, None, None, False) return # Ask UnicodeDammit to sniff the most likely encoding. # This was provided by the end-user; treat it as a known # definite encoding per the algorithm laid out in the HTML5 # spec. (See the EncodingDetector class for details.) known_definite_encodings = [user_specified_encoding] # This was found in the document; treat it as a slightly lower-priority # user encoding. user_encodings = [document_declared_encoding] try_encodings = [user_specified_encoding, document_declared_encoding] dammit = UnicodeDammit( markup, known_definite_encodings=known_definite_encodings, user_encodings=user_encodings, is_html=True, exclude_encodings=exclude_encodings ) yield (dammit.markup, dammit.original_encoding, dammit.declared_html_encoding, dammit.contains_replacement_characters) def feed(self, markup): """Run some incoming markup through some parsing process, populating the `BeautifulSoup` object in self.soup. """ args, kwargs = self.parser_args parser = BeautifulSoupHTMLParser(*args, **kwargs) parser.soup = self.soup try: parser.feed(markup) parser.close() except HTMLParseError as e: warnings.warn(RuntimeWarning( "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) raise e parser.already_closed_empty_element = [] # Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some # 3.2.3 code. This ensures they don't treat markup like <p></p> as a # string. # # XXX This code can be removed once most Python 3 users are on 3.2.3. if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: import re attrfind_tolerant = re.compile( r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant locatestarttagend = re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name (?:\s+ # whitespace before attribute name (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name (?:\s*=\s* # value indicator (?:'[^']*' # LITA-enclosed value |\"[^\"]*\" # LIT-enclosed value |[^'\">\s]+ # bare value ) )? 
) )* \s* # trailing whitespace """, re.VERBOSE) BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend from html.parser import tagfind, attrfind def parse_starttag(self, i): self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: return endpos rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between i+1 and j into a tag and attrs attrs = [] match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() self.lasttag = tag = rawdata[i+1:k].lower() while k < endpos: if self.strict: m = attrfind.match(rawdata, k) else: m = attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = self.unescape(attrvalue) attrs.append((attrname.lower(), attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") else: offset = offset + len(self.__starttag_text) if self.strict: self.error("junk characters in start tag: %r" % (rawdata[k:endpos][:20],)) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: <span attr="value" /> self.handle_startendtag(tag, attrs) else: self.handle_starttag(tag, attrs) if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) return endpos def set_cdata_mode(self, elem): self.cdata_elem = elem.lower() self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I) BeautifulSoupHTMLParser.parse_starttag = parse_starttag BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode CONSTRUCTOR_TAKES_STRICT = True
18,933
Python
.py
427
34.47541
318
0.60871
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
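A minimal usage sketch for the _htmlparser.py record above (not part of the original file): requesting the "html.parser" feature selects HTMLParserTreeBuilder. The markup string is invented for illustration.

from bs4 import BeautifulSoup

# "html.parser" picks the stdlib-based HTMLParserTreeBuilder registered above.
markup = "<p class='a b'>Hello <b>world</b></p>"
soup = BeautifulSoup(markup, "html.parser")

print(soup.p["class"])    # ['a', 'b'] -- 'class' is a multi-valued attribute
print(soup.b.sourceline)  # 1 -- this builder tracks line numbers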
8,490
_lxml.py
rembo10_headphones/lib/bs4/builder/_lxml.py
# Use of this source code is governed by the MIT license. __license__ = "MIT" __all__ = [ 'LXMLTreeBuilderForXML', 'LXMLTreeBuilder', ] try: from collections.abc import Callable # Python 3.6 except ImportError as e: from collections import Callable from io import BytesIO from io import StringIO from lxml import etree from bs4.element import ( Comment, Doctype, NamespacedAttribute, ProcessingInstruction, XMLProcessingInstruction, ) from bs4.builder import ( FAST, HTML, HTMLTreeBuilder, PERMISSIVE, ParserRejectedMarkup, TreeBuilder, XML) from bs4.dammit import EncodingDetector LXML = 'lxml' def _invert(d): "Invert a dictionary." return dict((v,k) for k, v in list(d.items())) class LXMLTreeBuilderForXML(TreeBuilder): DEFAULT_PARSER_CLASS = etree.XMLParser is_xml = True processing_instruction_class = XMLProcessingInstruction NAME = "lxml-xml" ALTERNATE_NAMES = ["xml"] # Well, it's permissive by XML parser standards. features = [NAME, LXML, XML, FAST, PERMISSIVE] CHUNK_SIZE = 512 # This namespace mapping is specified in the XML Namespace # standard. DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace') DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS) # NOTE: If we parsed Element objects and looked at .sourceline, # we'd be able to see the line numbers from the original document. # But instead we build an XMLParser or HTMLParser object to serve # as the target of parse messages, and those messages don't include # line numbers. # See: https://bugs.launchpad.net/lxml/+bug/1846906 def initialize_soup(self, soup): """Let the BeautifulSoup object know about the standard namespace mapping. :param soup: A `BeautifulSoup`. """ super(LXMLTreeBuilderForXML, self).initialize_soup(soup) self._register_namespaces(self.DEFAULT_NSMAPS) def _register_namespaces(self, mapping): """Let the BeautifulSoup object know about namespaces encountered while parsing the document. This might be useful later on when creating CSS selectors. :param mapping: A dictionary mapping namespace prefixes to URIs. """ for key, value in list(mapping.items()): if key and key not in self.soup._namespaces: # Let the BeautifulSoup object know about a new namespace. # If there are multiple namespaces defined with the same # prefix, the first one in the document takes precedence. self.soup._namespaces[key] = value def default_parser(self, encoding): """Find the default parser for the given encoding. :param encoding: A string. :return: Either a parser object or a class, which will be instantiated with default arguments. """ if self._default_parser is not None: return self._default_parser return etree.XMLParser( target=self, strip_cdata=False, recover=True, encoding=encoding) def parser_for(self, encoding): """Instantiate an appropriate parser for the given encoding. :param encoding: A string. :return: A parser object such as an `etree.XMLParser`. """ # Use the default parser. parser = self.default_parser(encoding) if isinstance(parser, Callable): # Instantiate the parser with default arguments parser = parser( target=self, strip_cdata=False, recover=True, encoding=encoding ) return parser def __init__(self, parser=None, empty_element_tags=None, **kwargs): # TODO: Issue a warning if parser is present but not a # callable, since that means there's no way to create new # parsers for different encodings. 
self._default_parser = parser if empty_element_tags is not None: self.empty_element_tags = set(empty_element_tags) self.soup = None self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED] super(LXMLTreeBuilderForXML, self).__init__(**kwargs) def _getNsTag(self, tag): # Split the namespace URL out of a fully-qualified lxml tag # name. Copied from lxml's src/lxml/sax.py. if tag[0] == '{': return tuple(tag[1:].split('}', 1)) else: return (None, tag) def prepare_markup(self, markup, user_specified_encoding=None, exclude_encodings=None, document_declared_encoding=None): """Run any preliminary steps necessary to make incoming markup acceptable to the parser. lxml really wants to get a bytestring and convert it to Unicode itself. So instead of using UnicodeDammit to convert the bytestring to Unicode using different encodings, this implementation uses EncodingDetector to iterate over the encodings, and tell lxml to try to parse the document as each one in turn. :param markup: Some markup -- hopefully a bytestring. :param user_specified_encoding: The user asked to try this encoding. :param document_declared_encoding: The markup itself claims to be in this encoding. :param exclude_encodings: The user asked _not_ to try any of these encodings. :yield: A series of 4-tuples: (markup, encoding, declared encoding, has undergone character replacement) Each 4-tuple represents a strategy for converting the document to Unicode and parsing it. Each strategy will be tried in turn. """ is_html = not self.is_xml if is_html: self.processing_instruction_class = ProcessingInstruction else: self.processing_instruction_class = XMLProcessingInstruction if isinstance(markup, str): # We were given Unicode. Maybe lxml can parse Unicode on # this system? yield markup, None, document_declared_encoding, False if isinstance(markup, str): # No, apparently not. Convert the Unicode to UTF-8 and # tell lxml to parse it as UTF-8. yield (markup.encode("utf8"), "utf8", document_declared_encoding, False) # This was provided by the end-user; treat it as a known # definite encoding per the algorithm laid out in the HTML5 # spec. (See the EncodingDetector class for details.) known_definite_encodings = [user_specified_encoding] # This was found in the document; treat it as a slightly lower-priority # user encoding. user_encodings = [document_declared_encoding] detector = EncodingDetector( markup, known_definite_encodings=known_definite_encodings, user_encodings=user_encodings, is_html=is_html, exclude_encodings=exclude_encodings ) for encoding in detector.encodings: yield (detector.markup, encoding, document_declared_encoding, False) def feed(self, markup): if isinstance(markup, bytes): markup = BytesIO(markup) elif isinstance(markup, str): markup = StringIO(markup) # Call feed() at least once, even if the markup is empty, # or the parser won't be initialized. data = markup.read(self.CHUNK_SIZE) try: self.parser = self.parser_for(self.soup.original_encoding) self.parser.feed(data) while len(data) != 0: # Now call feed() on the rest of the data, chunk by chunk. data = markup.read(self.CHUNK_SIZE) if len(data) != 0: self.parser.feed(data) self.parser.close() except (UnicodeDecodeError, LookupError, etree.ParserError) as e: raise ParserRejectedMarkup(e) def close(self): self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED] def start(self, name, attrs, nsmap={}): # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. attrs = dict(attrs) nsprefix = None # Invert each namespace map as it comes in. 
if len(nsmap) == 0 and len(self.nsmaps) > 1: # There are no new namespaces for this tag, but # non-default namespaces are in play, so we need a # separate tag stack to know when they end. self.nsmaps.append(None) elif len(nsmap) > 0: # A new namespace mapping has come into play. # First, Let the BeautifulSoup object know about it. self._register_namespaces(nsmap) # Then, add it to our running list of inverted namespace # mappings. self.nsmaps.append(_invert(nsmap)) # Also treat the namespace mapping as a set of attributes on the # tag, so we can recreate it later. attrs = attrs.copy() for prefix, namespace in list(nsmap.items()): attribute = NamespacedAttribute( "xmlns", prefix, "http://www.w3.org/2000/xmlns/") attrs[attribute] = namespace # Namespaces are in play. Find any attributes that came in # from lxml with namespaces attached to their names, and # turn then into NamespacedAttribute objects. new_attrs = {} for attr, value in list(attrs.items()): namespace, attr = self._getNsTag(attr) if namespace is None: new_attrs[attr] = value else: nsprefix = self._prefix_for_namespace(namespace) attr = NamespacedAttribute(nsprefix, attr, namespace) new_attrs[attr] = value attrs = new_attrs namespace, name = self._getNsTag(name) nsprefix = self._prefix_for_namespace(namespace) self.soup.handle_starttag(name, namespace, nsprefix, attrs) def _prefix_for_namespace(self, namespace): """Find the currently active prefix for the given namespace.""" if namespace is None: return None for inverted_nsmap in reversed(self.nsmaps): if inverted_nsmap is not None and namespace in inverted_nsmap: return inverted_nsmap[namespace] return None def end(self, name): self.soup.endData() completed_tag = self.soup.tagStack[-1] namespace, name = self._getNsTag(name) nsprefix = None if namespace is not None: for inverted_nsmap in reversed(self.nsmaps): if inverted_nsmap is not None and namespace in inverted_nsmap: nsprefix = inverted_nsmap[namespace] break self.soup.handle_endtag(name, nsprefix) if len(self.nsmaps) > 1: # This tag, or one of its parents, introduced a namespace # mapping, so pop it off the stack. self.nsmaps.pop() def pi(self, target, data): self.soup.endData() self.soup.handle_data(target + ' ' + data) self.soup.endData(self.processing_instruction_class) def data(self, content): self.soup.handle_data(content) def doctype(self, name, pubid, system): self.soup.endData() doctype = Doctype.for_name_and_ids(name, pubid, system) self.soup.object_was_parsed(doctype) def comment(self, content): "Handle comments as Comment objects." self.soup.endData() self.soup.handle_data(content) self.soup.endData(Comment) def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): NAME = LXML ALTERNATE_NAMES = ["lxml-html"] features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE] is_xml = False processing_instruction_class = ProcessingInstruction def default_parser(self, encoding): return etree.HTMLParser def feed(self, markup): encoding = self.soup.original_encoding try: self.parser = self.parser_for(encoding) self.parser.feed(markup) self.parser.close() except (UnicodeDecodeError, LookupError, etree.ParserError) as e: raise ParserRejectedMarkup(e) def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" return '<html><body>%s</body></html>' % fragment
12,699
Python
.py
284
35.137324
82
0.639206
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
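A sketch of how the two lxml builders in the record above are typically reached (assumes lxml is installed; the markup is invented):

from bs4 import BeautifulSoup

# "lxml" selects LXMLTreeBuilder; lxml itself closes the unterminated <p> tags.
html_soup = BeautifulSoup("<p>one<p>two", "lxml")

# "xml" selects LXMLTreeBuilderForXML, which preserves namespace prefixes.
xml_soup = BeautifulSoup(
    '<root xmlns:x="http://example.com/"><x:item/></root>', "xml")
tag = xml_soup.find("item")
print(tag.prefix, tag.namespace)  # x http://example.com/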
8,491
__init__.py
rembo10_headphones/lib/bs4/builder/__init__.py
# Use of this source code is governed by the MIT license. __license__ = "MIT" from collections import defaultdict import itertools import sys from bs4.element import ( CharsetMetaAttributeValue, ContentMetaAttributeValue, Stylesheet, Script, TemplateString, nonwhitespace_re ) __all__ = [ 'HTMLTreeBuilder', 'SAXTreeBuilder', 'TreeBuilder', 'TreeBuilderRegistry', ] # Some useful features for a TreeBuilder to have. FAST = 'fast' PERMISSIVE = 'permissive' STRICT = 'strict' XML = 'xml' HTML = 'html' HTML_5 = 'html5' class TreeBuilderRegistry(object): """A way of looking up TreeBuilder subclasses by their name or by desired features. """ def __init__(self): self.builders_for_feature = defaultdict(list) self.builders = [] def register(self, treebuilder_class): """Register a treebuilder based on its advertised features. :param treebuilder_class: A subclass of Treebuilder. its .features attribute should list its features. """ for feature in treebuilder_class.features: self.builders_for_feature[feature].insert(0, treebuilder_class) self.builders.insert(0, treebuilder_class) def lookup(self, *features): """Look up a TreeBuilder subclass with the desired features. :param features: A list of features to look for. If none are provided, the most recently registered TreeBuilder subclass will be used. :return: A TreeBuilder subclass, or None if there's no registered subclass with all the requested features. """ if len(self.builders) == 0: # There are no builders at all. return None if len(features) == 0: # They didn't ask for any features. Give them the most # recently registered builder. return self.builders[0] # Go down the list of features in order, and eliminate any builders # that don't match every feature. features = list(features) features.reverse() candidates = None candidate_set = None while len(features) > 0: feature = features.pop() we_have_the_feature = self.builders_for_feature.get(feature, []) if len(we_have_the_feature) > 0: if candidates is None: candidates = we_have_the_feature candidate_set = set(candidates) else: # Eliminate any candidates that don't have this feature. candidate_set = candidate_set.intersection( set(we_have_the_feature)) # The only valid candidates are the ones in candidate_set. # Go through the original list of candidates and pick the first one # that's in candidate_set. if candidate_set is None: return None for candidate in candidates: if candidate in candidate_set: return candidate return None # The BeautifulSoup class will take feature lists from developers and use them # to look up builders in this registry. builder_registry = TreeBuilderRegistry() class TreeBuilder(object): """Turn a textual document into a Beautiful Soup object tree.""" NAME = "[Unknown tree builder]" ALTERNATE_NAMES = [] features = [] is_xml = False picklable = False empty_element_tags = None # A tag will be considered an empty-element # tag when and only when it has no contents. # A value for these tag/attribute combinations is a space- or # comma-separated list of CDATA, rather than a single CDATA. DEFAULT_CDATA_LIST_ATTRIBUTES = {} # Whitespace should be preserved inside these tags. DEFAULT_PRESERVE_WHITESPACE_TAGS = set() # The textual contents of tags with these names should be # instantiated with some class other than NavigableString. DEFAULT_STRING_CONTAINERS = {} USE_DEFAULT = object() # Most parsers don't keep track of line numbers. 
TRACKS_LINE_NUMBERS = False def __init__(self, multi_valued_attributes=USE_DEFAULT, preserve_whitespace_tags=USE_DEFAULT, store_line_numbers=USE_DEFAULT, string_containers=USE_DEFAULT, ): """Constructor. :param multi_valued_attributes: If this is set to None, the TreeBuilder will not turn any values for attributes like 'class' into lists. Setting this to a dictionary will customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES for an example. Internally, these are called "CDATA list attributes", but that probably doesn't make sense to an end-user, so the argument name is `multi_valued_attributes`. :param preserve_whitespace_tags: A list of tags to treat the way <pre> tags are treated in HTML. Tags in this list are immune from pretty-printing; their contents will always be output as-is. :param string_containers: A dictionary mapping tag names to the classes that should be instantiated to contain the textual contents of those tags. The default is to use NavigableString for every tag, no matter what the name. You can override the default by changing DEFAULT_STRING_CONTAINERS. :param store_line_numbers: If the parser keeps track of the line numbers and positions of the original markup, that information will, by default, be stored in each corresponding `Tag` object. You can turn this off by passing store_line_numbers=False. If the parser you're using doesn't keep track of this information, then setting store_line_numbers=True will do nothing. """ self.soup = None if multi_valued_attributes is self.USE_DEFAULT: multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES self.cdata_list_attributes = multi_valued_attributes if preserve_whitespace_tags is self.USE_DEFAULT: preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS self.preserve_whitespace_tags = preserve_whitespace_tags if store_line_numbers == self.USE_DEFAULT: store_line_numbers = self.TRACKS_LINE_NUMBERS self.store_line_numbers = store_line_numbers if string_containers == self.USE_DEFAULT: string_containers = self.DEFAULT_STRING_CONTAINERS self.string_containers = string_containers def initialize_soup(self, soup): """The BeautifulSoup object has been initialized and is now being associated with the TreeBuilder. :param soup: A BeautifulSoup object. """ self.soup = soup def reset(self): """Do any work necessary to reset the underlying parser for a new document. By default, this does nothing. """ pass def can_be_empty_element(self, tag_name): """Might a tag with this name be an empty-element tag? The final markup may or may not actually present this tag as self-closing. For instance: an HTMLBuilder does not consider a <p> tag to be an empty-element tag (it's not in HTMLBuilder.empty_element_tags). This means an empty <p> tag will be presented as "<p></p>", not "<p/>" or "<p>". The default implementation has no opinion about which tags are empty-element tags, so a tag will be presented as an empty-element tag if and only if it has no children. "<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will be left alone. :param tag_name: The name of a markup tag. """ if self.empty_element_tags is None: return True return tag_name in self.empty_element_tags def feed(self, markup): """Run some incoming markup through some parsing process, populating the `BeautifulSoup` object in self.soup. This method is not implemented in TreeBuilder; it must be implemented in subclasses. :return: None. 
""" raise NotImplementedError() def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None, exclude_encodings=None): """Run any preliminary steps necessary to make incoming markup acceptable to the parser. :param markup: Some markup -- probably a bytestring. :param user_specified_encoding: The user asked to try this encoding. :param document_declared_encoding: The markup itself claims to be in this encoding. NOTE: This argument is not used by the calling code and can probably be removed. :param exclude_encodings: The user asked _not_ to try any of these encodings. :yield: A series of 4-tuples: (markup, encoding, declared encoding, has undergone character replacement) Each 4-tuple represents a strategy for converting the document to Unicode and parsing it. Each strategy will be tried in turn. By default, the only strategy is to parse the markup as-is. See `LXMLTreeBuilderForXML` and `HTMLParserTreeBuilder` for implementations that take into account the quirks of particular parsers. """ yield markup, None, None, False def test_fragment_to_document(self, fragment): """Wrap an HTML fragment to make it look like a document. Different parsers do this differently. For instance, lxml introduces an empty <head> tag, and html5lib doesn't. Abstracting this away lets us write simple tests which run HTML fragments through the parser and compare the results against other HTML fragments. This method should not be used outside of tests. :param fragment: A string -- fragment of HTML. :return: A string -- a full HTML document. """ return fragment def set_up_substitutions(self, tag): """Set up any substitutions that will need to be performed on a `Tag` when it's output as a string. By default, this does nothing. See `HTMLTreeBuilder` for a case where this is used. :param tag: A `Tag` :return: Whether or not a substitution was performed. """ return False def _replace_cdata_list_attribute_values(self, tag_name, attrs): """When an attribute value is associated with a tag that can have multiple values for that attribute, convert the string value to a list of strings. Basically, replaces class="foo bar" with class=["foo", "bar"] NOTE: This method modifies its input in place. :param tag_name: The name of a tag. :param attrs: A dictionary containing the tag's attributes. Any appropriate attribute values will be modified in place. """ if not attrs: return attrs if self.cdata_list_attributes: universal = self.cdata_list_attributes.get('*', []) tag_specific = self.cdata_list_attributes.get( tag_name.lower(), None) for attr in list(attrs.keys()): if attr in universal or (tag_specific and attr in tag_specific): # We have a "class"-type attribute whose string # value is a whitespace-separated list of # values. Split it into a list. value = attrs[attr] if isinstance(value, str): values = nonwhitespace_re.findall(value) else: # html5lib sometimes calls setAttributes twice # for the same tag when rearranging the parse # tree. On the second call the attribute value # here is already a list. If this happens, # leave the value alone rather than trying to # split it again. values = value attrs[attr] = values return attrs class SAXTreeBuilder(TreeBuilder): """A Beautiful Soup treebuilder that listens for SAX events. This is not currently used for anything, but it demonstrates how a simple TreeBuilder would work. 
""" def feed(self, markup): raise NotImplementedError() def close(self): pass def startElement(self, name, attrs): attrs = dict((key[1], value) for key, value in list(attrs.items())) #print("Start %s, %r" % (name, attrs)) self.soup.handle_starttag(name, attrs) def endElement(self, name): #print("End %s" % name) self.soup.handle_endtag(name) def startElementNS(self, nsTuple, nodeName, attrs): # Throw away (ns, nodeName) for now. self.startElement(nodeName, attrs) def endElementNS(self, nsTuple, nodeName): # Throw away (ns, nodeName) for now. self.endElement(nodeName) #handler.endElementNS((ns, node.nodeName), node.nodeName) def startPrefixMapping(self, prefix, nodeValue): # Ignore the prefix for now. pass def endPrefixMapping(self, prefix): # Ignore the prefix for now. # handler.endPrefixMapping(prefix) pass def characters(self, content): self.soup.handle_data(content) def startDocument(self): pass def endDocument(self): pass class HTMLTreeBuilder(TreeBuilder): """This TreeBuilder knows facts about HTML. Such as which tags are empty-element tags. """ empty_element_tags = set([ # These are from HTML5. 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', # These are from earlier versions of HTML and are removed in HTML5. 'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer' ]) # The HTML standard defines these as block-level elements. Beautiful # Soup does not treat these elements differently from other elements, # but it may do so eventually, and this information is available if # you need to use it. block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"]) # The HTML standard defines an unusual content model for these tags. # We represent this by using a string class other than NavigableString # inside these tags. # # I made this list by going through the HTML spec # (https://html.spec.whatwg.org/#metadata-content) and looking for # "metadata content" elements that can contain strings. # # TODO: Arguably <noscript> could go here but it seems # qualitatively different from the other tags. DEFAULT_STRING_CONTAINERS = { 'style': Stylesheet, 'script': Script, 'template': TemplateString, } # The HTML standard defines these attributes as containing a # space-separated list of values, not a single value. That is, # class="foo bar" means that the 'class' attribute has two values, # 'foo' and 'bar', not the single value 'foo bar'. When we # encounter one of these attributes, we will parse its value into # a list of values if possible. Upon output, the list will be # converted back into a string. DEFAULT_CDATA_LIST_ATTRIBUTES = { "*" : ['class', 'accesskey', 'dropzone'], "a" : ['rel', 'rev'], "link" : ['rel', 'rev'], "td" : ["headers"], "th" : ["headers"], "td" : ["headers"], "form" : ["accept-charset"], "object" : ["archive"], # These are HTML5 specific, as are *.accesskey and *.dropzone above. "area" : ["rel"], "icon" : ["sizes"], "iframe" : ["sandbox"], "output" : ["for"], } DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) def set_up_substitutions(self, tag): """Replace the declared encoding in a <meta> tag with a placeholder, to be substituted when the tag is output to a string. 
An HTML document may come in to Beautiful Soup as one encoding, but exit in a different encoding, and the <meta> tag needs to be changed to reflect this. :param tag: A `Tag` :return: Whether or not a substitution was performed. """ # We are only interested in <meta> tags if tag.name != 'meta': return False http_equiv = tag.get('http-equiv') content = tag.get('content') charset = tag.get('charset') # We are interested in <meta> tags that say what encoding the # document was originally in. This means HTML 5-style <meta> # tags that provide the "charset" attribute. It also means # HTML 4-style <meta> tags that provide the "content" # attribute and have "http-equiv" set to "content-type". # # In both cases we will replace the value of the appropriate # attribute with a standin object that can take on any # encoding. meta_encoding = None if charset is not None: # HTML 5 style: # <meta charset="utf8"> meta_encoding = charset tag['charset'] = CharsetMetaAttributeValue(charset) elif (content is not None and http_equiv is not None and http_equiv.lower() == 'content-type'): # HTML 4 style: # <meta http-equiv="content-type" content="text/html; charset=utf8"> tag['content'] = ContentMetaAttributeValue(content) return (meta_encoding is not None) def register_treebuilders_from(module): """Copy TreeBuilders from the given module into this module.""" this_module = sys.modules[__name__] for name in module.__all__: obj = getattr(module, name) if issubclass(obj, TreeBuilder): setattr(this_module, name, obj) this_module.__all__.append(name) # Register the builder while we're at it. this_module.builder_registry.register(obj) class ParserRejectedMarkup(Exception): """An Exception to be raised when the underlying parser simply refuses to parse the given markup. """ def __init__(self, message_or_exception): """Explain why the parser rejected the given markup, either with a textual explanation or another exception. """ if isinstance(message_or_exception, Exception): e = message_or_exception message_or_exception = "%s: %s" % (e.__class__.__name__, str(e)) super(ParserRejectedMarkup, self).__init__(message_or_exception) # Builders are registered in reverse order of priority, so that custom # builder registrations will take precedence. In general, we want lxml # to take precedence over html5lib, because it's faster. And we only # want to use HTMLParser as a last resort. from . import _htmlparser register_treebuilders_from(_htmlparser) try: from . import _html5lib register_treebuilders_from(_html5lib) except ImportError: # They don't have html5lib installed. pass try: from . import _lxml register_treebuilders_from(_lxml) except ImportError: # They don't have lxml installed. pass
19,870
Python
.py
430
37.116279
317
0.641813
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
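A sketch of the feature lookup performed by the TreeBuilderRegistry in the record above; which class each call returns depends on which parsers are installed:

from bs4.builder import builder_registry

# Every requested feature must be advertised by the winning builder;
# among the matches, the most recently registered builder wins.
print(builder_registry.lookup('html'))             # e.g. LXMLTreeBuilder, if lxml is present
print(builder_registry.lookup('html', 'strict'))   # e.g. HTMLParserTreeBuilder
print(builder_registry.lookup('no-such-feature'))  # None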
8,492
_html5lib.py
rembo10_headphones/lib/bs4/builder/_html5lib.py
# Use of this source code is governed by the MIT license. __license__ = "MIT" __all__ = [ 'HTML5TreeBuilder', ] import warnings import re from bs4.builder import ( PERMISSIVE, HTML, HTML_5, HTMLTreeBuilder, ) from bs4.element import ( NamespacedAttribute, nonwhitespace_re, ) import html5lib from html5lib.constants import ( namespaces, prefixes, ) from bs4.element import ( Comment, Doctype, NavigableString, Tag, ) try: # Pre-0.99999999 from html5lib.treebuilders import _base as treebuilder_base new_html5lib = False except ImportError as e: # 0.99999999 and up from html5lib.treebuilders import base as treebuilder_base new_html5lib = True class HTML5TreeBuilder(HTMLTreeBuilder): """Use html5lib to build a tree. Note that this TreeBuilder does not support some features common to HTML TreeBuilders. Some of these features could theoretically be implemented, but at the very least it's quite difficult, because html5lib moves the parse tree around as it's being built. * This TreeBuilder doesn't use different subclasses of NavigableString based on the name of the tag in which the string was found. * You can't use a SoupStrainer to parse only part of a document. """ NAME = "html5lib" features = [NAME, PERMISSIVE, HTML_5, HTML] # html5lib can tell us which line number and position in the # original file is the source of an element. TRACKS_LINE_NUMBERS = True def prepare_markup(self, markup, user_specified_encoding, document_declared_encoding=None, exclude_encodings=None): # Store the user-specified encoding for use later on. self.user_specified_encoding = user_specified_encoding # document_declared_encoding and exclude_encodings aren't used # ATM because the html5lib TreeBuilder doesn't use # UnicodeDammit. if exclude_encodings: warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.") yield (markup, None, None, False) # These methods are defined by Beautiful Soup. def feed(self, markup): if self.soup.parse_only is not None: warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.") parser = html5lib.HTMLParser(tree=self.create_treebuilder) self.underlying_builder.parser = parser extra_kwargs = dict() if not isinstance(markup, str): if new_html5lib: extra_kwargs['override_encoding'] = self.user_specified_encoding else: extra_kwargs['encoding'] = self.user_specified_encoding doc = parser.parse(markup, **extra_kwargs) # Set the character encoding detected by the tokenizer. if isinstance(markup, str): # We need to special-case this because html5lib sets # charEncoding to UTF-8 if it gets Unicode input. doc.original_encoding = None else: original_encoding = parser.tokenizer.stream.charEncoding[0] if not isinstance(original_encoding, str): # In 0.99999999 and up, the encoding is an html5lib # Encoding object. We want to use a string for compatibility # with other tree builders. 
original_encoding = original_encoding.name doc.original_encoding = original_encoding self.underlying_builder.parser = None def create_treebuilder(self, namespaceHTMLElements): self.underlying_builder = TreeBuilderForHtml5lib( namespaceHTMLElements, self.soup, store_line_numbers=self.store_line_numbers ) return self.underlying_builder def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" return '<html><head></head><body>%s</body></html>' % fragment class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder): def __init__(self, namespaceHTMLElements, soup=None, store_line_numbers=True, **kwargs): if soup: self.soup = soup else: from bs4 import BeautifulSoup # TODO: Why is the parser 'html.parser' here? To avoid an # infinite loop? self.soup = BeautifulSoup( "", "html.parser", store_line_numbers=store_line_numbers, **kwargs ) # TODO: What are **kwargs exactly? Should they be passed in # here in addition to/instead of being passed to the BeautifulSoup # constructor? super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements) # This will be set later to an html5lib.html5parser.HTMLParser # object, which we can use to track the current line number. self.parser = None self.store_line_numbers = store_line_numbers def documentClass(self): self.soup.reset() return Element(self.soup, self.soup, None) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] doctype = Doctype.for_name_and_ids(name, publicId, systemId) self.soup.object_was_parsed(doctype) def elementClass(self, name, namespace): kwargs = {} if self.parser and self.store_line_numbers: # This represents the point immediately after the end of the # tag. We don't know when the tag started, but we do know # where it ended -- the character just before this one. sourceline, sourcepos = self.parser.tokenizer.stream.position() kwargs['sourceline'] = sourceline kwargs['sourcepos'] = sourcepos-1 tag = self.soup.new_tag(name, namespace, **kwargs) return Element(tag, self.soup, namespace) def commentClass(self, data): return TextNode(Comment(data), self.soup) def fragmentClass(self): from bs4 import BeautifulSoup # TODO: Why is the parser 'html.parser' here? To avoid an # infinite loop? self.soup = BeautifulSoup("", "html.parser") self.soup.name = "[document_fragment]" return Element(self.soup, self.soup, None) def appendChild(self, node): # XXX This code is not covered by the BS4 tests. 
self.soup.append(node.element) def getDocument(self): return self.soup def getFragment(self): return treebuilder_base.TreeBuilder.getFragment(self).element def testSerializer(self, element): from bs4 import BeautifulSoup rv = [] doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$') def serializeElement(element, indent=0): if isinstance(element, BeautifulSoup): pass if isinstance(element, Doctype): m = doctype_re.match(element) if m: name = m.group(1) if m.lastindex > 1: publicId = m.group(2) or "" systemId = m.group(3) or m.group(4) or "" rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % (' ' * indent, name, publicId, systemId)) else: rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name)) else: rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) elif isinstance(element, Comment): rv.append("|%s<!-- %s -->" % (' ' * indent, element)) elif isinstance(element, NavigableString): rv.append("|%s\"%s\"" % (' ' * indent, element)) else: if element.namespace: name = "%s %s" % (prefixes[element.namespace], element.name) else: name = element.name rv.append("|%s<%s>" % (' ' * indent, name)) if element.attrs: attributes = [] for name, value in list(element.attrs.items()): if isinstance(name, NamespacedAttribute): name = "%s %s" % (prefixes[name.namespace], name.name) if isinstance(value, list): value = " ".join(value) attributes.append((name, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) indent += 2 for child in element.children: serializeElement(child, indent) serializeElement(element, 0) return "\n".join(rv) class AttrList(object): def __init__(self, element): self.element = element self.attrs = dict(self.element.attrs) def __iter__(self): return list(self.attrs.items()).__iter__() def __setitem__(self, name, value): # If this attribute is a multi-valued attribute for this element, # turn its value into a list. list_attr = self.element.cdata_list_attributes if (name in list_attr['*'] or (self.element.name in list_attr and name in list_attr[self.element.name])): # A node that is being cloned may have already undergone # this procedure. if not isinstance(value, list): value = nonwhitespace_re.findall(value) self.element[name] = value def items(self): return list(self.attrs.items()) def keys(self): return list(self.attrs.keys()) def __len__(self): return len(self.attrs) def __getitem__(self, name): return self.attrs[name] def __contains__(self, name): return name in list(self.attrs.keys()) class Element(treebuilder_base.Node): def __init__(self, element, soup, namespace): treebuilder_base.Node.__init__(self, element.name) self.element = element self.soup = soup self.namespace = namespace def appendChild(self, node): string_child = child = None if isinstance(node, str): # Some other piece of code decided to pass in a string # instead of creating a TextElement object to contain the # string. string_child = child = node elif isinstance(node, Tag): # Some other piece of code decided to pass in a Tag # instead of creating an Element object to contain the # Tag. child = node elif node.element.__class__ == NavigableString: string_child = child = node.element node.parent = self else: child = node.element node.parent = self if not isinstance(child, str) and child.parent is not None: node.element.extract() if (string_child is not None and self.element.contents and self.element.contents[-1].__class__ == NavigableString): # We are appending a string onto another string. 
# TODO This has O(n^2) performance, for input like # "a</a>a</a>a</a>..." old_element = self.element.contents[-1] new_element = self.soup.new_string(old_element + string_child) old_element.replace_with(new_element) self.soup._most_recent_element = new_element else: if isinstance(node, str): # Create a brand new NavigableString from this string. child = self.soup.new_string(node) # Tell Beautiful Soup to act as if it parsed this element # immediately after the parent's last descendant. (Or # immediately after the parent, if it has no children.) if self.element.contents: most_recent_element = self.element._last_descendant(False) elif self.element.next_element is not None: # Something from further ahead in the parse tree is # being inserted into this earlier element. This is # very annoying because it means an expensive search # for the last element in the tree. most_recent_element = self.soup._last_descendant() else: most_recent_element = self.element self.soup.object_was_parsed( child, parent=self.element, most_recent_element=most_recent_element) def getAttributes(self): if isinstance(self.element, Comment): return {} return AttrList(self.element) def setAttributes(self, attributes): if attributes is not None and len(attributes) > 0: converted_attributes = [] for name, value in list(attributes.items()): if isinstance(name, tuple): new_name = NamespacedAttribute(*name) del attributes[name] attributes[new_name] = value self.soup.builder._replace_cdata_list_attribute_values( self.name, attributes) for name, value in list(attributes.items()): self.element[name] = value # The attributes may contain variables that need substitution. # Call set_up_substitutions manually. # # The Tag constructor called this method when the Tag was created, # but we just set/changed the attributes, so call it again. self.soup.builder.set_up_substitutions(self.element) attributes = property(getAttributes, setAttributes) def insertText(self, data, insertBefore=None): text = TextNode(self.soup.new_string(data), self.soup) if insertBefore: self.insertBefore(text, insertBefore) else: self.appendChild(text) def insertBefore(self, node, refNode): index = self.element.index(refNode.element) if (node.element.__class__ == NavigableString and self.element.contents and self.element.contents[index-1].__class__ == NavigableString): # (See comments in appendChild) old_node = self.element.contents[index-1] new_str = self.soup.new_string(old_node + node.element) old_node.replace_with(new_str) else: self.element.insert(index, node.element) node.parent = self def removeChild(self, node): node.element.extract() def reparentChildren(self, new_parent): """Move all of this tag's children into another tag.""" # print("MOVE", self.element.contents) # print("FROM", self.element) # print("TO", new_parent.element) element = self.element new_parent_element = new_parent.element # Determine what this tag's next_element will be once all the children # are removed. final_next_element = element.next_sibling new_parents_last_descendant = new_parent_element._last_descendant(False, False) if len(new_parent_element.contents) > 0: # The new parent already contains children. We will be # appending this tag's children to the end. new_parents_last_child = new_parent_element.contents[-1] new_parents_last_descendant_next_element = new_parents_last_descendant.next_element else: # The new parent contains no children. 
new_parents_last_child = None new_parents_last_descendant_next_element = new_parent_element.next_element to_append = element.contents if len(to_append) > 0: # Set the first child's previous_element and previous_sibling # to elements within the new parent first_child = to_append[0] if new_parents_last_descendant is not None: first_child.previous_element = new_parents_last_descendant else: first_child.previous_element = new_parent_element first_child.previous_sibling = new_parents_last_child if new_parents_last_descendant is not None: new_parents_last_descendant.next_element = first_child else: new_parent_element.next_element = first_child if new_parents_last_child is not None: new_parents_last_child.next_sibling = first_child # Find the very last element being moved. It is now the # parent's last descendant. It has no .next_sibling and # its .next_element is whatever the previous last # descendant had. last_childs_last_descendant = to_append[-1]._last_descendant(False, True) last_childs_last_descendant.next_element = new_parents_last_descendant_next_element if new_parents_last_descendant_next_element is not None: # TODO: This code has no test coverage and I'm not sure # how to get html5lib to go through this path, but it's # just the other side of the previous line. new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant last_childs_last_descendant.next_sibling = None for child in to_append: child.parent = new_parent_element new_parent_element.contents.append(child) # Now that this element has no children, change its .next_element. element.contents = [] element.next_element = final_next_element # print("DONE WITH MOVE") # print("FROM", self.element) # print("TO", new_parent_element) def cloneNode(self): tag = self.soup.new_tag(self.element.name, self.namespace) node = Element(tag, self.soup, self.namespace) for key,value in self.attributes: node.attributes[key] = value return node def hasContent(self): return self.element.contents def getNameTuple(self): if self.namespace == None: return namespaces["html"], self.name else: return self.namespace, self.name nameTuple = property(getNameTuple) class TextNode(Element): def __init__(self, element, soup): treebuilder_base.Node.__init__(self, None) self.element = element self.soup = soup def cloneNode(self): raise NotImplementedError
18,748
Python
.py
403
35.372208
159
0.60866
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
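A minimal sketch of the html5lib builder in the record above, which repairs markup the way a browser's HTML5 parser would (assumes html5lib is installed; the markup is invented):

from bs4 import BeautifulSoup

soup = BeautifulSoup("<table><tr><td>cell", "html5lib")
# html5lib supplies the implied <html>, <head>, <body> and <tbody> elements.
print(soup.body.table.tbody.tr.td.string)  # 'cell'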
8,493
wavpack.py
rembo10_headphones/lib/mutagen/wavpack.py
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#           2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

"""WavPack reading and writing.

WavPack is a lossless format that uses APEv2 tags.

Read

* http://www.wavpack.com/
* http://www.wavpack.com/file_format.txt

for more information.
"""

__all__ = ["WavPack", "Open", "delete"]

from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen._util import cdata, convert_error


class WavPackHeaderError(error):
    pass


RATES = [6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000,
         44100, 48000, 64000, 88200, 96000, 192000]


class _WavPackHeader(object):

    def __init__(self, block_size, version, track_no, index_no,
                 total_samples, block_index, block_samples, flags, crc):
        self.block_size = block_size
        self.version = version
        self.track_no = track_no
        self.index_no = index_no
        self.total_samples = total_samples
        self.block_index = block_index
        self.block_samples = block_samples
        self.flags = flags
        self.crc = crc

    @classmethod
    @convert_error(IOError, WavPackHeaderError)
    def from_fileobj(cls, fileobj):
        """A new _WavPackHeader or raises WavPackHeaderError"""

        header = fileobj.read(32)
        if len(header) != 32 or not header.startswith(b"wvpk"):
            raise WavPackHeaderError("not a WavPack header: %r" % header)

        block_size = cdata.uint_le(header[4:8])
        version = cdata.ushort_le(header[8:10])
        track_no = ord(header[10:11])
        index_no = ord(header[11:12])
        samples = cdata.uint_le(header[12:16])
        if samples == 2 ** 32 - 1:
            samples = -1
        block_index = cdata.uint_le(header[16:20])
        block_samples = cdata.uint_le(header[20:24])
        flags = cdata.uint_le(header[24:28])
        crc = cdata.uint_le(header[28:32])

        return _WavPackHeader(block_size, version, track_no, index_no,
                              samples, block_index, block_samples,
                              flags, crc)


class WavPackInfo(StreamInfo):
    """WavPack stream information.

    Attributes:
        channels (int): number of audio channels (1 or 2)
        length (float): file length in seconds, as a float
        sample_rate (int): audio sampling rate in Hz
        bits_per_sample (int): audio sample size
        version (int): WavPack stream version
    """

    def __init__(self, fileobj):
        try:
            header = _WavPackHeader.from_fileobj(fileobj)
        except WavPackHeaderError:
            raise WavPackHeaderError("not a WavPack file")

        self.version = header.version
        self.channels = bool(header.flags & 4) or 2
        self.sample_rate = RATES[(header.flags >> 23) & 0xF]
        self.bits_per_sample = ((header.flags & 3) + 1) * 8

        # most common multiplier (DSD64)
        if (header.flags >> 31) & 1:
            self.sample_rate *= 4
            self.bits_per_sample = 1

        if header.total_samples == -1 or header.block_index != 0:
            # TODO: we could make this faster by using the tag size
            # and search backwards for the last block, then do
            # last.block_index + last.block_samples - initial.block_index
            samples = header.block_samples
            while 1:
                fileobj.seek(header.block_size - 32 + 8, 1)
                try:
                    header = _WavPackHeader.from_fileobj(fileobj)
                except WavPackHeaderError:
                    break
                samples += header.block_samples
        else:
            samples = header.total_samples

        self.length = float(samples) / self.sample_rate

    def pprint(self):
        return u"WavPack, %.2f seconds, %d Hz" % (self.length,
                                                  self.sample_rate)


class WavPack(APEv2File):
    """WavPack(filething)

    Arguments:
        filething (filething)

    Attributes:
        info (`WavPackInfo`)
    """

    _Info = WavPackInfo
    _mimes = ["audio/x-wavpack"]

    @staticmethod
    def score(filename, fileobj, header):
        return header.startswith(b"wvpk") * 2


Open = WavPack
4,426
Python
.py
107
32.850467
78
0.622404
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
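A minimal usage sketch for the wavpack.py record above; 'song.wv' is a hypothetical path:

from mutagen.wavpack import WavPack

audio = WavPack("song.wv")
print(audio.info.pprint())  # e.g. "WavPack, 211.83 seconds, 44100 Hz"
print(audio.info.channels, audio.info.bits_per_sample)

# Tags are APEv2, inherited from the APEv2File base class.
if audio.tags is None:
    audio.add_tags()
audio["Title"] = "An example title"
audio.save()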
8,494
oggtheora.py
rembo10_headphones/lib/mutagen/oggtheora.py
# -*- coding: utf-8 -*-
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

"""Read and write Ogg Theora comments.

This module handles Theora files wrapped in an Ogg bitstream. The
first Theora stream found is used.

Based on the specification at http://theora.org/doc/Theora_I_spec.pdf.
"""

__all__ = ["OggTheora", "Open", "delete"]

import struct

from mutagen import StreamInfo
from mutagen._vorbis import VCommentDict
from mutagen._util import cdata, get_size, loadfile, convert_error
from mutagen._tags import PaddingInfo
from mutagen.ogg import OggPage, OggFileType, error as OggError


class error(OggError):
    pass


class OggTheoraHeaderError(error):
    pass


class OggTheoraInfo(StreamInfo):
    """OggTheoraInfo()

    Ogg Theora stream information.

    Attributes:
        length (`float`): File length in seconds, as a float
        fps (`float`): Video frames per second, as a float
        bitrate (`int`): Bitrate in bps (int)
    """

    length = 0
    fps = 0
    bitrate = 0

    def __init__(self, fileobj):
        page = OggPage(fileobj)
        while not page.packets or \
                not page.packets[0].startswith(b"\x80theora"):
            page = OggPage(fileobj)
        if not page.first:
            raise OggTheoraHeaderError(
                "page has ID header, but doesn't start a stream")
        data = page.packets[0]
        if len(data) < 42:
            raise OggTheoraHeaderError("Truncated header")
        vmaj, vmin = struct.unpack("2B", data[7:9])
        if (vmaj, vmin) != (3, 2):
            raise OggTheoraHeaderError(
                "found Theora version %d.%d != 3.2" % (vmaj, vmin))
        fps_num, fps_den = struct.unpack(">2I", data[22:30])
        if not fps_den or not fps_num:
            raise OggTheoraHeaderError("FRN or FRD is equal to zero")
        self.fps = fps_num / float(fps_den)
        self.bitrate = cdata.uint_be(b"\x00" + data[37:40])
        self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F
        self.serial = page.serial

    def _post_tags(self, fileobj):
        page = OggPage.find_last(fileobj, self.serial, finishing=True)
        if page is None:
            raise OggTheoraHeaderError
        position = page.position
        mask = (1 << self.granule_shift) - 1
        frames = (position >> self.granule_shift) + (position & mask)
        assert self.fps
        self.length = frames / float(self.fps)

    def pprint(self):
        return u"Ogg Theora, %.2f seconds, %d bps" % (self.length,
                                                      self.bitrate)


class OggTheoraCommentDict(VCommentDict):
    """Theora comments embedded in an Ogg bitstream."""

    def __init__(self, fileobj, info):
        pages = []
        complete = False
        while not complete:
            page = OggPage(fileobj)
            if page.serial == info.serial:
                pages.append(page)
                complete = page.complete or (len(page.packets) > 1)
        packets = OggPage.to_packets(pages)
        if not packets:
            raise error("Missing metadata packet")
        data = packets[0][7:]
        super(OggTheoraCommentDict, self).__init__(data, framing=False)
        self._padding = len(data) - self._size

    def _inject(self, fileobj, padding_func):
        """Write tag data into the Theora comment packet/page."""

        fileobj.seek(0)
        page = OggPage(fileobj)
        while not page.packets or \
                not page.packets[0].startswith(b"\x81theora"):
            page = OggPage(fileobj)

        old_pages = [page]
        while not (old_pages[-1].complete or
                   len(old_pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == old_pages[0].serial:
                old_pages.append(page)

        packets = OggPage.to_packets(old_pages, strict=False)

        content_size = get_size(fileobj) - len(packets[0])  # approx
        vcomment_data = b"\x81theora" + self.write(framing=False)
        padding_left = len(packets[0]) - len(vcomment_data)

        info = PaddingInfo(padding_left, content_size)
        new_padding = info._get_padding(padding_func)

        packets[0] = vcomment_data + b"\x00" * new_padding

        new_pages = OggPage._from_packets_try_preserve(packets, old_pages)
        OggPage.replace(fileobj, old_pages, new_pages)


class OggTheora(OggFileType):
    """OggTheora(filething)

    An Ogg Theora file.

    Arguments:
        filething (filething)

    Attributes:
        info (`OggTheoraInfo`)
        tags (`mutagen._vorbis.VCommentDict`)
    """

    _Info = OggTheoraInfo
    _Tags = OggTheoraCommentDict
    _Error = OggTheoraHeaderError
    _mimes = ["video/x-theora"]

    info = None
    tags = None

    @staticmethod
    def score(filename, fileobj, header):
        return (header.startswith(b"OggS") *
                ((b"\x80theora" in header) +
                 (b"\x81theora" in header)) * 2)


Open = OggTheora


@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
    """delete(filething)

    Arguments:
        filething (filething)

    Raises:
        mutagen.MutagenError

    Remove tags from a file.
    """

    t = OggTheora(filething)
    filething.fileobj.seek(0)
    t.delete(filething)
5,489
Python
.py
138
31.978261
77
0.632372
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
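A minimal usage sketch for the oggtheora.py record above; 'clip.ogv' is a hypothetical path:

from mutagen.oggtheora import OggTheora

video = OggTheora("clip.ogv")
print(video.info.pprint())  # e.g. "Ogg Theora, 12.40 seconds, 500000 bps"
print(video.info.fps)

# Tags are Vorbis-style comments; keys are compared case-insensitively.
video.tags["title"] = ["An example title"]
video.save()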
8,495
_vorbis.py
rembo10_headphones/lib/mutagen/_vorbis.py
# -*- coding: utf-8 -*- # Copyright (C) 2005-2006 Joe Wreschnig # 2013 Christoph Reiter # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. """Read and write Vorbis comment data. Vorbis comments are freeform key/value pairs; keys are case-insensitive ASCII and values are Unicode strings. A key may have multiple values. The specification is at http://www.xiph.org/vorbis/doc/v-comment.html. """ import sys from io import BytesIO import mutagen from mutagen._util import DictMixin, cdata, MutagenError, reraise def is_valid_key(key): """Return true if a string is a valid Vorbis comment key. Valid Vorbis comment keys are printable ASCII between 0x20 (space) and 0x7D ('}'), excluding '='. Takes str/unicode in Python 2, unicode in Python 3 """ if isinstance(key, bytes): raise TypeError("needs to be str not bytes") for c in key: if c < " " or c > "}" or c == "=": return False else: return bool(key) istag = is_valid_key class error(MutagenError): pass class VorbisUnsetFrameError(error): pass class VorbisEncodingError(error): pass class VComment(mutagen.Tags, list): """A Vorbis comment parser, accessor, and renderer. All comment ordering is preserved. A VComment is a list of key/value pairs, and so any Python list method can be used on it. Vorbis comments are always wrapped in something like an Ogg Vorbis bitstream or a FLAC metadata block, so this loads string data or a file-like object, not a filename. Attributes: vendor (text): the stream 'vendor' (i.e. writer); default 'Mutagen' """ vendor = u"Mutagen " + mutagen.version_string def __init__(self, data=None, *args, **kwargs): self._size = 0 # Collect the args to pass to load, this lets child classes # override just load and get equivalent magic for the # constructor. if data is not None: if isinstance(data, bytes): data = BytesIO(data) elif not hasattr(data, 'read'): raise TypeError("VComment requires bytes or a file-like") start = data.tell() self.load(data, *args, **kwargs) self._size = data.tell() - start def load(self, fileobj, errors='replace', framing=True): """Parse a Vorbis comment from a file-like object. Arguments: errors (str): 'strict', 'replace', or 'ignore'. This affects Unicode decoding and how other malformed content is interpreted. framing (bool): if true, fail if a framing bit is not present Framing bits are required by the Vorbis comment specification, but are not used in FLAC Vorbis comment blocks. 
""" try: vendor_length = cdata.uint_le(fileobj.read(4)) self.vendor = fileobj.read(vendor_length).decode('utf-8', errors) count = cdata.uint_le(fileobj.read(4)) for i in range(count): length = cdata.uint_le(fileobj.read(4)) try: string = fileobj.read(length).decode('utf-8', errors) except (OverflowError, MemoryError): raise error("cannot read %d bytes, too large" % length) try: tag, value = string.split('=', 1) except ValueError as err: if errors == "ignore": continue elif errors == "replace": tag, value = u"unknown%d" % i, string else: reraise(VorbisEncodingError, err, sys.exc_info()[2]) try: tag = tag.encode('ascii', errors) except UnicodeEncodeError: raise VorbisEncodingError("invalid tag name %r" % tag) else: tag = tag.decode("ascii") if is_valid_key(tag): self.append((tag, value)) if framing and not bytearray(fileobj.read(1))[0] & 0x01: raise VorbisUnsetFrameError("framing bit was unset") except (cdata.error, TypeError): raise error("file is not a valid Vorbis comment") def validate(self): """Validate keys and values. Check to make sure every key used is a valid Vorbis key, and that every value used is a valid Unicode or UTF-8 string. If any invalid keys or values are found, a ValueError is raised. In Python 3 all keys and values have to be a string. """ if not isinstance(self.vendor, str): raise ValueError("vendor needs to be str") for key, value in self: try: if not is_valid_key(key): raise ValueError("%r is not a valid key" % key) except TypeError: raise ValueError("%r is not a valid key" % key) if not isinstance(value, str): err = "%r needs to be str for key %r" % (value, key) raise ValueError(err) return True def clear(self): """Clear all keys from the comment.""" for i in list(self): self.remove(i) def write(self, framing=True): """Return a string representation of the data. Validation is always performed, so calling this function on invalid data may raise a ValueError. Arguments: framing (bool): if true, append a framing bit (see load) """ self.validate() def _encode(value): if not isinstance(value, bytes): return value.encode('utf-8') return value f = BytesIO() vendor = _encode(self.vendor) f.write(cdata.to_uint_le(len(vendor))) f.write(vendor) f.write(cdata.to_uint_le(len(self))) for tag, value in self: tag = _encode(tag) value = _encode(value) comment = tag + b"=" + value f.write(cdata.to_uint_le(len(comment))) f.write(comment) if framing: f.write(b"\x01") return f.getvalue() def pprint(self): def _decode(value): if not isinstance(value, str): return value.decode('utf-8', 'replace') return value tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self] return u"\n".join(tags) class VCommentDict(VComment, DictMixin): """A VComment that looks like a dictionary. This object differs from a dictionary in two ways. First, len(comment) will still return the number of values, not the number of keys. Secondly, iterating through the object will iterate over (key, value) pairs, not keys. Since a key may have multiple values, the same value may appear multiple times while iterating. Since Vorbis comment keys are case-insensitive, all keys are normalized to lowercase ASCII. """ def __getitem__(self, key): """A list of values for the key. This is a copy, so comment['title'].append('a title') will not work. 
""" if isinstance(key, slice): return VComment.__getitem__(self, key) if not is_valid_key(key): raise ValueError key = key.lower() values = [value for (k, value) in self if k.lower() == key] if not values: raise KeyError(key) else: return values def __delitem__(self, key): """Delete all values associated with the key.""" if isinstance(key, slice): return VComment.__delitem__(self, key) if not is_valid_key(key): raise ValueError key = key.lower() to_delete = [x for x in self if x[0].lower() == key] if not to_delete: raise KeyError(key) else: for item in to_delete: self.remove(item) def __contains__(self, key): """Return true if the key has any values.""" if not is_valid_key(key): raise ValueError key = key.lower() for k, value in self: if k.lower() == key: return True else: return False def __setitem__(self, key, values): """Set a key's value or values. Setting a value overwrites all old ones. The value may be a list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 string. """ if isinstance(key, slice): return VComment.__setitem__(self, key, values) if not is_valid_key(key): raise ValueError if not isinstance(values, list): values = [values] try: del(self[key]) except KeyError: pass for value in values: self.append((key, value)) def keys(self): """Return all keys in the comment.""" return list(set([k.lower() for k, v in self])) def as_dict(self): """Return a copy of the comment data in a real dict.""" return dict([(key, self[key]) for key in self.keys()])
9,496
Python
.py
232
30.810345
77
0.585111
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,496
musepack.py
rembo10_headphones/lib/mutagen/musepack.py
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
# Copyright (C) 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

"""Musepack audio streams with APEv2 tags.

Musepack is an audio format originally based on the MPEG-1 Layer-2
algorithms. Stream versions 4 through 7 are supported.

For more information, see http://www.musepack.net/.
"""

__all__ = ["Musepack", "Open", "delete"]

import struct

from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen.id3._util import BitPaddedInt
from mutagen._util import cdata, convert_error, intround, endswith


class MusepackHeaderError(error):
    pass


RATES = [44100, 48000, 37800, 32000]


def _parse_sv8_int(fileobj, limit=9):
    """Read at most `limit` bytes from fileobj until a byte with a clear
    MSB is found. The 7 low bits of each byte are merged into a big
    endian uint.

    Raises ValueError if no byte with a clear MSB occurs within `limit`
    bytes, or EOFError if the file ends before the number is complete.

    Returns (parsed number, number of bytes read)
    """

    num = 0
    for i in range(limit):
        c = fileobj.read(1)
        if len(c) != 1:
            raise EOFError
        c = bytearray(c)
        num = (num << 7) | (c[0] & 0x7F)
        if not c[0] & 0x80:
            return num, i + 1
    if limit > 0:
        raise ValueError
    return 0, 0


def _calc_sv8_gain(gain):
    # 64.82 taken from mpcdec
    return 64.82 - gain / 256.0


def _calc_sv8_peak(peak):
    return (10 ** (peak / (256.0 * 20.0)) / 65535.0)


class MusepackInfo(StreamInfo):
    """MusepackInfo()

    Musepack stream information.

    Attributes:
        channels (`int`): number of audio channels
        length (`float`): file length in seconds, as a float
        sample_rate (`int`): audio sampling rate in Hz
        bitrate (`int`): audio bitrate, in bits per second
        version (`int`): Musepack stream version

    Optional Attributes (only present for stream versions 7 and 8):

    Attributes:
        title_gain (`float`): Replay Gain for this song
        title_peak (`float`): Peak data for this song
        album_gain (`float`): Replay Gain for this album
        album_peak (`float`): Peak data for this album

    The gains are floats, +/- some dB. The peaks are a percentage [0..1]
    of the maximum amplitude; to get a number comparable to VorbisGain,
    multiply the peak by 2.
""" @convert_error(IOError, MusepackHeaderError) def __init__(self, fileobj): """Raises MusepackHeaderError""" header = fileobj.read(4) if len(header) != 4: raise MusepackHeaderError("not a Musepack file") # Skip ID3v2 tags if header[:3] == b"ID3": header = fileobj.read(6) if len(header) != 6: raise MusepackHeaderError("not a Musepack file") size = 10 + BitPaddedInt(header[2:6]) fileobj.seek(size) header = fileobj.read(4) if len(header) != 4: raise MusepackHeaderError("not a Musepack file") if header.startswith(b"MPCK"): self.__parse_sv8(fileobj) else: self.__parse_sv467(fileobj) if not self.bitrate and self.length != 0: fileobj.seek(0, 2) self.bitrate = intround(fileobj.tell() * 8 / self.length) def __parse_sv8(self, fileobj): # SV8 http://trac.musepack.net/trac/wiki/SV8Specification key_size = 2 mandatory_packets = [b"SH", b"RG"] def check_frame_key(key): if ((len(frame_type) != key_size) or (not b'AA' <= frame_type <= b'ZZ')): raise MusepackHeaderError("Invalid frame key.") frame_type = fileobj.read(key_size) check_frame_key(frame_type) while frame_type not in (b"AP", b"SE") and mandatory_packets: try: frame_size, slen = _parse_sv8_int(fileobj) except (EOFError, ValueError): raise MusepackHeaderError("Invalid packet size.") data_size = frame_size - key_size - slen # packets can be at maximum data_size big and are padded with zeros if frame_type == b"SH": if frame_type not in mandatory_packets: raise MusepackHeaderError("Duplicate SH packet") mandatory_packets.remove(frame_type) self.__parse_stream_header(fileobj, data_size) elif frame_type == b"RG": if frame_type not in mandatory_packets: raise MusepackHeaderError("Duplicate RG packet") mandatory_packets.remove(frame_type) self.__parse_replaygain_packet(fileobj, data_size) else: fileobj.seek(data_size, 1) frame_type = fileobj.read(key_size) check_frame_key(frame_type) if mandatory_packets: raise MusepackHeaderError("Missing mandatory packets: %s." 
% ", ".join(map(repr, mandatory_packets))) self.length = float(self.samples) / self.sample_rate self.bitrate = 0 def __parse_stream_header(self, fileobj, data_size): # skip CRC fileobj.seek(4, 1) remaining_size = data_size - 4 try: self.version = bytearray(fileobj.read(1))[0] except (TypeError, IndexError): raise MusepackHeaderError("SH packet ended unexpectedly.") remaining_size -= 1 try: samples, l1 = _parse_sv8_int(fileobj) samples_skip, l2 = _parse_sv8_int(fileobj) except (EOFError, ValueError): raise MusepackHeaderError( "SH packet: Invalid sample counts.") self.samples = samples - samples_skip remaining_size -= l1 + l2 data = fileobj.read(remaining_size) if len(data) != remaining_size or len(data) < 2: raise MusepackHeaderError("SH packet ended unexpectedly.") rate_index = (bytearray(data)[0] >> 5) try: self.sample_rate = RATES[rate_index] except IndexError: raise MusepackHeaderError("Invalid sample rate") self.channels = (bytearray(data)[1] >> 4) + 1 def __parse_replaygain_packet(self, fileobj, data_size): data = fileobj.read(data_size) if data_size < 9: raise MusepackHeaderError("Invalid RG packet size.") if len(data) != data_size: raise MusepackHeaderError("RG packet ended unexpectedly.") title_gain = cdata.short_be(data[1:3]) title_peak = cdata.short_be(data[3:5]) album_gain = cdata.short_be(data[5:7]) album_peak = cdata.short_be(data[7:9]) if title_gain: self.title_gain = _calc_sv8_gain(title_gain) if title_peak: self.title_peak = _calc_sv8_peak(title_peak) if album_gain: self.album_gain = _calc_sv8_gain(album_gain) if album_peak: self.album_peak = _calc_sv8_peak(album_peak) def __parse_sv467(self, fileobj): fileobj.seek(-4, 1) header = fileobj.read(32) if len(header) != 32: raise MusepackHeaderError("not a Musepack file") # SV7 if header.startswith(b"MP+"): self.version = bytearray(header)[3] & 0xF if self.version < 7: raise MusepackHeaderError("not a Musepack file") frames = cdata.uint_le(header[4:8]) flags = cdata.uint_le(header[8:12]) self.title_peak, self.title_gain = struct.unpack( "<Hh", header[12:16]) self.album_peak, self.album_gain = struct.unpack( "<Hh", header[16:20]) self.title_gain /= 100.0 self.album_gain /= 100.0 self.title_peak /= 65535.0 self.album_peak /= 65535.0 self.sample_rate = RATES[(flags >> 16) & 0x0003] self.bitrate = 0 # SV4-SV6 else: header_dword = cdata.uint_le(header[0:4]) self.version = (header_dword >> 11) & 0x03FF if self.version < 4 or self.version > 6: raise MusepackHeaderError("not a Musepack file") self.bitrate = (header_dword >> 23) & 0x01FF self.sample_rate = 44100 if self.version >= 5: frames = cdata.uint_le(header[4:8]) else: frames = cdata.ushort_le(header[6:8]) if self.version < 6: frames -= 1 self.channels = 2 self.length = float(frames * 1152 - 576) / self.sample_rate def pprint(self): rg_data = [] if hasattr(self, "title_gain"): rg_data.append(u"%+0.2f (title)" % self.title_gain) if hasattr(self, "album_gain"): rg_data.append(u"%+0.2f (album)" % self.album_gain) rg_data = (rg_data and ", Gain: " + ", ".join(rg_data)) or "" return u"Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % ( self.version, self.length, self.sample_rate, self.bitrate, rg_data) class Musepack(APEv2File): """Musepack(filething) Arguments: filething (filething) Attributes: info (`MusepackInfo`) """ _Info = MusepackInfo _mimes = ["audio/x-musepack", "audio/x-mpc"] @staticmethod def score(filename, fileobj, header): filename = filename.lower() return (header.startswith(b"MP+") + header.startswith(b"MPCK") + endswith(filename, b".mpc")) Open = Musepack
9,845
Python
.py
231
32.874459
79
0.595373
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,497
oggspeex.py
rembo10_headphones/lib/mutagen/oggspeex.py
# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. """Read and write Ogg Speex comments. This module handles Speex files wrapped in an Ogg bitstream. The first Speex stream found is used. Read more about Ogg Speex at http://www.speex.org/. This module is based on the specification at http://www.speex.org/manual2/node7.html and clarifications after personal communication with Jean-Marc, http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html. """ __all__ = ["OggSpeex", "Open", "delete"] from mutagen import StreamInfo from mutagen._vorbis import VCommentDict from mutagen.ogg import OggPage, OggFileType, error as OggError from mutagen._util import cdata, get_size, loadfile, convert_error from mutagen._tags import PaddingInfo class error(OggError): pass class OggSpeexHeaderError(error): pass class OggSpeexInfo(StreamInfo): """OggSpeexInfo() Ogg Speex stream information. Attributes: length (`float`): file length in seconds, as a float channels (`int`): number of channels bitrate (`int`): nominal bitrate in bits per second. The reference encoder does not set the bitrate; in this case, the bitrate will be 0. """ length = 0 channels = 0 bitrate = 0 def __init__(self, fileobj): page = OggPage(fileobj) while not page.packets[0].startswith(b"Speex "): page = OggPage(fileobj) if not page.first: raise OggSpeexHeaderError( "page has ID header, but doesn't start a stream") self.sample_rate = cdata.uint_le(page.packets[0][36:40]) self.channels = cdata.uint_le(page.packets[0][48:52]) self.bitrate = max(0, cdata.int_le(page.packets[0][52:56])) self.serial = page.serial def _post_tags(self, fileobj): page = OggPage.find_last(fileobj, self.serial, finishing=True) if page is None: raise OggSpeexHeaderError self.length = page.position / float(self.sample_rate) def pprint(self): return u"Ogg Speex, %.2f seconds" % self.length class OggSpeexVComment(VCommentDict): """Speex comments embedded in an Ogg bitstream.""" def __init__(self, fileobj, info): pages = [] complete = False while not complete: page = OggPage(fileobj) if page.serial == info.serial: pages.append(page) complete = page.complete or (len(page.packets) > 1) data = OggPage.to_packets(pages)[0] super(OggSpeexVComment, self).__init__(data, framing=False) self._padding = len(data) - self._size def _inject(self, fileobj, padding_func): """Write tag data into the Speex comment packet/page.""" fileobj.seek(0) # Find the first header page, with the stream info. # Use it to get the serial number. page = OggPage(fileobj) while not page.packets[0].startswith(b"Speex "): page = OggPage(fileobj) # Look for the next page with that serial number, it'll start # the comment packet. serial = page.serial page = OggPage(fileobj) while page.serial != serial: page = OggPage(fileobj) # Then find all the pages with the comment packet. 
old_pages = [page] while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): page = OggPage(fileobj) if page.serial == old_pages[0].serial: old_pages.append(page) packets = OggPage.to_packets(old_pages, strict=False) content_size = get_size(fileobj) - len(packets[0]) # approx vcomment_data = self.write(framing=False) padding_left = len(packets[0]) - len(vcomment_data) info = PaddingInfo(padding_left, content_size) new_padding = info._get_padding(padding_func) # Set the new comment packet. packets[0] = vcomment_data + b"\x00" * new_padding new_pages = OggPage._from_packets_try_preserve(packets, old_pages) OggPage.replace(fileobj, old_pages, new_pages) class OggSpeex(OggFileType): """OggSpeex(filething) An Ogg Speex file. Arguments: filething (filething) Attributes: info (`OggSpeexInfo`) tags (`mutagen._vorbis.VCommentDict`) """ _Info = OggSpeexInfo _Tags = OggSpeexVComment _Error = OggSpeexHeaderError _mimes = ["audio/x-speex"] info = None tags = None @staticmethod def score(filename, fileobj, header): return (header.startswith(b"OggS") * (b"Speex " in header)) Open = OggSpeex @convert_error(IOError, error) @loadfile(method=False, writable=True) def delete(filething): """ delete(filething) Arguments: filething (filething) Raises: mutagen.MutagenError Remove tags from a file. """ t = OggSpeex(filething) filething.fileobj.seek(0) t.delete(filething)
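

# A minimal usage sketch (the file name below is hypothetical). OggSpeex
# parses the first Speex stream found in the Ogg container; `info` holds
# the stream header fields and `tags` is the VCommentDict read from the
# comment packet.
if __name__ == "__main__":
    f = OggSpeex("example.spx")       # hypothetical input file
    print(f.info.pprint())            # e.g. "Ogg Speex, 12.34 seconds"
    f.tags[u"title"] = u"An Example"
    f.save()                          # rewrites the comment pages in place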
5,235
Python
.py
131
32.969466
77
0.658431
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,498
monkeysaudio.py
rembo10_headphones/lib/mutagen/monkeysaudio.py
# -*- coding: utf-8 -*- # Copyright (C) 2006 Lukas Lalinsky # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. """Monkey's Audio streams with APEv2 tags. Monkey's Audio is a very efficient lossless audio compressor developed by Matt Ashland. For more information, see http://www.monkeysaudio.com/. """ __all__ = ["MonkeysAudio", "Open", "delete"] import struct from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete from mutagen._util import cdata, convert_error, endswith class MonkeysAudioHeaderError(error): pass class MonkeysAudioInfo(StreamInfo): """MonkeysAudioInfo() Monkey's Audio stream information. Attributes: channels (`int`): number of audio channels length (`float`): file length in seconds, as a float sample_rate (`int`): audio sampling rate in Hz bits_per_sample (`int`): bits per sample version (`float`): Monkey's Audio stream version, as a float (eg: 3.99) """ @convert_error(IOError, MonkeysAudioHeaderError) def __init__(self, fileobj): """Raises MonkeysAudioHeaderError""" header = fileobj.read(76) if len(header) != 76 or not header.startswith(b"MAC "): raise MonkeysAudioHeaderError("not a Monkey's Audio file") self.version = cdata.ushort_le(header[4:6]) if self.version >= 3980: (blocks_per_frame, final_frame_blocks, total_frames, self.bits_per_sample, self.channels, self.sample_rate) = struct.unpack("<IIIHHI", header[56:76]) else: compression_level = cdata.ushort_le(header[6:8]) self.channels, self.sample_rate = struct.unpack( "<HI", header[10:16]) total_frames, final_frame_blocks = struct.unpack( "<II", header[24:32]) if self.version >= 3950: blocks_per_frame = 73728 * 4 elif self.version >= 3900 or (self.version >= 3800 and compression_level == 4): blocks_per_frame = 73728 else: blocks_per_frame = 9216 self.bits_per_sample = 0 if header[48:].startswith(b"WAVEfmt"): self.bits_per_sample = struct.unpack("<H", header[74:76])[0] self.version /= 1000.0 self.length = 0.0 if (self.sample_rate != 0) and (total_frames > 0): total_blocks = ((total_frames - 1) * blocks_per_frame + final_frame_blocks) self.length = float(total_blocks) / self.sample_rate def pprint(self): return u"Monkey's Audio %.2f, %.2f seconds, %d Hz" % ( self.version, self.length, self.sample_rate) class MonkeysAudio(APEv2File): """MonkeysAudio(filething) Arguments: filething (filething) Attributes: info (`MonkeysAudioInfo`) """ _Info = MonkeysAudioInfo _mimes = ["audio/ape", "audio/x-ape"] @staticmethod def score(filename, fileobj, header): return header.startswith(b"MAC ") + endswith(filename.lower(), ".ape") Open = MonkeysAudio
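

# A worked example of the length formula above (illustrative numbers):
# every frame except the last holds blocks_per_frame audio blocks, so a
# stream of 10 frames whose final frame holds 1000 blocks plays for
# ((10 - 1) * blocks_per_frame + 1000) / sample_rate seconds.
if __name__ == "__main__":
    blocks_per_frame = 73728 * 4      # stream versions >= 3.95
    total_blocks = (10 - 1) * blocks_per_frame + 1000
    print(total_blocks / 44100.0)     # ~60.21 seconds at 44100 Hz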
3,352
Python
.py
78
34.551282
79
0.627499
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
8,499
trueaudio.py
rembo10_headphones/lib/mutagen/trueaudio.py
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

"""True Audio audio stream information and tags.

True Audio is a lossless format designed for real-time encoding and
decoding. This module is based on the documentation at
http://www.true-audio.com/TTA_Lossless_Audio_Codec\\_-_Format_Description

True Audio files use ID3 tags.
"""

__all__ = ["TrueAudio", "Open", "delete", "EasyTrueAudio"]

from mutagen import StreamInfo
from mutagen.id3 import ID3FileType, delete
from mutagen._util import cdata, MutagenError, convert_error, endswith


class error(MutagenError):
    pass


class TrueAudioHeaderError(error):
    pass


class TrueAudioInfo(StreamInfo):
    """TrueAudioInfo()

    True Audio stream information.

    Attributes:
        length (`float`): audio length, in seconds
        sample_rate (`int`): audio sample rate, in Hz
    """

    @convert_error(IOError, TrueAudioHeaderError)
    def __init__(self, fileobj, offset):
        """Raises TrueAudioHeaderError"""

        fileobj.seek(offset or 0)
        header = fileobj.read(18)
        if len(header) != 18 or not header.startswith(b"TTA"):
            raise TrueAudioHeaderError("TTA header not found")
        self.sample_rate = cdata.int_le(header[10:14])
        samples = cdata.uint_le(header[14:18])
        self.length = float(samples) / self.sample_rate

    def pprint(self):
        return u"True Audio, %.2f seconds, %d Hz." % (
            self.length, self.sample_rate)


class TrueAudio(ID3FileType):
    """TrueAudio(filething, ID3=None)

    A True Audio file.

    Arguments:
        filething (filething)
        ID3 (mutagen.id3.ID3)

    Attributes:
        info (`TrueAudioInfo`)
        tags (`mutagen.id3.ID3`)
    """

    _Info = TrueAudioInfo
    _mimes = ["audio/x-tta"]

    @staticmethod
    def score(filename, fileobj, header):
        return (header.startswith(b"ID3") + header.startswith(b"TTA") +
                endswith(filename.lower(), ".tta") * 2)


Open = TrueAudio


class EasyTrueAudio(TrueAudio):
    """EasyTrueAudio(filething, ID3=None)

    Like MP3, but uses EasyID3 for tags.

    Arguments:
        filething (filething)
        ID3 (mutagen.id3.ID3)

    Attributes:
        info (`TrueAudioInfo`)
        tags (`mutagen.easyid3.EasyID3`)
    """

    from mutagen.easyid3 import EasyID3 as ID3
    ID3 = ID3
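

# A minimal tagging sketch (the file name below is hypothetical).
# EasyTrueAudio accepts friendly keys such as "title" and maps them onto
# ID3 frames through EasyID3.
if __name__ == "__main__":
    audio = EasyTrueAudio("example.tta")  # hypothetical input file
    if audio.tags is None:                # the file may have no ID3 tag yet
        audio.add_tags()
    audio[u"title"] = u"An Example"
    audio.save()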
2,594
Python
.py
70
31.442857
73
0.683915
rembo10/headphones
3,370
601
527
GPL-3.0
9/5/2024, 5:10:38 PM (Europe/Amsterdam)