id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
20,800
base.py
evilhero_mylar/lib/apscheduler/schedulers/base.py
from __future__ import print_function from abc import ABCMeta, abstractmethod from collections import MutableMapping from threading import RLock from datetime import datetime, timedelta from logging import getLogger import warnings import sys from pkg_resources import iter_entry_points from tzlocal import get_localzone import six from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.job import Job from apscheduler.triggers.base import BaseTrigger from apscheduler.util import asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined from apscheduler.events import ( SchedulerEvent, JobEvent, JobSubmissionEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN, EVENT_JOBSTORE_ADDED, EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED, EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED, EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED) #: constant indicating a scheduler's stopped state STATE_STOPPED = 0 #: constant indicating a scheduler's running state (started and processing jobs) STATE_RUNNING = 1 #: constant indicating a scheduler's paused state (started but not processing jobs) STATE_PAUSED = 2 class BaseScheduler(six.with_metaclass(ABCMeta)): """ Abstract base class for all schedulers. 
Takes the following keyword arguments: :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to apscheduler.scheduler) :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone) :param int|float jobstore_retry_interval: the minimum number of seconds to wait between retries in the scheduler's main loop if the job store raises an exception when getting the list of due jobs :param dict job_defaults: default values for newly added jobs :param dict jobstores: a dictionary of job store alias -> job store instance or configuration dict :param dict executors: a dictionary of executor alias -> executor instance or configuration dict :ivar int state: current running state of the scheduler (one of the following constants from ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``) .. seealso:: :ref:`scheduler-config` """ _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers')) _trigger_classes = {} _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors')) _executor_classes = {} _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores')) _jobstore_classes = {} # # Public API # def __init__(self, gconfig={}, **options): super(BaseScheduler, self).__init__() self._executors = {} self._executors_lock = self._create_lock() self._jobstores = {} self._jobstores_lock = self._create_lock() self._listeners = [] self._listeners_lock = self._create_lock() self._pending_jobs = [] self.state = STATE_STOPPED self.configure(gconfig, **options) def configure(self, gconfig={}, prefix='apscheduler.', **options): """ Reconfigures the scheduler with the given options. Can only be done when the scheduler isn't running. 
:param dict gconfig: a "global" configuration dictionary whose values can be overridden by keyword arguments to this method :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with this string (pass an empty string or ``None`` to use all keys) :raises SchedulerAlreadyRunningError: if the scheduler is already running """ if self.state != STATE_STOPPED: raise SchedulerAlreadyRunningError # If a non-empty prefix was given, strip it from the keys in the # global configuration dict if prefix: prefixlen = len(prefix) gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig) if key.startswith(prefix)) # Create a structure from the dotted options # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}}) config = {} for key, value in six.iteritems(gconfig): parts = key.split('.') parent = config key = parts.pop(0) while parts: parent = parent.setdefault(key, {}) key = parts.pop(0) parent[key] = value # Override any options with explicit keyword arguments config.update(options) self._configure(config) def start(self, paused=False): """ Start the configured executors and job stores and begin processing scheduled jobs. 
:param bool paused: if ``True``, don't start job processing until :meth:`resume` is called :raises SchedulerAlreadyRunningError: if the scheduler is already running """ if self.state != STATE_STOPPED: raise SchedulerAlreadyRunningError with self._executors_lock: # Create a default executor if nothing else is configured if 'default' not in self._executors: self.add_executor(self._create_default_executor(), 'default') # Start all the executors for alias, executor in six.iteritems(self._executors): executor.start(self, alias) with self._jobstores_lock: # Create a default job store if nothing else is configured if 'default' not in self._jobstores: self.add_jobstore(self._create_default_jobstore(), 'default') # Start all the job stores for alias, store in six.iteritems(self._jobstores): store.start(self, alias) # Schedule all pending jobs for job, jobstore_alias, replace_existing in self._pending_jobs: self._real_add_job(job, jobstore_alias, replace_existing) del self._pending_jobs[:] self.state = STATE_PAUSED if paused else STATE_RUNNING self._logger.info('Scheduler started') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START)) if not paused: self.wakeup() @abstractmethod def shutdown(self, wait=True): """ Shuts down the scheduler, along with its executors and job stores. Does not interrupt any currently running jobs. 
:param bool wait: ``True`` to wait until all currently executing jobs have finished :raises SchedulerNotRunningError: if the scheduler has not been started yet """ if self.state == STATE_STOPPED: raise SchedulerNotRunningError self.state = STATE_STOPPED with self._jobstores_lock, self._executors_lock: # Shut down all executors for executor in six.itervalues(self._executors): executor.shutdown(wait) # Shut down all job stores for jobstore in six.itervalues(self._jobstores): jobstore.shutdown() self._logger.info('Scheduler has been shut down') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)) def pause(self): """ Pause job processing in the scheduler. This will prevent the scheduler from waking up to do job processing until :meth:`resume` is called. It will not however stop any already running job processing. """ if self.state == STATE_STOPPED: raise SchedulerNotRunningError elif self.state == STATE_RUNNING: self.state = STATE_PAUSED self._logger.info('Paused scheduler job processing') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED)) def resume(self): """Resume job processing in the scheduler.""" if self.state == STATE_STOPPED: raise SchedulerNotRunningError elif self.state == STATE_PAUSED: self.state = STATE_RUNNING self._logger.info('Resumed scheduler job processing') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED)) self.wakeup() @property def running(self): """ Return ``True`` if the scheduler has been started. This is a shortcut for ``scheduler.state != STATE_STOPPED``. """ return self.state != STATE_STOPPED def add_executor(self, executor, alias='default', **executor_opts): """ Adds an executor to this scheduler. Any extra keyword arguments will be passed to the executor plugin's constructor, assuming that the first argument is the name of an executor plugin. 
:param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor instance or the name of an executor plugin :param str|unicode alias: alias for the scheduler :raises ValueError: if there is already an executor by the given alias """ with self._executors_lock: if alias in self._executors: raise ValueError('This scheduler already has an executor by the alias of "%s"' % alias) if isinstance(executor, BaseExecutor): self._executors[alias] = executor elif isinstance(executor, six.string_types): self._executors[alias] = executor = self._create_plugin_instance( 'executor', executor, executor_opts) else: raise TypeError('Expected an executor instance or a string, got %s instead' % executor.__class__.__name__) # Start the executor right away if the scheduler is running if self.state != STATE_STOPPED: executor.start(self, alias) self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias)) def remove_executor(self, alias, shutdown=True): """ Removes the executor by the given alias from this scheduler. :param str|unicode alias: alias of the executor :param bool shutdown: ``True`` to shut down the executor after removing it """ with self._executors_lock: executor = self._lookup_executor(alias) del self._executors[alias] if shutdown: executor.shutdown() self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias)) def add_jobstore(self, jobstore, alias='default', **jobstore_opts): """ Adds a job store to this scheduler. Any extra keyword arguments will be passed to the job store plugin's constructor, assuming that the first argument is the name of a job store plugin. 
:param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added :param str|unicode alias: alias for the job store :raises ValueError: if there is already a job store by the given alias """ with self._jobstores_lock: if alias in self._jobstores: raise ValueError('This scheduler already has a job store by the alias of "%s"' % alias) if isinstance(jobstore, BaseJobStore): self._jobstores[alias] = jobstore elif isinstance(jobstore, six.string_types): self._jobstores[alias] = jobstore = self._create_plugin_instance( 'jobstore', jobstore, jobstore_opts) else: raise TypeError('Expected a job store instance or a string, got %s instead' % jobstore.__class__.__name__) # Start the job store right away if the scheduler isn't stopped if self.state != STATE_STOPPED: jobstore.start(self, alias) # Notify listeners that a new job store has been added self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias)) # Notify the scheduler so it can scan the new job store for jobs if self.state != STATE_STOPPED: self.wakeup() def remove_jobstore(self, alias, shutdown=True): """ Removes the job store by the given alias from this scheduler. :param str|unicode alias: alias of the job store :param bool shutdown: ``True`` to shut down the job store after removing it """ with self._jobstores_lock: jobstore = self._lookup_jobstore(alias) del self._jobstores[alias] if shutdown: jobstore.shutdown() self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias)) def add_listener(self, callback, mask=EVENT_ALL): """ add_listener(callback, mask=EVENT_ALL) Adds a listener for scheduler events. When a matching event occurs, ``callback`` is executed with the event object as its sole argument. If the ``mask`` parameter is not provided, the callback will receive events of all types. :param callback: any callable that takes one argument :param int mask: bitmask that indicates which events should be listened to .. seealso:: :mod:`apscheduler.events` .. 
seealso:: :ref:`scheduler-events` """ with self._listeners_lock: self._listeners.append((callback, mask)) def remove_listener(self, callback): """Removes a previously added event listener.""" with self._listeners_lock: for i, (cb, _) in enumerate(self._listeners): if callback == cb: del self._listeners[i] def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', executor='default', replace_existing=False, **trigger_args): """ add_job(func, trigger=None, args=None, kwargs=None, id=None, \ name=None, misfire_grace_time=undefined, coalesce=undefined, \ max_instances=undefined, next_run_time=undefined, \ jobstore='default', executor='default', \ replace_existing=False, **trigger_args) Adds the given job to the job list and wakes up the scheduler if it's already running. Any option that defaults to ``undefined`` will be replaced with the corresponding default value when the job is scheduled (which happens when the scheduler is started, or immediately if the scheduler is already running). The ``func`` argument can be given either as a callable object or a textual reference in the ``package.module:some.object`` format, where the first half (separated by ``:``) is an importable module and the second half is a reference to the callable object, relative to the module. The ``trigger`` argument can either be: #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case any extra keyword arguments to this method are passed on to the trigger's constructor #. 
an instance of a trigger class :param func: callable (or a textual reference to one) to run at the given time :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when ``func`` is called :param list|tuple args: list of positional arguments to call func with :param dict kwargs: dict of keyword arguments to call func with :param str|unicode id: explicit identifier for the job (for modifying it later) :param str|unicode name: textual description of the job :param int misfire_grace_time: seconds after the designated runtime that the job is still allowed to be run :param bool coalesce: run once instead of many times if the scheduler determines that the job should be run more than once in succession :param int max_instances: maximum number of concurrently running instances allowed for this job :param datetime next_run_time: when to first run the job, regardless of the trigger (pass ``None`` to add the job as paused) :param str|unicode jobstore: alias of the job store to store the job in :param str|unicode executor: alias of the executor to run the job with :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` (but retain the number of runs from the existing one) :rtype: Job """ job_kwargs = { 'trigger': self._create_trigger(trigger, trigger_args), 'executor': executor, 'func': func, 'args': tuple(args) if args is not None else (), 'kwargs': dict(kwargs) if kwargs is not None else {}, 'id': id, 'name': name, 'misfire_grace_time': misfire_grace_time, 'coalesce': coalesce, 'max_instances': max_instances, 'next_run_time': next_run_time } job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if value is not undefined) job = Job(self, **job_kwargs) # Don't really add jobs to job stores before the scheduler is up and running with self._jobstores_lock: if self.state == STATE_STOPPED: self._pending_jobs.append((job, jobstore, replace_existing)) self._logger.info('Adding job tentatively -- it will 
be properly scheduled when ' 'the scheduler starts') else: self._real_add_job(job, jobstore, replace_existing) return job def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', executor='default', **trigger_args): """ scheduled_job(trigger, args=None, kwargs=None, id=None, \ name=None, misfire_grace_time=undefined, \ coalesce=undefined, max_instances=undefined, \ next_run_time=undefined, jobstore='default', \ executor='default',**trigger_args) A decorator version of :meth:`add_job`, except that ``replace_existing`` is always ``True``. .. important:: The ``id`` argument must be given if scheduling a job in a persistent job store. The scheduler cannot, however, enforce this requirement. """ def inner(func): self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce, max_instances, next_run_time, jobstore, executor, True, **trigger_args) return func return inner def modify_job(self, job_id, jobstore=None, **changes): """ Modifies the properties of a single job. Modifications are passed to this method as extra keyword arguments. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :return Job: the relevant job instance """ with self._jobstores_lock: job, jobstore = self._lookup_job(job_id, jobstore) job._modify(**changes) if jobstore: self._lookup_jobstore(jobstore).update_job(job) self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore)) # Wake up the scheduler since the job's next run time may have been changed if self.state == STATE_RUNNING: self.wakeup() return job def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args): """ Constructs a new trigger for a job and updates its next run time. Extra keyword arguments are passed directly to the trigger's constructor. 
:param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :param trigger: alias of the trigger type or a trigger instance :return Job: the relevant job instance """ trigger = self._create_trigger(trigger, trigger_args) now = datetime.now(self.timezone) next_run_time = trigger.get_next_fire_time(None, now) return self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time) def pause_job(self, job_id, jobstore=None): """ Causes the given job not to be executed until it is explicitly resumed. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :return Job: the relevant job instance """ return self.modify_job(job_id, jobstore, next_run_time=None) def resume_job(self, job_id, jobstore=None): """ Resumes the schedule of the given job, or removes the job if its schedule is finished. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no next run time could be calculated and the job was removed """ with self._jobstores_lock: job, jobstore = self._lookup_job(job_id, jobstore) now = datetime.now(self.timezone) next_run_time = job.trigger.get_next_fire_time(None, now) if next_run_time: return self.modify_job(job_id, jobstore, next_run_time=next_run_time) else: self.remove_job(job.id, jobstore) def get_jobs(self, jobstore=None, pending=None): """ Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled jobs, either from a specific job store or from all of them. If the scheduler has not been started yet, only pending jobs can be returned because the job stores haven't been started yet either. 
:param str|unicode jobstore: alias of the job store :param bool pending: **DEPRECATED** :rtype: list[Job] """ if pending is not None: warnings.warn('The "pending" option is deprecated -- get_jobs() always returns ' 'pending jobs if the scheduler has been started and scheduled jobs ' 'otherwise', DeprecationWarning) with self._jobstores_lock: jobs = [] if self.state == STATE_STOPPED: for job, alias, replace_existing in self._pending_jobs: if jobstore is None or alias == jobstore: jobs.append(job) else: for alias, store in six.iteritems(self._jobstores): if jobstore is None or alias == jobstore: jobs.extend(store.get_all_jobs()) return jobs def get_job(self, job_id, jobstore=None): """ Returns the Job that matches the given ``job_id``. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that most likely contains the job :return: the Job by the given ID, or ``None`` if it wasn't found :rtype: Job """ with self._jobstores_lock: try: return self._lookup_job(job_id, jobstore)[0] except JobLookupError: return def remove_job(self, job_id, jobstore=None): """ Removes a job, preventing it from being run any more. 
:param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :raises JobLookupError: if the job was not found """ jobstore_alias = None with self._jobstores_lock: if self.state == STATE_STOPPED: # Check if the job is among the pending jobs if self.state == STATE_STOPPED: for i, (job, alias, replace_existing) in enumerate(self._pending_jobs): if job.id == job_id and jobstore in (None, alias): del self._pending_jobs[i] jobstore_alias = alias break else: # Otherwise, try to remove it from each store until it succeeds or we run out of # stores to check for alias, store in six.iteritems(self._jobstores): if jobstore in (None, alias): try: store.remove_job(job_id) jobstore_alias = alias break except JobLookupError: continue if jobstore_alias is None: raise JobLookupError(job_id) # Notify listeners that a job has been removed event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias) self._dispatch_event(event) self._logger.info('Removed job %s', job_id) def remove_all_jobs(self, jobstore=None): """ Removes all jobs from the specified job store, or all job stores if none is given. :param str|unicode jobstore: alias of the job store """ with self._jobstores_lock: if self.state == STATE_STOPPED: if jobstore: self._pending_jobs = [pending for pending in self._pending_jobs if pending[1] != jobstore] else: self._pending_jobs = [] else: for alias, store in six.iteritems(self._jobstores): if jobstore in (None, alias): store.remove_all_jobs() self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore)) def print_jobs(self, jobstore=None, out=None): """ print_jobs(jobstore=None, out=sys.stdout) Prints out a textual listing of all jobs currently scheduled on either all job stores or just a specific one. 
:param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is given) """ out = out or sys.stdout with self._jobstores_lock: if self.state == STATE_STOPPED: print(u'Pending jobs:', file=out) if self._pending_jobs: for job, jobstore_alias, replace_existing in self._pending_jobs: if jobstore in (None, jobstore_alias): print(u' %s' % job, file=out) else: print(u' No pending jobs', file=out) else: for alias, store in sorted(six.iteritems(self._jobstores)): if jobstore in (None, alias): print(u'Jobstore %s:' % alias, file=out) jobs = store.get_all_jobs() if jobs: for job in jobs: print(u' %s' % job, file=out) else: print(u' No scheduled jobs', file=out) @abstractmethod def wakeup(self): """ Notifies the scheduler that there may be jobs due for execution. Triggers :meth:`_process_jobs` to be run in an implementation specific manner. """ # # Private API # def _configure(self, config): # Set general options self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler') self.timezone = astimezone(config.pop('timezone', None)) or get_localzone() self.jobstore_retry_interval = float(config.pop('jobstore_retry_interval', 10)) # Set the job defaults job_defaults = config.get('job_defaults', {}) self._job_defaults = { 'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)), 'coalesce': asbool(job_defaults.get('coalesce', True)), 'max_instances': asint(job_defaults.get('max_instances', 1)) } # Configure executors self._executors.clear() for alias, value in six.iteritems(config.get('executors', {})): if isinstance(value, BaseExecutor): self.add_executor(value, alias) elif isinstance(value, MutableMapping): executor_class = value.pop('class', None) plugin = value.pop('type', None) if plugin: executor = self._create_plugin_instance('executor', plugin, value) elif executor_class: cls = maybe_ref(executor_class) executor = 
cls(**value) else: raise ValueError( 'Cannot create executor "%s" -- either "type" or "class" must be defined' % alias) self.add_executor(executor, alias) else: raise TypeError( "Expected executor instance or dict for executors['%s'], got %s instead" % (alias, value.__class__.__name__)) # Configure job stores self._jobstores.clear() for alias, value in six.iteritems(config.get('jobstores', {})): if isinstance(value, BaseJobStore): self.add_jobstore(value, alias) elif isinstance(value, MutableMapping): jobstore_class = value.pop('class', None) plugin = value.pop('type', None) if plugin: jobstore = self._create_plugin_instance('jobstore', plugin, value) elif jobstore_class: cls = maybe_ref(jobstore_class) jobstore = cls(**value) else: raise ValueError( 'Cannot create job store "%s" -- either "type" or "class" must be ' 'defined' % alias) self.add_jobstore(jobstore, alias) else: raise TypeError( "Expected job store instance or dict for jobstores['%s'], got %s instead" % (alias, value.__class__.__name__)) def _create_default_executor(self): """Creates a default executor store, specific to the particular scheduler type.""" return ThreadPoolExecutor() def _create_default_jobstore(self): """Creates a default job store, specific to the particular scheduler type.""" return MemoryJobStore() def _lookup_executor(self, alias): """ Returns the executor instance by the given name from the list of executors that were added to this scheduler. :type alias: str :raises KeyError: if no executor by the given alias is not found """ try: return self._executors[alias] except KeyError: raise KeyError('No such executor: %s' % alias) def _lookup_jobstore(self, alias): """ Returns the job store instance by the given name from the list of job stores that were added to this scheduler. 
:type alias: str :raises KeyError: if no job store by the given alias is not found """ try: return self._jobstores[alias] except KeyError: raise KeyError('No such job store: %s' % alias) def _lookup_job(self, job_id, jobstore_alias): """ Finds a job by its ID. :type job_id: str :param str jobstore_alias: alias of a job store to look in :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of a pending job) :raises JobLookupError: if no job by the given ID is found. """ if self.state == STATE_STOPPED: # Check if the job is among the pending jobs for job, alias, replace_existing in self._pending_jobs: if job.id == job_id: return job, None else: # Look in all job stores for alias, store in six.iteritems(self._jobstores): if jobstore_alias in (None, alias): job = store.lookup_job(job_id) if job is not None: return job, alias raise JobLookupError(job_id) def _dispatch_event(self, event): """ Dispatches the given event to interested listeners. :param SchedulerEvent event: the event to send """ with self._listeners_lock: listeners = tuple(self._listeners) for cb, mask in listeners: if event.code & mask: try: cb(event) except: self._logger.exception('Error notifying listener') def _real_add_job(self, job, jobstore_alias, replace_existing): """ :param Job job: the job to add :param bool replace_existing: ``True`` to use update_job() in case the job already exists in the store """ # Fill in undefined values with defaults replacements = {} for key, value in six.iteritems(self._job_defaults): if not hasattr(job, key): replacements[key] = value # Calculate the next run time if there is none defined if not hasattr(job, 'next_run_time'): now = datetime.now(self.timezone) replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now) # Apply any replacements job._modify(**replacements) # Add the job to the given job store store = self._lookup_jobstore(jobstore_alias) try: store.add_job(job) except ConflictingIdError: if 
replace_existing: store.update_job(job) else: raise # Mark the job as no longer pending job._jobstore_alias = jobstore_alias # Notify listeners that a new job has been added event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias) self._dispatch_event(event) self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias) # Notify the scheduler about the new job if self.state == STATE_RUNNING: self.wakeup() def _create_plugin_instance(self, type_, alias, constructor_kwargs): """Creates an instance of the given plugin type, loading the plugin first if necessary.""" plugin_container, class_container, base_class = { 'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger), 'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore), 'executor': (self._executor_plugins, self._executor_classes, BaseExecutor) }[type_] try: plugin_cls = class_container[alias] except KeyError: if alias in plugin_container: plugin_cls = class_container[alias] = plugin_container[alias].load() if not issubclass(plugin_cls, base_class): raise TypeError('The {0} entry point does not point to a {0} class'. 
format(type_)) else: raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias)) return plugin_cls(**constructor_kwargs) def _create_trigger(self, trigger, trigger_args): if isinstance(trigger, BaseTrigger): return trigger elif trigger is None: trigger = 'date' elif not isinstance(trigger, six.string_types): raise TypeError('Expected a trigger instance or string, got %s instead' % trigger.__class__.__name__) # Use the scheduler's time zone if nothing else is specified trigger_args.setdefault('timezone', self.timezone) # Instantiate the trigger class return self._create_plugin_instance('trigger', trigger, trigger_args) def _create_lock(self): """Creates a reentrant lock object.""" return RLock() def _process_jobs(self): """ Iterates through jobs in every jobstore, starts jobs that are due and figures out how long to wait for the next round. If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least ``jobstore_retry_interval`` seconds. """ if self.state == STATE_PAUSED: self._logger.debug('Scheduler is paused -- not processing jobs') return None self._logger.debug('Looking for jobs to run') now = datetime.now(self.timezone) next_wakeup_time = None events = [] with self._jobstores_lock: for jobstore_alias, jobstore in six.iteritems(self._jobstores): try: due_jobs = jobstore.get_due_jobs(now) except Exception as e: # Schedule a wakeup at least in jobstore_retry_interval seconds self._logger.warning('Error getting due jobs from job store %r: %s', jobstore_alias, e) retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval) if not next_wakeup_time or next_wakeup_time > retry_wakeup_time: next_wakeup_time = retry_wakeup_time continue for job in due_jobs: # Look up the job's executor try: executor = self._lookup_executor(job.executor) except: self._logger.error( 'Executor lookup ("%s") failed for job "%s" -- removing it from the ' 'job store', job.executor, job) self.remove_job(job.id, jobstore_alias) continue 
run_times = job._get_run_times(now) run_times = run_times[-1:] if run_times and job.coalesce else run_times if run_times: try: executor.submit_job(job, run_times) except MaxInstancesReachedError: self._logger.warning( 'Execution of job "%s" skipped: maximum number of running ' 'instances reached (%d)', job, job.max_instances) event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id, jobstore_alias, run_times) events.append(event) except: self._logger.exception('Error submitting job "%s" to executor "%s"', job, job.executor) else: event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias, run_times) events.append(event) # Update the job if it has a next execution time. # Otherwise remove it from the job store. job_next_run = job.trigger.get_next_fire_time(run_times[-1], now) if job_next_run: job._modify(next_run_time=job_next_run) jobstore.update_job(job) else: self.remove_job(job.id, jobstore_alias) # Set a new next wakeup time if there isn't one yet or # the jobstore has an even earlier one jobstore_next_run_time = jobstore.get_next_run_time() if jobstore_next_run_time and (next_wakeup_time is None or jobstore_next_run_time < next_wakeup_time): next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone) # Dispatch collected events for event in events: self._dispatch_event(event) # Determine the delay until this method should be called again if self.state == STATE_PAUSED: wait_seconds = None self._logger.debug('Scheduler is paused; waiting until resume() is called') elif next_wakeup_time is None: wait_seconds = None self._logger.debug('No jobs; waiting until a job is added') else: wait_seconds = max(timedelta_seconds(next_wakeup_time - now), 0) self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time, wait_seconds) return wait_seconds
42,201
Python
.py
822
38.609489
99
0.600704
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,801
win32.py
evilhero_mylar/lib/tzlocal/win32.py
try:
    import _winreg as winreg  # Python 2
except ImportError:
    import winreg  # Python 3

from tzlocal.windows_tz import win_tz
import pytz

# Cached tzinfo, populated lazily by get_localzone()
_cache_tz = None


def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    # NOTE: the accumulator was previously named "dict", shadowing the builtin.
    values = {}
    size = winreg.QueryInfoKey(key)[1]
    for i in range(size):
        data = winreg.EnumValue(key, i)
        values[data[0]] = data[1]
    return values


def get_localzone_name():
    """Return the IANA zone name matching the Windows-configured timezone.

    :raises LookupError: if no Windows timezone configuration can be found
    :raises pytz.UnknownTimeZoneError: if the Windows name has no IANA mapping
    """
    # Windows is special. It has unique time zone names (in several
    # meanings of the word) available, but unfortunately, they can be
    # translated to the language of the operating system, so we need to
    # do a backwards lookup, by going through all time zones and see which
    # one matches.
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)

    TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
    localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)
    keyvalues = valuestodict(localtz)
    localtz.Close()

    if 'TimeZoneKeyName' in keyvalues:
        # Windows 7 (and Vista?)

        # For some reason this returns a string with loads of NUL bytes at
        # least on some systems. I don't know if this is a bug somewhere, I
        # just work around it.
        tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0]
    else:
        # Windows 2000 or XP

        # This is the localized name:
        tzwin = keyvalues['StandardName']

        # Open the list of timezones to look up the real name:
        TZKEYNAME = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
        tzkey = winreg.OpenKey(handle, TZKEYNAME)

        # Now, match this value to Time Zone information
        tzkeyname = None
        for i in range(winreg.QueryInfoKey(tzkey)[0]):
            subkey = winreg.EnumKey(tzkey, i)
            sub = winreg.OpenKey(tzkey, subkey)
            data = valuestodict(sub)
            sub.Close()
            try:
                if data['Std'] == tzwin:
                    tzkeyname = subkey
                    break
            except KeyError:
                # This timezone didn't have proper configuration.
                # Ignore it.
                pass

        tzkey.Close()

    # Previously the registry handle was only closed on the XP/2000 code
    # path, leaking it on Vista and later; close it on both paths.
    handle.Close()

    if tzkeyname is None:
        raise LookupError('Can not find Windows timezone configuration')

    timezone = win_tz.get(tzkeyname)
    if timezone is None:
        # Nope, that didn't work. Try adding "Standard Time",
        # it seems to work a lot of times:
        timezone = win_tz.get(tzkeyname + " Standard Time")

    # Return what we have.
    if timezone is None:
        raise pytz.UnknownTimeZoneError('Can not find timezone ' + tzkeyname)

    return timezone


def get_localzone():
    """Returns the zoneinfo-based tzinfo object that matches the Windows-configured timezone."""
    global _cache_tz
    if _cache_tz is None:
        _cache_tz = pytz.timezone(get_localzone_name())
    return _cache_tz


def reload_localzone():
    """Reload the cached localzone. You need to call this if the timezone has changed."""
    global _cache_tz
    _cache_tz = pytz.timezone(get_localzone_name())
3,135
Python
.py
77
33.064935
96
0.653517
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,802
windows_tz.py
evilhero_mylar/lib/tzlocal/windows_tz.py
# This file is autogenerated by the get_windows_info.py script # Do not edit. win_tz = {'AUS Central Standard Time': 'Australia/Darwin', 'AUS Eastern Standard Time': 'Australia/Sydney', 'Afghanistan Standard Time': 'Asia/Kabul', 'Alaskan Standard Time': 'America/Anchorage', 'Arab Standard Time': 'Asia/Riyadh', 'Arabian Standard Time': 'Asia/Dubai', 'Arabic Standard Time': 'Asia/Baghdad', 'Argentina Standard Time': 'America/Buenos_Aires', 'Atlantic Standard Time': 'America/Halifax', 'Azerbaijan Standard Time': 'Asia/Baku', 'Azores Standard Time': 'Atlantic/Azores', 'Bahia Standard Time': 'America/Bahia', 'Bangladesh Standard Time': 'Asia/Dhaka', 'Belarus Standard Time': 'Europe/Minsk', 'Canada Central Standard Time': 'America/Regina', 'Cape Verde Standard Time': 'Atlantic/Cape_Verde', 'Caucasus Standard Time': 'Asia/Yerevan', 'Cen. Australia Standard Time': 'Australia/Adelaide', 'Central America Standard Time': 'America/Guatemala', 'Central Asia Standard Time': 'Asia/Almaty', 'Central Brazilian Standard Time': 'America/Cuiaba', 'Central Europe Standard Time': 'Europe/Budapest', 'Central European Standard Time': 'Europe/Warsaw', 'Central Pacific Standard Time': 'Pacific/Guadalcanal', 'Central Standard Time': 'America/Chicago', 'Central Standard Time (Mexico)': 'America/Mexico_City', 'China Standard Time': 'Asia/Shanghai', 'Dateline Standard Time': 'Etc/GMT+12', 'E. Africa Standard Time': 'Africa/Nairobi', 'E. Australia Standard Time': 'Australia/Brisbane', 'E. 
South America Standard Time': 'America/Sao_Paulo', 'Eastern Standard Time': 'America/New_York', 'Egypt Standard Time': 'Africa/Cairo', 'Ekaterinburg Standard Time': 'Asia/Yekaterinburg', 'FLE Standard Time': 'Europe/Kiev', 'Fiji Standard Time': 'Pacific/Fiji', 'GMT Standard Time': 'Europe/London', 'GTB Standard Time': 'Europe/Bucharest', 'Georgian Standard Time': 'Asia/Tbilisi', 'Greenland Standard Time': 'America/Godthab', 'Greenwich Standard Time': 'Atlantic/Reykjavik', 'Hawaiian Standard Time': 'Pacific/Honolulu', 'India Standard Time': 'Asia/Calcutta', 'Iran Standard Time': 'Asia/Tehran', 'Israel Standard Time': 'Asia/Jerusalem', 'Jordan Standard Time': 'Asia/Amman', 'Kaliningrad Standard Time': 'Europe/Kaliningrad', 'Korea Standard Time': 'Asia/Seoul', 'Libya Standard Time': 'Africa/Tripoli', 'Line Islands Standard Time': 'Pacific/Kiritimati', 'Magadan Standard Time': 'Asia/Magadan', 'Mauritius Standard Time': 'Indian/Mauritius', 'Middle East Standard Time': 'Asia/Beirut', 'Montevideo Standard Time': 'America/Montevideo', 'Morocco Standard Time': 'Africa/Casablanca', 'Mountain Standard Time': 'America/Denver', 'Mountain Standard Time (Mexico)': 'America/Chihuahua', 'Myanmar Standard Time': 'Asia/Rangoon', 'N. 
Central Asia Standard Time': 'Asia/Novosibirsk', 'Namibia Standard Time': 'Africa/Windhoek', 'Nepal Standard Time': 'Asia/Katmandu', 'New Zealand Standard Time': 'Pacific/Auckland', 'Newfoundland Standard Time': 'America/St_Johns', 'North Asia East Standard Time': 'Asia/Irkutsk', 'North Asia Standard Time': 'Asia/Krasnoyarsk', 'Pacific SA Standard Time': 'America/Santiago', 'Pacific Standard Time': 'America/Los_Angeles', 'Pacific Standard Time (Mexico)': 'America/Santa_Isabel', 'Pakistan Standard Time': 'Asia/Karachi', 'Paraguay Standard Time': 'America/Asuncion', 'Romance Standard Time': 'Europe/Paris', 'Russia Time Zone 10': 'Asia/Srednekolymsk', 'Russia Time Zone 11': 'Asia/Kamchatka', 'Russia Time Zone 3': 'Europe/Samara', 'Russian Standard Time': 'Europe/Moscow', 'SA Eastern Standard Time': 'America/Cayenne', 'SA Pacific Standard Time': 'America/Bogota', 'SA Western Standard Time': 'America/La_Paz', 'SE Asia Standard Time': 'Asia/Bangkok', 'Samoa Standard Time': 'Pacific/Apia', 'Singapore Standard Time': 'Asia/Singapore', 'South Africa Standard Time': 'Africa/Johannesburg', 'Sri Lanka Standard Time': 'Asia/Colombo', 'Syria Standard Time': 'Asia/Damascus', 'Taipei Standard Time': 'Asia/Taipei', 'Tasmania Standard Time': 'Australia/Hobart', 'Tokyo Standard Time': 'Asia/Tokyo', 'Tonga Standard Time': 'Pacific/Tongatapu', 'Turkey Standard Time': 'Europe/Istanbul', 'US Eastern Standard Time': 'America/Indianapolis', 'US Mountain Standard Time': 'America/Phoenix', 'UTC': 'Etc/GMT', 'UTC+12': 'Etc/GMT-12', 'UTC-02': 'Etc/GMT+2', 'UTC-11': 'Etc/GMT+11', 'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar', 'Venezuela Standard Time': 'America/Caracas', 'Vladivostok Standard Time': 'Asia/Vladivostok', 'W. Australia Standard Time': 'Australia/Perth', 'W. Central Africa Standard Time': 'Africa/Lagos', 'W. 
Europe Standard Time': 'Europe/Berlin', 'West Asia Standard Time': 'Asia/Tashkent', 'West Pacific Standard Time': 'Pacific/Port_Moresby', 'Yakutsk Standard Time': 'Asia/Yakutsk'} # Old name for the win_tz variable: tz_names = win_tz tz_win = {'Africa/Abidjan': 'Greenwich Standard Time', 'Africa/Accra': 'Greenwich Standard Time', 'Africa/Addis_Ababa': 'E. Africa Standard Time', 'Africa/Algiers': 'W. Central Africa Standard Time', 'Africa/Asmera': 'E. Africa Standard Time', 'Africa/Bamako': 'Greenwich Standard Time', 'Africa/Bangui': 'W. Central Africa Standard Time', 'Africa/Banjul': 'Greenwich Standard Time', 'Africa/Bissau': 'Greenwich Standard Time', 'Africa/Blantyre': 'South Africa Standard Time', 'Africa/Brazzaville': 'W. Central Africa Standard Time', 'Africa/Bujumbura': 'South Africa Standard Time', 'Africa/Cairo': 'Egypt Standard Time', 'Africa/Casablanca': 'Morocco Standard Time', 'Africa/Ceuta': 'Romance Standard Time', 'Africa/Conakry': 'Greenwich Standard Time', 'Africa/Dakar': 'Greenwich Standard Time', 'Africa/Dar_es_Salaam': 'E. Africa Standard Time', 'Africa/Djibouti': 'E. Africa Standard Time', 'Africa/Douala': 'W. Central Africa Standard Time', 'Africa/El_Aaiun': 'Morocco Standard Time', 'Africa/Freetown': 'Greenwich Standard Time', 'Africa/Gaborone': 'South Africa Standard Time', 'Africa/Harare': 'South Africa Standard Time', 'Africa/Johannesburg': 'South Africa Standard Time', 'Africa/Juba': 'E. Africa Standard Time', 'Africa/Kampala': 'E. Africa Standard Time', 'Africa/Khartoum': 'E. Africa Standard Time', 'Africa/Kigali': 'South Africa Standard Time', 'Africa/Kinshasa': 'W. Central Africa Standard Time', 'Africa/Lagos': 'W. Central Africa Standard Time', 'Africa/Libreville': 'W. Central Africa Standard Time', 'Africa/Lome': 'Greenwich Standard Time', 'Africa/Luanda': 'W. Central Africa Standard Time', 'Africa/Lubumbashi': 'South Africa Standard Time', 'Africa/Lusaka': 'South Africa Standard Time', 'Africa/Malabo': 'W. 
Central Africa Standard Time', 'Africa/Maputo': 'South Africa Standard Time', 'Africa/Maseru': 'South Africa Standard Time', 'Africa/Mbabane': 'South Africa Standard Time', 'Africa/Mogadishu': 'E. Africa Standard Time', 'Africa/Monrovia': 'Greenwich Standard Time', 'Africa/Nairobi': 'E. Africa Standard Time', 'Africa/Ndjamena': 'W. Central Africa Standard Time', 'Africa/Niamey': 'W. Central Africa Standard Time', 'Africa/Nouakchott': 'Greenwich Standard Time', 'Africa/Ouagadougou': 'Greenwich Standard Time', 'Africa/Porto-Novo': 'W. Central Africa Standard Time', 'Africa/Sao_Tome': 'Greenwich Standard Time', 'Africa/Tripoli': 'Libya Standard Time', 'Africa/Tunis': 'W. Central Africa Standard Time', 'Africa/Windhoek': 'Namibia Standard Time', 'America/Anchorage': 'Alaskan Standard Time', 'America/Anguilla': 'SA Western Standard Time', 'America/Antigua': 'SA Western Standard Time', 'America/Araguaina': 'SA Eastern Standard Time', 'America/Argentina/La_Rioja': 'Argentina Standard Time', 'America/Argentina/Rio_Gallegos': 'Argentina Standard Time', 'America/Argentina/Salta': 'Argentina Standard Time', 'America/Argentina/San_Juan': 'Argentina Standard Time', 'America/Argentina/San_Luis': 'Argentina Standard Time', 'America/Argentina/Tucuman': 'Argentina Standard Time', 'America/Argentina/Ushuaia': 'Argentina Standard Time', 'America/Aruba': 'SA Western Standard Time', 'America/Asuncion': 'Paraguay Standard Time', 'America/Bahia': 'Bahia Standard Time', 'America/Bahia_Banderas': 'Central Standard Time (Mexico)', 'America/Barbados': 'SA Western Standard Time', 'America/Belem': 'SA Eastern Standard Time', 'America/Belize': 'Central America Standard Time', 'America/Blanc-Sablon': 'SA Western Standard Time', 'America/Boa_Vista': 'SA Western Standard Time', 'America/Bogota': 'SA Pacific Standard Time', 'America/Boise': 'Mountain Standard Time', 'America/Buenos_Aires': 'Argentina Standard Time', 'America/Cambridge_Bay': 'Mountain Standard Time', 'America/Campo_Grande': 'Central 
Brazilian Standard Time', 'America/Cancun': 'Central Standard Time (Mexico)', 'America/Caracas': 'Venezuela Standard Time', 'America/Catamarca': 'Argentina Standard Time', 'America/Cayenne': 'SA Eastern Standard Time', 'America/Cayman': 'SA Pacific Standard Time', 'America/Chicago': 'Central Standard Time', 'America/Chihuahua': 'Mountain Standard Time (Mexico)', 'America/Coral_Harbour': 'SA Pacific Standard Time', 'America/Cordoba': 'Argentina Standard Time', 'America/Costa_Rica': 'Central America Standard Time', 'America/Creston': 'US Mountain Standard Time', 'America/Cuiaba': 'Central Brazilian Standard Time', 'America/Curacao': 'SA Western Standard Time', 'America/Danmarkshavn': 'UTC', 'America/Dawson': 'Pacific Standard Time', 'America/Dawson_Creek': 'US Mountain Standard Time', 'America/Denver': 'Mountain Standard Time', 'America/Detroit': 'Eastern Standard Time', 'America/Dominica': 'SA Western Standard Time', 'America/Edmonton': 'Mountain Standard Time', 'America/Eirunepe': 'SA Pacific Standard Time', 'America/El_Salvador': 'Central America Standard Time', 'America/Fortaleza': 'SA Eastern Standard Time', 'America/Glace_Bay': 'Atlantic Standard Time', 'America/Godthab': 'Greenland Standard Time', 'America/Goose_Bay': 'Atlantic Standard Time', 'America/Grand_Turk': 'SA Western Standard Time', 'America/Grenada': 'SA Western Standard Time', 'America/Guadeloupe': 'SA Western Standard Time', 'America/Guatemala': 'Central America Standard Time', 'America/Guayaquil': 'SA Pacific Standard Time', 'America/Guyana': 'SA Western Standard Time', 'America/Halifax': 'Atlantic Standard Time', 'America/Havana': 'Eastern Standard Time', 'America/Hermosillo': 'US Mountain Standard Time', 'America/Indiana/Knox': 'Central Standard Time', 'America/Indiana/Marengo': 'US Eastern Standard Time', 'America/Indiana/Petersburg': 'Eastern Standard Time', 'America/Indiana/Tell_City': 'Central Standard Time', 'America/Indiana/Vevay': 'US Eastern Standard Time', 'America/Indiana/Vincennes': 
'Eastern Standard Time', 'America/Indiana/Winamac': 'Eastern Standard Time', 'America/Indianapolis': 'US Eastern Standard Time', 'America/Inuvik': 'Mountain Standard Time', 'America/Iqaluit': 'Eastern Standard Time', 'America/Jamaica': 'SA Pacific Standard Time', 'America/Jujuy': 'Argentina Standard Time', 'America/Juneau': 'Alaskan Standard Time', 'America/Kentucky/Monticello': 'Eastern Standard Time', 'America/Kralendijk': 'SA Western Standard Time', 'America/La_Paz': 'SA Western Standard Time', 'America/Lima': 'SA Pacific Standard Time', 'America/Los_Angeles': 'Pacific Standard Time', 'America/Louisville': 'Eastern Standard Time', 'America/Lower_Princes': 'SA Western Standard Time', 'America/Maceio': 'SA Eastern Standard Time', 'America/Managua': 'Central America Standard Time', 'America/Manaus': 'SA Western Standard Time', 'America/Marigot': 'SA Western Standard Time', 'America/Martinique': 'SA Western Standard Time', 'America/Matamoros': 'Central Standard Time', 'America/Mazatlan': 'Mountain Standard Time (Mexico)', 'America/Mendoza': 'Argentina Standard Time', 'America/Menominee': 'Central Standard Time', 'America/Merida': 'Central Standard Time (Mexico)', 'America/Mexico_City': 'Central Standard Time (Mexico)', 'America/Moncton': 'Atlantic Standard Time', 'America/Monterrey': 'Central Standard Time (Mexico)', 'America/Montevideo': 'Montevideo Standard Time', 'America/Montreal': 'Eastern Standard Time', 'America/Montserrat': 'SA Western Standard Time', 'America/Nassau': 'Eastern Standard Time', 'America/New_York': 'Eastern Standard Time', 'America/Nipigon': 'Eastern Standard Time', 'America/Nome': 'Alaskan Standard Time', 'America/Noronha': 'UTC-02', 'America/North_Dakota/Beulah': 'Central Standard Time', 'America/North_Dakota/Center': 'Central Standard Time', 'America/North_Dakota/New_Salem': 'Central Standard Time', 'America/Ojinaga': 'Mountain Standard Time', 'America/Panama': 'SA Pacific Standard Time', 'America/Pangnirtung': 'Eastern Standard Time', 
'America/Paramaribo': 'SA Eastern Standard Time', 'America/Phoenix': 'US Mountain Standard Time', 'America/Port-au-Prince': 'Eastern Standard Time', 'America/Port_of_Spain': 'SA Western Standard Time', 'America/Porto_Velho': 'SA Western Standard Time', 'America/Puerto_Rico': 'SA Western Standard Time', 'America/Rainy_River': 'Central Standard Time', 'America/Rankin_Inlet': 'Central Standard Time', 'America/Recife': 'SA Eastern Standard Time', 'America/Regina': 'Canada Central Standard Time', 'America/Resolute': 'Central Standard Time', 'America/Rio_Branco': 'SA Pacific Standard Time', 'America/Santa_Isabel': 'Pacific Standard Time (Mexico)', 'America/Santarem': 'SA Eastern Standard Time', 'America/Santiago': 'Pacific SA Standard Time', 'America/Santo_Domingo': 'SA Western Standard Time', 'America/Sao_Paulo': 'E. South America Standard Time', 'America/Scoresbysund': 'Azores Standard Time', 'America/Sitka': 'Alaskan Standard Time', 'America/St_Barthelemy': 'SA Western Standard Time', 'America/St_Johns': 'Newfoundland Standard Time', 'America/St_Kitts': 'SA Western Standard Time', 'America/St_Lucia': 'SA Western Standard Time', 'America/St_Thomas': 'SA Western Standard Time', 'America/St_Vincent': 'SA Western Standard Time', 'America/Swift_Current': 'Canada Central Standard Time', 'America/Tegucigalpa': 'Central America Standard Time', 'America/Thule': 'Atlantic Standard Time', 'America/Thunder_Bay': 'Eastern Standard Time', 'America/Tijuana': 'Pacific Standard Time', 'America/Toronto': 'Eastern Standard Time', 'America/Tortola': 'SA Western Standard Time', 'America/Vancouver': 'Pacific Standard Time', 'America/Whitehorse': 'Pacific Standard Time', 'America/Winnipeg': 'Central Standard Time', 'America/Yakutat': 'Alaskan Standard Time', 'America/Yellowknife': 'Mountain Standard Time', 'Antarctica/Casey': 'W. 
Australia Standard Time', 'Antarctica/Davis': 'SE Asia Standard Time', 'Antarctica/DumontDUrville': 'West Pacific Standard Time', 'Antarctica/Macquarie': 'Central Pacific Standard Time', 'Antarctica/Mawson': 'West Asia Standard Time', 'Antarctica/McMurdo': 'New Zealand Standard Time', 'Antarctica/Palmer': 'Pacific SA Standard Time', 'Antarctica/Rothera': 'SA Eastern Standard Time', 'Antarctica/Syowa': 'E. Africa Standard Time', 'Antarctica/Vostok': 'Central Asia Standard Time', 'Arctic/Longyearbyen': 'W. Europe Standard Time', 'Asia/Aden': 'Arab Standard Time', 'Asia/Almaty': 'Central Asia Standard Time', 'Asia/Amman': 'Jordan Standard Time', 'Asia/Anadyr': 'Russia Time Zone 11', 'Asia/Aqtau': 'West Asia Standard Time', 'Asia/Aqtobe': 'West Asia Standard Time', 'Asia/Ashgabat': 'West Asia Standard Time', 'Asia/Baghdad': 'Arabic Standard Time', 'Asia/Bahrain': 'Arab Standard Time', 'Asia/Baku': 'Azerbaijan Standard Time', 'Asia/Bangkok': 'SE Asia Standard Time', 'Asia/Beirut': 'Middle East Standard Time', 'Asia/Bishkek': 'Central Asia Standard Time', 'Asia/Brunei': 'Singapore Standard Time', 'Asia/Calcutta': 'India Standard Time', 'Asia/Chita': 'North Asia East Standard Time', 'Asia/Choibalsan': 'Ulaanbaatar Standard Time', 'Asia/Colombo': 'Sri Lanka Standard Time', 'Asia/Damascus': 'Syria Standard Time', 'Asia/Dhaka': 'Bangladesh Standard Time', 'Asia/Dili': 'Tokyo Standard Time', 'Asia/Dubai': 'Arabian Standard Time', 'Asia/Dushanbe': 'West Asia Standard Time', 'Asia/Hong_Kong': 'China Standard Time', 'Asia/Hovd': 'SE Asia Standard Time', 'Asia/Irkutsk': 'North Asia East Standard Time', 'Asia/Jakarta': 'SE Asia Standard Time', 'Asia/Jayapura': 'Tokyo Standard Time', 'Asia/Jerusalem': 'Israel Standard Time', 'Asia/Kabul': 'Afghanistan Standard Time', 'Asia/Kamchatka': 'Russia Time Zone 11', 'Asia/Karachi': 'Pakistan Standard Time', 'Asia/Katmandu': 'Nepal Standard Time', 'Asia/Khandyga': 'Yakutsk Standard Time', 'Asia/Krasnoyarsk': 'North Asia Standard Time', 
'Asia/Kuala_Lumpur': 'Singapore Standard Time', 'Asia/Kuching': 'Singapore Standard Time', 'Asia/Kuwait': 'Arab Standard Time', 'Asia/Macau': 'China Standard Time', 'Asia/Magadan': 'Magadan Standard Time', 'Asia/Makassar': 'Singapore Standard Time', 'Asia/Manila': 'Singapore Standard Time', 'Asia/Muscat': 'Arabian Standard Time', 'Asia/Nicosia': 'GTB Standard Time', 'Asia/Novokuznetsk': 'North Asia Standard Time', 'Asia/Novosibirsk': 'N. Central Asia Standard Time', 'Asia/Omsk': 'N. Central Asia Standard Time', 'Asia/Oral': 'West Asia Standard Time', 'Asia/Phnom_Penh': 'SE Asia Standard Time', 'Asia/Pontianak': 'SE Asia Standard Time', 'Asia/Pyongyang': 'Korea Standard Time', 'Asia/Qatar': 'Arab Standard Time', 'Asia/Qyzylorda': 'Central Asia Standard Time', 'Asia/Rangoon': 'Myanmar Standard Time', 'Asia/Riyadh': 'Arab Standard Time', 'Asia/Saigon': 'SE Asia Standard Time', 'Asia/Sakhalin': 'Vladivostok Standard Time', 'Asia/Samarkand': 'West Asia Standard Time', 'Asia/Seoul': 'Korea Standard Time', 'Asia/Shanghai': 'China Standard Time', 'Asia/Singapore': 'Singapore Standard Time', 'Asia/Srednekolymsk': 'Russia Time Zone 10', 'Asia/Taipei': 'Taipei Standard Time', 'Asia/Tashkent': 'West Asia Standard Time', 'Asia/Tbilisi': 'Georgian Standard Time', 'Asia/Tehran': 'Iran Standard Time', 'Asia/Thimphu': 'Bangladesh Standard Time', 'Asia/Tokyo': 'Tokyo Standard Time', 'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time', 'Asia/Urumqi': 'Central Asia Standard Time', 'Asia/Ust-Nera': 'Vladivostok Standard Time', 'Asia/Vientiane': 'SE Asia Standard Time', 'Asia/Vladivostok': 'Vladivostok Standard Time', 'Asia/Yakutsk': 'Yakutsk Standard Time', 'Asia/Yekaterinburg': 'Ekaterinburg Standard Time', 'Asia/Yerevan': 'Caucasus Standard Time', 'Atlantic/Azores': 'Azores Standard Time', 'Atlantic/Bermuda': 'Atlantic Standard Time', 'Atlantic/Canary': 'GMT Standard Time', 'Atlantic/Cape_Verde': 'Cape Verde Standard Time', 'Atlantic/Faeroe': 'GMT Standard Time', 'Atlantic/Madeira': 'GMT 
Standard Time', 'Atlantic/Reykjavik': 'Greenwich Standard Time', 'Atlantic/South_Georgia': 'UTC-02', 'Atlantic/St_Helena': 'Greenwich Standard Time', 'Atlantic/Stanley': 'SA Eastern Standard Time', 'Australia/Adelaide': 'Cen. Australia Standard Time', 'Australia/Brisbane': 'E. Australia Standard Time', 'Australia/Broken_Hill': 'Cen. Australia Standard Time', 'Australia/Currie': 'Tasmania Standard Time', 'Australia/Darwin': 'AUS Central Standard Time', 'Australia/Hobart': 'Tasmania Standard Time', 'Australia/Lindeman': 'E. Australia Standard Time', 'Australia/Melbourne': 'AUS Eastern Standard Time', 'Australia/Perth': 'W. Australia Standard Time', 'Australia/Sydney': 'AUS Eastern Standard Time', 'CST6CDT': 'Central Standard Time', 'EST5EDT': 'Eastern Standard Time', 'Etc/GMT': 'UTC', 'Etc/GMT+1': 'Cape Verde Standard Time', 'Etc/GMT+10': 'Hawaiian Standard Time', 'Etc/GMT+11': 'UTC-11', 'Etc/GMT+12': 'Dateline Standard Time', 'Etc/GMT+2': 'UTC-02', 'Etc/GMT+3': 'SA Eastern Standard Time', 'Etc/GMT+4': 'SA Western Standard Time', 'Etc/GMT+5': 'SA Pacific Standard Time', 'Etc/GMT+6': 'Central America Standard Time', 'Etc/GMT+7': 'US Mountain Standard Time', 'Etc/GMT-1': 'W. Central Africa Standard Time', 'Etc/GMT-10': 'West Pacific Standard Time', 'Etc/GMT-11': 'Central Pacific Standard Time', 'Etc/GMT-12': 'UTC+12', 'Etc/GMT-13': 'Tonga Standard Time', 'Etc/GMT-14': 'Line Islands Standard Time', 'Etc/GMT-2': 'South Africa Standard Time', 'Etc/GMT-3': 'E. Africa Standard Time', 'Etc/GMT-4': 'Arabian Standard Time', 'Etc/GMT-5': 'West Asia Standard Time', 'Etc/GMT-6': 'Central Asia Standard Time', 'Etc/GMT-7': 'SE Asia Standard Time', 'Etc/GMT-8': 'Singapore Standard Time', 'Etc/GMT-9': 'Tokyo Standard Time', 'Etc/UTC': 'UTC', 'Europe/Amsterdam': 'W. Europe Standard Time', 'Europe/Andorra': 'W. Europe Standard Time', 'Europe/Athens': 'GTB Standard Time', 'Europe/Belgrade': 'Central Europe Standard Time', 'Europe/Berlin': 'W. 
Europe Standard Time', 'Europe/Bratislava': 'Central Europe Standard Time', 'Europe/Brussels': 'Romance Standard Time', 'Europe/Bucharest': 'GTB Standard Time', 'Europe/Budapest': 'Central Europe Standard Time', 'Europe/Busingen': 'W. Europe Standard Time', 'Europe/Chisinau': 'GTB Standard Time', 'Europe/Copenhagen': 'Romance Standard Time', 'Europe/Dublin': 'GMT Standard Time', 'Europe/Gibraltar': 'W. Europe Standard Time', 'Europe/Guernsey': 'GMT Standard Time', 'Europe/Helsinki': 'FLE Standard Time', 'Europe/Isle_of_Man': 'GMT Standard Time', 'Europe/Istanbul': 'Turkey Standard Time', 'Europe/Jersey': 'GMT Standard Time', 'Europe/Kaliningrad': 'Kaliningrad Standard Time', 'Europe/Kiev': 'FLE Standard Time', 'Europe/Lisbon': 'GMT Standard Time', 'Europe/Ljubljana': 'Central Europe Standard Time', 'Europe/London': 'GMT Standard Time', 'Europe/Luxembourg': 'W. Europe Standard Time', 'Europe/Madrid': 'Romance Standard Time', 'Europe/Malta': 'W. Europe Standard Time', 'Europe/Mariehamn': 'FLE Standard Time', 'Europe/Minsk': 'Belarus Standard Time', 'Europe/Monaco': 'W. Europe Standard Time', 'Europe/Moscow': 'Russian Standard Time', 'Europe/Oslo': 'W. Europe Standard Time', 'Europe/Paris': 'Romance Standard Time', 'Europe/Podgorica': 'Central Europe Standard Time', 'Europe/Prague': 'Central Europe Standard Time', 'Europe/Riga': 'FLE Standard Time', 'Europe/Rome': 'W. Europe Standard Time', 'Europe/Samara': 'Russia Time Zone 3', 'Europe/San_Marino': 'W. Europe Standard Time', 'Europe/Sarajevo': 'Central European Standard Time', 'Europe/Simferopol': 'Russian Standard Time', 'Europe/Skopje': 'Central European Standard Time', 'Europe/Sofia': 'FLE Standard Time', 'Europe/Stockholm': 'W. Europe Standard Time', 'Europe/Tallinn': 'FLE Standard Time', 'Europe/Tirane': 'Central Europe Standard Time', 'Europe/Uzhgorod': 'FLE Standard Time', 'Europe/Vaduz': 'W. Europe Standard Time', 'Europe/Vatican': 'W. Europe Standard Time', 'Europe/Vienna': 'W. 
Europe Standard Time', 'Europe/Vilnius': 'FLE Standard Time', 'Europe/Volgograd': 'Russian Standard Time', 'Europe/Warsaw': 'Central European Standard Time', 'Europe/Zagreb': 'Central European Standard Time', 'Europe/Zaporozhye': 'FLE Standard Time', 'Europe/Zurich': 'W. Europe Standard Time', 'Indian/Antananarivo': 'E. Africa Standard Time', 'Indian/Chagos': 'Central Asia Standard Time', 'Indian/Christmas': 'SE Asia Standard Time', 'Indian/Cocos': 'Myanmar Standard Time', 'Indian/Comoro': 'E. Africa Standard Time', 'Indian/Kerguelen': 'West Asia Standard Time', 'Indian/Mahe': 'Mauritius Standard Time', 'Indian/Maldives': 'West Asia Standard Time', 'Indian/Mauritius': 'Mauritius Standard Time', 'Indian/Mayotte': 'E. Africa Standard Time', 'Indian/Reunion': 'Mauritius Standard Time', 'MST7MDT': 'Mountain Standard Time', 'PST8PDT': 'Pacific Standard Time', 'Pacific/Apia': 'Samoa Standard Time', 'Pacific/Auckland': 'New Zealand Standard Time', 'Pacific/Efate': 'Central Pacific Standard Time', 'Pacific/Enderbury': 'Tonga Standard Time', 'Pacific/Fakaofo': 'Tonga Standard Time', 'Pacific/Fiji': 'Fiji Standard Time', 'Pacific/Funafuti': 'UTC+12', 'Pacific/Galapagos': 'Central America Standard Time', 'Pacific/Guadalcanal': 'Central Pacific Standard Time', 'Pacific/Guam': 'West Pacific Standard Time', 'Pacific/Honolulu': 'Hawaiian Standard Time', 'Pacific/Johnston': 'Hawaiian Standard Time', 'Pacific/Kiritimati': 'Line Islands Standard Time', 'Pacific/Kosrae': 'Central Pacific Standard Time', 'Pacific/Kwajalein': 'UTC+12', 'Pacific/Majuro': 'UTC+12', 'Pacific/Midway': 'UTC-11', 'Pacific/Nauru': 'UTC+12', 'Pacific/Niue': 'UTC-11', 'Pacific/Noumea': 'Central Pacific Standard Time', 'Pacific/Pago_Pago': 'UTC-11', 'Pacific/Palau': 'Tokyo Standard Time', 'Pacific/Ponape': 'Central Pacific Standard Time', 'Pacific/Port_Moresby': 'West Pacific Standard Time', 'Pacific/Rarotonga': 'Hawaiian Standard Time', 'Pacific/Saipan': 'West Pacific Standard Time', 'Pacific/Tahiti': 'Hawaiian 
Standard Time', 'Pacific/Tarawa': 'UTC+12', 'Pacific/Tongatapu': 'Tonga Standard Time', 'Pacific/Truk': 'West Pacific Standard Time', 'Pacific/Wake': 'UTC+12', 'Pacific/Wallis': 'UTC+12'}
24,987
Python
.py
540
44.27963
62
0.738515
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,803
unix.py
evilhero_mylar/lib/tzlocal/unix.py
from __future__ import with_statement import os import re import pytz _cache_tz = None def _tz_from_env(tzenv): if tzenv[0] == ':': tzenv = tzenv[1:] # TZ specifies a file if os.path.exists(tzenv): with open(tzenv, 'rb') as tzfile: return pytz.tzfile.build_tzinfo('local', tzfile) # TZ specifies a zoneinfo zone. try: tz = pytz.timezone(tzenv) # That worked, so we return this: return tz except pytz.UnknownTimeZoneError: raise pytz.UnknownTimeZoneError( "tzlocal() does not support non-zoneinfo timezones like %s. \n" "Please use a timezone in the form of Continent/City") def _get_localzone(_root='/'): """Tries to find the local timezone configuration. This method prefers finding the timezone name and passing that to pytz, over passing in the localtime file, as in the later case the zoneinfo name is unknown. The parameter _root makes the function look for files like /etc/localtime beneath the _root directory. This is primarily used by the tests. In normal usage you call the function without parameters.""" tzenv = os.environ.get('TZ') if tzenv: try: return _tz_from_env(tzenv) except pytz.UnknownTimeZoneError: pass # Now look for distribution specific configuration files # that contain the timezone name. tzpath = os.path.join(_root, 'etc/timezone') if os.path.exists(tzpath): with open(tzpath, 'rb') as tzfile: data = tzfile.read() # Issue #3 was that /etc/timezone was a zoneinfo file. 
# That's a misconfiguration, but we need to handle it gracefully: if data[:5] != 'TZif2': etctz = data.strip().decode() # Get rid of host definitions and comments: if ' ' in etctz: etctz, dummy = etctz.split(' ', 1) if '#' in etctz: etctz, dummy = etctz.split('#', 1) return pytz.timezone(etctz.replace(' ', '_')) # CentOS has a ZONE setting in /etc/sysconfig/clock, # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and # Gentoo has a TIMEZONE setting in /etc/conf.d/clock # We look through these files for a timezone: zone_re = re.compile('\s*ZONE\s*=\s*\"') timezone_re = re.compile('\s*TIMEZONE\s*=\s*\"') end_re = re.compile('\"') for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath, 'rt') as tzfile: data = tzfile.readlines() for line in data: # Look for the ZONE= setting. match = zone_re.match(line) if match is None: # No ZONE= setting. Look for the TIMEZONE= setting. match = timezone_re.match(line) if match is not None: # Some setting existed line = line[match.end():] etctz = line[:end_re.search(line).start()] # We found a timezone return pytz.timezone(etctz.replace(' ', '_')) # No explicit setting existed. Use localtime for filename in ('etc/localtime', 'usr/local/etc/localtime'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath, 'rb') as tzfile: return pytz.tzfile.build_tzinfo('local', tzfile) raise pytz.UnknownTimeZoneError('Can not find any timezone configuration') def get_localzone(): """Get the computers configured local timezone, if any.""" global _cache_tz if _cache_tz is None: _cache_tz = _get_localzone() return _cache_tz def reload_localzone(): """Reload the cached localzone. You need to call this if the timezone has changed.""" global _cache_tz _cache_tz = _get_localzone() return _cache_tz
4,032
Python
.py
95
33.610526
89
0.61348
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,804
__init__.py
evilhero_mylar/lib/tzlocal/__init__.py
import sys

# Select the platform-specific backend; every backend exports the same
# two-function API (get_localzone, reload_localzone).
if sys.platform == 'win32':
    from tzlocal.win32 import get_localzone, reload_localzone
elif 'darwin' in sys.platform:
    from tzlocal.darwin import get_localzone, reload_localzone
else:
    # Everything else falls back to the Unix implementation, which reads
    # TZ and the /etc configuration files.
    from tzlocal.unix import get_localzone, reload_localzone
262
Python
.py
7
34.714286
62
0.788235
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,805
tests.py
evilhero_mylar/lib/tzlocal/tests.py
"""Tests for tzlocal's unix (and, on Windows, win32) timezone detection.

Each test points ``_get_localzone`` at a fixture directory under
``test_data/`` that mimics one distribution's timezone configuration.
"""
import sys
import os
from datetime import datetime
import unittest
import pytz
import tzlocal.unix


class TzLocalTests(unittest.TestCase):

    def test_env(self):
        # TZ environment values: the POSIX ':Zone/Name' form...
        tz_harare = tzlocal.unix._tz_from_env(':Africa/Harare')
        self.assertEqual(tz_harare.zone, 'Africa/Harare')

        # Some Unices allow this as well, so we must allow it:
        tz_harare = tzlocal.unix._tz_from_env('Africa/Harare')
        self.assertEqual(tz_harare.zone, 'Africa/Harare')

        # TZ may also point at a zoneinfo file on disk; such zones are
        # named 'local' because the region name is unknown.
        local_path = os.path.split(__file__)[0]
        tz_local = tzlocal.unix._tz_from_env(':' + os.path.join(local_path, 'test_data', 'Harare'))
        self.assertEqual(tz_local.zone, 'local')
        # Make sure the local timezone is the same as the Harare one above.
        # We test this with a past date, so that we don't run into future changes
        # of the Harare timezone.
        dt = datetime(2012, 1, 1, 5)
        self.assertEqual(tz_harare.localize(dt), tz_local.localize(dt))

        # Non-zoneinfo timezones are not supported in the TZ environment.
        self.assertRaises(pytz.UnknownTimeZoneError, tzlocal.unix._tz_from_env, 'GMT+03:00')

    def test_timezone(self):
        # Most versions of Ubuntu
        local_path = os.path.split(__file__)[0]
        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'timezone'))
        self.assertEqual(tz.zone, 'Africa/Harare')

    def test_zone_setting(self):
        # A ZONE setting in /etc/sysconfig/clock, f ex CentOS
        local_path = os.path.split(__file__)[0]
        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'zone_setting'))
        self.assertEqual(tz.zone, 'Africa/Harare')

    def test_timezone_setting(self):
        # A ZONE setting in /etc/conf.d/clock, f ex Gentoo
        local_path = os.path.split(__file__)[0]
        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'timezone_setting'))
        self.assertEqual(tz.zone, 'Africa/Harare')

    def test_only_localtime(self):
        # No config file at all: fall back to the /etc/localtime zoneinfo
        # file, which yields a zone named 'local'.
        local_path = os.path.split(__file__)[0]
        tz = tzlocal.unix._get_localzone(_root=os.path.join(local_path, 'test_data', 'localtime'))
        self.assertEqual(tz.zone, 'local')
        dt = datetime(2012, 1, 1, 5)
        self.assertEqual(pytz.timezone('Africa/Harare').localize(dt), tz.localize(dt))


# The win32 backend can only be exercised on Windows itself.
if sys.platform == 'win32':
    import tzlocal.win32
    class TzWin32Tests(unittest.TestCase):
        def test_win32(self):
            tzlocal.win32.get_localzone()

if __name__ == '__main__':
    unittest.main()
2,567
Python
.py
51
43.039216
105
0.657611
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,806
darwin.py
evilhero_mylar/lib/tzlocal/darwin.py
from __future__ import with_statement
import os

import pytz

# Cached result of the last successful detection; None until first lookup.
_cache_tz = None


def _get_localzone():
    """Detect the configured local timezone on OS X.

    Asks ``systemsetup -gettimezone`` first; if that yields nothing usable,
    falls back to resolving the /etc/localtime symlink.
    """
    reported = os.popen("systemsetup -gettimezone").read()
    name = reported.replace("Time Zone: ", "").strip()

    if not name or name not in pytz.all_timezones_set:
        # The symlink target looks like
        # /usr/share/zoneinfo/America/Los_Angeles; keep what follows
        # "zoneinfo/".
        target = os.readlink("/etc/localtime")
        name = target[target.rfind("zoneinfo/") + 9:]

    return pytz.timezone(name)


def get_localzone():
    """Get the computers configured local timezone, if any.

    The detection result is cached; repeated calls are cheap.
    """
    global _cache_tz
    if _cache_tz is None:
        _cache_tz = _get_localzone()
    return _cache_tz


def reload_localzone():
    """Reload the cached localzone. You need to call this if the timezone has changed."""
    global _cache_tz
    _cache_tz = _get_localzone()
    return _cache_tz
842
Python
.py
22
33.590909
91
0.679755
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,807
element.py
evilhero_mylar/lib/bs4/element.py
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __license__ = "MIT" import collections import re import shlex import sys import warnings from bs4.dammit import EntitySubstitution DEFAULT_OUTPUT_ENCODING = "utf-8" PY3K = (sys.version_info[0] > 2) whitespace_re = re.compile("\s+") def _alias(attr): """Alias one attribute name to another for backward compatibility""" @property def alias(self): return getattr(self, attr) @alias.setter def alias(self): return setattr(self, attr) return alias class NamespacedAttribute(unicode): def __new__(cls, prefix, name, namespace=None): if name is None: obj = unicode.__new__(cls, prefix) elif prefix is None: # Not really namespaced. obj = unicode.__new__(cls, name) else: obj = unicode.__new__(cls, prefix + ":" + name) obj.prefix = prefix obj.name = name obj.namespace = namespace return obj class AttributeValueWithCharsetSubstitution(unicode): """A stand-in object for a character encoding specified in HTML.""" class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): """A generic stand-in for the value of a meta tag's 'charset' attribute. When Beautiful Soup parses the markup '<meta charset="utf8">', the value of the 'charset' attribute will be one of these objects. """ def __new__(cls, original_value): obj = unicode.__new__(cls, original_value) obj.original_value = original_value return obj def encode(self, encoding): return encoding class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution): """A generic stand-in for the value of a meta tag's 'content' attribute. When Beautiful Soup parses the markup: <meta http-equiv="content-type" content="text/html; charset=utf8"> The value of the 'content' attribute will be one of these objects. """ CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def __new__(cls, original_value): match = cls.CHARSET_RE.search(original_value) if match is None: # No substitution necessary. 
return unicode.__new__(unicode, original_value) obj = unicode.__new__(cls, original_value) obj.original_value = original_value return obj def encode(self, encoding): def rewrite(match): return match.group(1) + encoding return self.CHARSET_RE.sub(rewrite, self.original_value) class HTMLAwareEntitySubstitution(EntitySubstitution): """Entity substitution rules that are aware of some HTML quirks. Specifically, the contents of <script> and <style> tags should not undergo entity substitution. Incoming NavigableString objects are checked to see if they're the direct children of a <script> or <style> tag. """ cdata_containing_tags = set(["script", "style"]) preformatted_tags = set(["pre"]) preserve_whitespace_tags = set(['pre', 'textarea']) @classmethod def _substitute_if_appropriate(cls, ns, f): if (isinstance(ns, NavigableString) and ns.parent is not None and ns.parent.name in cls.cdata_containing_tags): # Do nothing. return ns # Substitute. return f(ns) @classmethod def substitute_html(cls, ns): return cls._substitute_if_appropriate( ns, EntitySubstitution.substitute_html) @classmethod def substitute_xml(cls, ns): return cls._substitute_if_appropriate( ns, EntitySubstitution.substitute_xml) class PageElement(object): """Contains the navigational information for some part of the page (either a tag or a piece of text)""" # There are five possible values for the "formatter" argument passed in # to methods like encode() and prettify(): # # "html" - All Unicode characters with corresponding HTML entities # are converted to those entities on output. # "minimal" - Bare ampersands and angle brackets are converted to # XML entities: &amp; &lt; &gt; # None - The null formatter. Unicode characters are never # converted to entities. This is not recommended, but it's # faster than "minimal". # A function - This function will be called on every string that # needs to undergo entity substitution. 
# # In an HTML document, the default "html" and "minimal" functions # will leave the contents of <script> and <style> tags alone. For # an XML document, all tags will be given the same treatment. HTML_FORMATTERS = { "html" : HTMLAwareEntitySubstitution.substitute_html, "minimal" : HTMLAwareEntitySubstitution.substitute_xml, None : None } XML_FORMATTERS = { "html" : EntitySubstitution.substitute_html, "minimal" : EntitySubstitution.substitute_xml, None : None } def format_string(self, s, formatter='minimal'): """Format the given string using the given formatter.""" if not callable(formatter): formatter = self._formatter_for_name(formatter) if formatter is None: output = s else: output = formatter(s) return output @property def _is_xml(self): """Is this element part of an XML tree or an HTML tree? This is used when mapping a formatter name ("minimal") to an appropriate function (one that performs entity-substitution on the contents of <script> and <style> tags, or not). It can be inefficient, but it should be called very rarely. """ if self.known_xml is not None: # Most of the time we will have determined this when the # document is parsed. return self.known_xml # Otherwise, it's likely that this element was created by # direct invocation of the constructor from within the user's # Python code. if self.parent is None: # This is the top-level object. It should have .known_xml set # from tree creation. If not, take a guess--BS is usually # used on HTML markup. return getattr(self, 'is_xml', False) return self.parent._is_xml def _formatter_for_name(self, name): "Look up a formatter function based on its name and the tree." 
if self._is_xml: return self.XML_FORMATTERS.get( name, EntitySubstitution.substitute_xml) else: return self.HTML_FORMATTERS.get( name, HTMLAwareEntitySubstitution.substitute_xml) def setup(self, parent=None, previous_element=None, next_element=None, previous_sibling=None, next_sibling=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous_element = previous_element if previous_element is not None: self.previous_element.next_element = self self.next_element = next_element if self.next_element: self.next_element.previous_element = self self.next_sibling = next_sibling if self.next_sibling: self.next_sibling.previous_sibling = self if (not previous_sibling and self.parent is not None and self.parent.contents): previous_sibling = self.parent.contents[-1] self.previous_sibling = previous_sibling if previous_sibling: self.previous_sibling.next_sibling = self nextSibling = _alias("next_sibling") # BS3 previousSibling = _alias("previous_sibling") # BS3 def replace_with(self, replace_with): if not self.parent: raise ValueError( "Cannot replace one element with another when the" "element to be replaced is not part of a tree.") if replace_with is self: return if replace_with is self.parent: raise ValueError("Cannot replace a Tag with its parent.") old_parent = self.parent my_index = self.parent.index(self) self.extract() old_parent.insert(my_index, replace_with) return self replaceWith = replace_with # BS3 def unwrap(self): my_parent = self.parent if not self.parent: raise ValueError( "Cannot replace an element with its contents when that" "element is not part of a tree.") my_index = self.parent.index(self) self.extract() for child in reversed(self.contents[:]): my_parent.insert(my_index, child) return self replace_with_children = unwrap replaceWithChildren = unwrap # BS3 def wrap(self, wrap_inside): me = self.replace_with(wrap_inside) wrap_inside.append(me) return wrap_inside def extract(self): """Destructively 
rips this element out of the tree.""" if self.parent is not None: del self.parent.contents[self.parent.index(self)] #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. last_child = self._last_descendant() next_element = last_child.next_element if (self.previous_element is not None and self.previous_element is not next_element): self.previous_element.next_element = next_element if next_element is not None and next_element is not self.previous_element: next_element.previous_element = self.previous_element self.previous_element = None last_child.next_element = None self.parent = None if (self.previous_sibling is not None and self.previous_sibling is not self.next_sibling): self.previous_sibling.next_sibling = self.next_sibling if (self.next_sibling is not None and self.next_sibling is not self.previous_sibling): self.next_sibling.previous_sibling = self.previous_sibling self.previous_sibling = self.next_sibling = None return self def _last_descendant(self, is_initialized=True, accept_self=True): "Finds the last element beneath this object to be parsed." if is_initialized and self.next_sibling: last_child = self.next_sibling.previous_element else: last_child = self while isinstance(last_child, Tag) and last_child.contents: last_child = last_child.contents[-1] if not accept_self and last_child is self: last_child = None return last_child # BS3: Not part of the API! 
_lastRecursiveChild = _last_descendant def insert(self, position, new_child): if new_child is None: raise ValueError("Cannot insert None into a tag.") if new_child is self: raise ValueError("Cannot insert a tag into itself.") if (isinstance(new_child, basestring) and not isinstance(new_child, NavigableString)): new_child = NavigableString(new_child) position = min(position, len(self.contents)) if hasattr(new_child, 'parent') and new_child.parent is not None: # We're 'inserting' an element that's already one # of this object's children. if new_child.parent is self: current_index = self.index(new_child) if current_index < position: # We're moving this element further down the list # of this object's children. That means that when # we extract this element, our target index will # jump down one. position -= 1 new_child.extract() new_child.parent = self previous_child = None if position == 0: new_child.previous_sibling = None new_child.previous_element = self else: previous_child = self.contents[position - 1] new_child.previous_sibling = previous_child new_child.previous_sibling.next_sibling = new_child new_child.previous_element = previous_child._last_descendant(False) if new_child.previous_element is not None: new_child.previous_element.next_element = new_child new_childs_last_element = new_child._last_descendant(False) if position >= len(self.contents): new_child.next_sibling = None parent = self parents_next_sibling = None while parents_next_sibling is None and parent is not None: parents_next_sibling = parent.next_sibling parent = parent.parent if parents_next_sibling is not None: # We found the element that comes next in the document. break if parents_next_sibling is not None: new_childs_last_element.next_element = parents_next_sibling else: # The last element of this tag is the last element in # the document. 
new_childs_last_element.next_element = None else: next_child = self.contents[position] new_child.next_sibling = next_child if new_child.next_sibling is not None: new_child.next_sibling.previous_sibling = new_child new_childs_last_element.next_element = next_child if new_childs_last_element.next_element is not None: new_childs_last_element.next_element.previous_element = new_childs_last_element self.contents.insert(position, new_child) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def insert_before(self, predecessor): """Makes the given element the immediate predecessor of this one. The two elements will have the same parent, and the given element will be immediately before this one. """ if self is predecessor: raise ValueError("Can't insert an element before itself.") parent = self.parent if parent is None: raise ValueError( "Element has no parent, so 'before' has no meaning.") # Extract first so that the index won't be screwed up if they # are siblings. if isinstance(predecessor, PageElement): predecessor.extract() index = parent.index(self) parent.insert(index, predecessor) def insert_after(self, successor): """Makes the given element the immediate successor of this one. The two elements will have the same parent, and the given element will be immediately after this one. """ if self is successor: raise ValueError("Can't insert an element after itself.") parent = self.parent if parent is None: raise ValueError( "Element has no parent, so 'after' has no meaning.") # Extract first so that the index won't be screwed up if they # are siblings. 
if isinstance(successor, PageElement): successor.extract() index = parent.index(self) parent.insert(index+1, successor) def find_next(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._find_one(self.find_all_next, name, attrs, text, **kwargs) findNext = find_next # BS3 def find_all_next(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._find_all(name, attrs, text, limit, self.next_elements, **kwargs) findAllNext = find_all_next # BS3 def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._find_one(self.find_next_siblings, name, attrs, text, **kwargs) findNextSibling = find_next_sibling # BS3 def find_next_siblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._find_all(name, attrs, text, limit, self.next_siblings, **kwargs) findNextSiblings = find_next_siblings # BS3 fetchNextSiblings = find_next_siblings # BS2 def find_previous(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._find_one( self.find_all_previous, name, attrs, text, **kwargs) findPrevious = find_previous # BS3 def find_all_previous(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._find_all(name, attrs, text, limit, self.previous_elements, **kwargs) findAllPrevious = find_all_previous # BS3 fetchPrevious = find_all_previous # BS2 def 
find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._find_one(self.find_previous_siblings, name, attrs, text, **kwargs) findPreviousSibling = find_previous_sibling # BS3 def find_previous_siblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._find_all(name, attrs, text, limit, self.previous_siblings, **kwargs) findPreviousSiblings = find_previous_siblings # BS3 fetchPreviousSiblings = find_previous_siblings # BS2 def find_parent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _find_one because findParents takes a different # set of arguments. r = None l = self.find_parents(name, attrs, 1, **kwargs) if l: r = l[0] return r findParent = find_parent # BS3 def find_parents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._find_all(name, attrs, None, limit, self.parents, **kwargs) findParents = find_parents # BS3 fetchParents = find_parents # BS2 @property def next(self): return self.next_element @property def previous(self): return self.previous_element #These methods do the real heavy lifting. def _find_one(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _find_all(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." 
if text is None and 'string' in kwargs: text = kwargs['string'] del kwargs['string'] if isinstance(name, SoupStrainer): strainer = name else: strainer = SoupStrainer(name, attrs, text, **kwargs) if text is None and not limit and not attrs and not kwargs: if name is True or name is None: # Optimization to find all tags. result = (element for element in generator if isinstance(element, Tag)) return ResultSet(strainer, result) elif isinstance(name, basestring): # Optimization to find all tags with a given name. if name.count(':') == 1: # This is a name with a prefix. prefix, name = name.split(':', 1) else: prefix = None result = (element for element in generator if isinstance(element, Tag) and element.name == name and (prefix is None or element.prefix == prefix) ) return ResultSet(strainer, result) results = ResultSet(strainer) while True: try: i = next(generator) except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These generators can be used to navigate starting from both #NavigableStrings and Tags. @property def next_elements(self): i = self.next_element while i is not None: yield i i = i.next_element @property def next_siblings(self): i = self.next_sibling while i is not None: yield i i = i.next_sibling @property def previous_elements(self): i = self.previous_element while i is not None: yield i i = i.previous_element @property def previous_siblings(self): i = self.previous_sibling while i is not None: yield i i = i.previous_sibling @property def parents(self): i = self.parent while i is not None: yield i i = i.parent # Methods for supporting CSS selectors. 
tag_name_re = re.compile('^[a-zA-Z0-9][-.a-zA-Z0-9:_]*$') # /^([a-zA-Z0-9][-.a-zA-Z0-9:_]*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/ # \---------------------------/ \---/\-------------/ \-------/ # | | | | # | | | The value # | | ~,|,^,$,* or = # | Attribute # Tag attribselect_re = re.compile( r'^(?P<tag>[a-zA-Z0-9][-.a-zA-Z0-9:_]*)?\[(?P<attribute>[\w-]+)(?P<operator>[=~\|\^\$\*]?)' + r'=?"?(?P<value>[^\]"]*)"?\]$' ) def _attr_value_as_string(self, value, default=None): """Force an attribute value into a string representation. A multi-valued attribute will be converted into a space-separated stirng. """ value = self.get(value, default) if isinstance(value, list) or isinstance(value, tuple): value =" ".join(value) return value def _tag_name_matches_and(self, function, tag_name): if not tag_name: return function else: def _match(tag): return tag.name == tag_name and function(tag) return _match def _attribute_checker(self, operator, attribute, value=''): """Create a function that performs a CSS selector operation. Takes an operator, attribute and optional value. Returns a function that will return True for elements that match that combination. 
""" if operator == '=': # string representation of `attribute` is equal to `value` return lambda el: el._attr_value_as_string(attribute) == value elif operator == '~': # space-separated list representation of `attribute` # contains `value` def _includes_value(element): attribute_value = element.get(attribute, []) if not isinstance(attribute_value, list): attribute_value = attribute_value.split() return value in attribute_value return _includes_value elif operator == '^': # string representation of `attribute` starts with `value` return lambda el: el._attr_value_as_string( attribute, '').startswith(value) elif operator == '$': # string representation of `attribute` ends with `value` return lambda el: el._attr_value_as_string( attribute, '').endswith(value) elif operator == '*': # string representation of `attribute` contains `value` return lambda el: value in el._attr_value_as_string(attribute, '') elif operator == '|': # string representation of `attribute` is either exactly # `value` or starts with `value` and then a dash. def _is_or_starts_with_dash(element): attribute_value = element._attr_value_as_string(attribute, '') return (attribute_value == value or attribute_value.startswith( value + '-')) return _is_or_starts_with_dash else: return lambda el: el.has_attr(attribute) # Old non-property versions of the generators, for backwards # compatibility with BS3. def nextGenerator(self): return self.next_elements def nextSiblingGenerator(self): return self.next_siblings def previousGenerator(self): return self.previous_elements def previousSiblingGenerator(self): return self.previous_siblings def parentGenerator(self): return self.parents class NavigableString(unicode, PageElement): PREFIX = '' SUFFIX = '' # We can't tell just by looking at a string whether it's contained # in an XML document or an HTML document. known_xml = None def __new__(cls, value): """Create a new NavigableString. 
When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): u = unicode.__new__(cls, value) else: u = unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) u.setup() return u def __copy__(self): """A copy of a NavigableString has the same contents and class as the original, but it is not connected to the parse tree. """ return type(self)(self) def __getnewargs__(self): return (unicode(self),) def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError( "'%s' object has no attribute '%s'" % ( self.__class__.__name__, attr)) def output_ready(self, formatter="minimal"): output = self.format_string(self, formatter) return self.PREFIX + output + self.SUFFIX @property def name(self): return None @name.setter def name(self, name): raise AttributeError("A NavigableString cannot be given a name.") class PreformattedString(NavigableString): """A NavigableString not subject to the normal formatting rules. The string will be passed into the formatter (to trigger side effects), but the return value will be ignored. """ def output_ready(self, formatter="minimal"): """CData strings are passed into the formatter. But the return value is ignored.""" self.format_string(self, formatter) return self.PREFIX + self + self.SUFFIX class CData(PreformattedString): PREFIX = u'<![CDATA[' SUFFIX = u']]>' class ProcessingInstruction(PreformattedString): """A SGML processing instruction.""" PREFIX = u'<?' SUFFIX = u'>' class XMLProcessingInstruction(ProcessingInstruction): """An XML processing instruction.""" PREFIX = u'<?' 
SUFFIX = u'?>' class Comment(PreformattedString): PREFIX = u'<!--' SUFFIX = u'-->' class Declaration(PreformattedString): PREFIX = u'<?' SUFFIX = u'?>' class Doctype(PreformattedString): @classmethod def for_name_and_ids(cls, name, pub_id, system_id): value = name or '' if pub_id is not None: value += ' PUBLIC "%s"' % pub_id if system_id is not None: value += ' "%s"' % system_id elif system_id is not None: value += ' SYSTEM "%s"' % system_id return Doctype(value) PREFIX = u'<!DOCTYPE ' SUFFIX = u'>\n' class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def __init__(self, parser=None, builder=None, name=None, namespace=None, prefix=None, attrs=None, parent=None, previous=None, is_xml=None): "Basic constructor." if parser is None: self.parser_class = None else: # We don't actually store the parser object: that lets extracted # chunks be garbage-collected. self.parser_class = parser.__class__ if name is None: raise ValueError("No value provided for new tag's name.") self.name = name self.namespace = namespace self.prefix = prefix if builder is not None: preserve_whitespace_tags = builder.preserve_whitespace_tags else: if is_xml: preserve_whitespace_tags = [] else: preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags self.preserve_whitespace_tags = preserve_whitespace_tags if attrs is None: attrs = {} elif attrs: if builder is not None and builder.cdata_list_attributes: attrs = builder._replace_cdata_list_attribute_values( self.name, attrs) else: attrs = dict(attrs) else: attrs = dict(attrs) # If possible, determine ahead of time whether this tag is an # XML tag. if builder: self.known_xml = builder.is_xml else: self.known_xml = is_xml self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False # Set up any substitutions, such as the charset in a META tag. 
if builder is not None: builder.set_up_substitutions(self) self.can_be_empty_element = builder.can_be_empty_element(name) else: self.can_be_empty_element = False parserClass = _alias("parser_class") # BS3 def __copy__(self): """A copy of a Tag is a new Tag, unconnected to the parse tree. Its contents are a copy of the old Tag's contents. """ clone = type(self)(None, self.builder, self.name, self.namespace, self.prefix, self.attrs, is_xml=self._is_xml) for attr in ('can_be_empty_element', 'hidden'): setattr(clone, attr, getattr(self, attr)) for child in self.contents: clone.append(child.__copy__()) return clone @property def is_empty_element(self): """Is this tag an empty-element tag? (aka a self-closing tag) A tag that has contents is never an empty-element tag. A tag that has no contents may or may not be an empty-element tag. It depends on the builder used to create the tag. If the builder has a designated list of empty-element tags, then only a tag whose name shows up in that list is considered an empty-element tag. If the builder has no designated list of empty-element tags, then any tag with no contents is an empty-element tag. """ return len(self.contents) == 0 and self.can_be_empty_element isSelfClosing = is_empty_element # BS3 @property def string(self): """Convenience property to get the single string within this tag. :Return: If this tag has a single string child, return value is that string. If this tag has no children, or more than one child, return value is None. If this tag has one child tag, return value is the 'string' attribute of the child tag, recursively. """ if len(self.contents) != 1: return None child = self.contents[0] if isinstance(child, NavigableString): return child return child.string @string.setter def string(self, string): self.clear() self.append(string.__class__(string)) def _all_strings(self, strip=False, types=(NavigableString, CData)): """Yield all strings of certain classes, possibly stripping them. 
By default, yields only NavigableString and CData objects. So no comments, processing instructions, etc. """ for descendant in self.descendants: if ( (types is None and not isinstance(descendant, NavigableString)) or (types is not None and type(descendant) not in types)): continue if strip: descendant = descendant.strip() if len(descendant) == 0: continue yield descendant strings = property(_all_strings) @property def stripped_strings(self): for string in self._all_strings(True): yield string def get_text(self, separator=u"", strip=False, types=(NavigableString, CData)): """ Get all child strings, concatenated using the given separator. """ return separator.join([s for s in self._all_strings( strip, types=types)]) getText = get_text text = property(get_text) def decompose(self): """Recursively destroys the contents of this tree.""" self.extract() i = self while i is not None: next = i.next_element i.__dict__.clear() i.contents = [] i = next def clear(self, decompose=False): """ Extract all children. If decompose is True, decompose instead. """ if decompose: for element in self.contents[:]: if isinstance(element, Tag): element.decompose() else: element.extract() else: for element in self.contents[:]: element.extract() def index(self, element): """ Find the index of a child by identity, not value. Avoids issues with tag.contents.index(element) getting the index of equal elements. 
""" for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self.attrs.get(key, default) def get_attribute_list(self, key, default=None): """The same as get(), but always returns a list.""" value = self.get(key, default) if not isinstance(value, list): value = [value] return value def has_attr(self, key): return key in self.attrs def __hash__(self): return str(self).__hash__() def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self.attrs[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self.attrs[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." self.attrs.pop(key, None) def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its find_all() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return self.find_all(*args, **kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.endswith('Tag'): # BS3: soup.aTag -> "soup.find("a") tag_name = tag[:-3] warnings.warn( '.%sTag is deprecated, use .find("%s") instead.' % ( tag_name, tag_name)) return self.find(tag_name) # We special case contents to avoid recursion. 
elif not tag.startswith("__") and not tag == "contents": return self.find(tag) raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__, tag)) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag.""" if self is other: return True if (not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other)): return False for i, my_child in enumerate(self.contents): if my_child != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding="unicode-escape"): """Renders this tag as a string.""" if PY3K: # "The return value must be a string object", i.e. Unicode return self.decode() else: # "The return value must be a string object", i.e. a bytestring. # By convention, the return value of __repr__ should also be # an ASCII string. return self.encode(encoding) def __unicode__(self): return self.decode() def __str__(self): if PY3K: return self.decode() else: return self.encode() if PY3K: __str__ = __repr__ = __unicode__ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING, indent_level=None, formatter="minimal", errors="xmlcharrefreplace"): # Turn the data structure into Unicode, then encode the # Unicode. u = self.decode(indent_level, encoding, formatter) return u.encode(encoding, errors) def _should_pretty_print(self, indent_level): """Should this tag be pretty-printed?""" return ( indent_level is not None and self.name not in self.preserve_whitespace_tags ) def decode(self, indent_level=None, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Returns a Unicode representation of this tag and its contents. :param eventual_encoding: The tag is destined to be encoded into this encoding. 
This method is _not_ responsible for performing that encoding. This information is passed in so that it can be substituted in if the document contains a <META> tag that mentions the document's encoding. """ # First off, turn a string formatter into a function. This # will stop the lookup from happening over and over again. if not callable(formatter): formatter = self._formatter_for_name(formatter) attrs = [] if self.attrs: for key, val in sorted(self.attrs.items()): if val is None: decoded = key else: if isinstance(val, list) or isinstance(val, tuple): val = ' '.join(val) elif not isinstance(val, basestring): val = unicode(val) elif ( isinstance(val, AttributeValueWithCharsetSubstitution) and eventual_encoding is not None): val = val.encode(eventual_encoding) text = self.format_string(val, formatter) decoded = ( unicode(key) + '=' + EntitySubstitution.quoted_attribute_value(text)) attrs.append(decoded) close = '' closeTag = '' prefix = '' if self.prefix: prefix = self.prefix + ":" if self.is_empty_element: close = '/' else: closeTag = '</%s%s>' % (prefix, self.name) pretty_print = self._should_pretty_print(indent_level) space = '' indent_space = '' if indent_level is not None: indent_space = (' ' * (indent_level - 1)) if pretty_print: space = indent_space indent_contents = indent_level + 1 else: indent_contents = None contents = self.decode_contents( indent_contents, eventual_encoding, formatter) if self.hidden: # This is the 'document root' object. s = contents else: s = [] attribute_string = '' if attrs: attribute_string = ' ' + ' '.join(attrs) if indent_level is not None: # Even if this particular tag is not pretty-printed, # we should indent up to the start of the tag. 
s.append(indent_space) s.append('<%s%s%s%s>' % ( prefix, self.name, attribute_string, close)) if pretty_print: s.append("\n") s.append(contents) if pretty_print and contents and contents[-1] != "\n": s.append("\n") if pretty_print and closeTag: s.append(space) s.append(closeTag) if indent_level is not None and closeTag and self.next_sibling: # Even if this particular tag is not pretty-printed, # we're now done with the tag, and we should add a # newline if appropriate. s.append("\n") s = ''.join(s) return s def prettify(self, encoding=None, formatter="minimal"): if encoding is None: return self.decode(True, formatter=formatter) else: return self.encode(encoding, True, formatter=formatter) def decode_contents(self, indent_level=None, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Renders the contents of this tag as a Unicode string. :param indent_level: Each line of the rendering will be indented this many spaces. :param eventual_encoding: The tag is destined to be encoded into this encoding. This method is _not_ responsible for performing that encoding. This information is passed in so that it can be substituted in if the document contains a <META> tag that mentions the document's encoding. :param formatter: The output formatter responsible for converting entities to Unicode characters. """ # First off, turn a string formatter into a function. This # will stop the lookup from happening over and over again. 
if not callable(formatter): formatter = self._formatter_for_name(formatter) pretty_print = (indent_level is not None) s = [] for c in self: text = None if isinstance(c, NavigableString): text = c.output_ready(formatter) elif isinstance(c, Tag): s.append(c.decode(indent_level, eventual_encoding, formatter)) if text and indent_level and not self.name == 'pre': text = text.strip() if text: if pretty_print and not self.name == 'pre': s.append(" " * (indent_level - 1)) s.append(text) if pretty_print and not self.name == 'pre': s.append("\n") return ''.join(s) def encode_contents( self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Renders the contents of this tag as a bytestring. :param indent_level: Each line of the rendering will be indented this many spaces. :param eventual_encoding: The bytestring will be in this encoding. :param formatter: The output formatter responsible for converting entities to Unicode characters. """ contents = self.decode_contents(indent_level, encoding, formatter) return contents.encode(encoding) # Old method for BS3 compatibility def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): if not prettyPrint: indentLevel = None return self.encode_contents( indent_level=indentLevel, encoding=encoding) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.find_all(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def find_all(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. 
The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. The same is true of the tag name.""" generator = self.descendants if not recursive: generator = self.children return self._find_all(name, attrs, text, limit, generator, **kwargs) findAll = find_all # BS3 findChildren = find_all # BS2 #Generator methods @property def children(self): # return iter() to make the purpose of the method clear return iter(self.contents) # XXX This seems to be untested. @property def descendants(self): if not len(self.contents): return stopNode = self._last_descendant().next_element current = self.contents[0] while current is not stopNode: yield current current = current.next_element # CSS selector code _selector_combinators = ['>', '+', '~'] _select_debug = False quoted_colon = re.compile('"[^"]*:[^"]*"') def select_one(self, selector): """Perform a CSS selection operation on the current element.""" value = self.select(selector, limit=1) if value: return value[0] return None def select(self, selector, _candidate_generator=None, limit=None): """Perform a CSS selection operation on the current element.""" # Handle grouping selectors if ',' exists, ie: p,a if ',' in selector: context = [] for partial_selector in selector.split(','): partial_selector = partial_selector.strip() if partial_selector == '': raise ValueError('Invalid group selection syntax: %s' % selector) candidates = self.select(partial_selector, limit=limit) for candidate in candidates: if candidate not in context: context.append(candidate) if limit and len(context) >= limit: break return context tokens = shlex.split(selector) current_context = [self] if tokens[-1] in self._selector_combinators: raise ValueError( 'Final combinator "%s" is missing an argument.' 
% tokens[-1]) if self._select_debug: print 'Running CSS selector "%s"' % selector for index, token in enumerate(tokens): new_context = [] new_context_ids = set([]) if tokens[index-1] in self._selector_combinators: # This token was consumed by the previous combinator. Skip it. if self._select_debug: print ' Token was consumed by the previous combinator.' continue if self._select_debug: print ' Considering token "%s"' % token recursive_candidate_generator = None tag_name = None # Each operation corresponds to a checker function, a rule # for determining whether a candidate matches the # selector. Candidates are generated by the active # iterator. checker = None m = self.attribselect_re.match(token) if m is not None: # Attribute selector tag_name, attribute, operator, value = m.groups() checker = self._attribute_checker(operator, attribute, value) elif '#' in token: # ID selector tag_name, tag_id = token.split('#', 1) def id_matches(tag): return tag.get('id', None) == tag_id checker = id_matches elif '.' 
in token: # Class selector tag_name, klass = token.split('.', 1) classes = set(klass.split('.')) def classes_match(candidate): return classes.issubset(candidate.get('class', [])) checker = classes_match elif ':' in token and not self.quoted_colon.search(token): # Pseudo-class tag_name, pseudo = token.split(':', 1) if tag_name == '': raise ValueError( "A pseudo-class must be prefixed with a tag name.") pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo) found = [] if pseudo_attributes is None: pseudo_type = pseudo pseudo_value = None else: pseudo_type, pseudo_value = pseudo_attributes.groups() if pseudo_type == 'nth-of-type': try: pseudo_value = int(pseudo_value) except: raise NotImplementedError( 'Only numeric values are currently supported for the nth-of-type pseudo-class.') if pseudo_value < 1: raise ValueError( 'nth-of-type pseudo-class value must be at least 1.') class Counter(object): def __init__(self, destination): self.count = 0 self.destination = destination def nth_child_of_type(self, tag): self.count += 1 if self.count == self.destination: return True else: return False checker = Counter(pseudo_value).nth_child_of_type else: raise NotImplementedError( 'Only the following pseudo-classes are implemented: nth-of-type.') elif token == '*': # Star selector -- matches everything pass elif token == '>': # Run the next token as a CSS selector against the # direct children of each tag in the current context. recursive_candidate_generator = lambda tag: tag.children elif token == '~': # Run the next token as a CSS selector against the # siblings of each tag in the current context. recursive_candidate_generator = lambda tag: tag.next_siblings elif token == '+': # For each tag in the current context, run the next # token as a CSS selector against the tag's next # sibling that's a tag. def next_tag_sibling(tag): yield tag.find_next_sibling(True) recursive_candidate_generator = next_tag_sibling elif self.tag_name_re.match(token): # Just a tag name. 
tag_name = token else: raise ValueError( 'Unsupported or invalid CSS selector: "%s"' % token) if recursive_candidate_generator: # This happens when the selector looks like "> foo". # # The generator calls select() recursively on every # member of the current context, passing in a different # candidate generator and a different selector. # # In the case of "> foo", the candidate generator is # one that yields a tag's direct children (">"), and # the selector is "foo". next_token = tokens[index+1] def recursive_select(tag): if self._select_debug: print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs) print '-' * 40 for i in tag.select(next_token, recursive_candidate_generator): if self._select_debug: print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs) yield i if self._select_debug: print '-' * 40 _use_candidate_generator = recursive_select elif _candidate_generator is None: # By default, a tag's candidates are all of its # children. If tag_name is defined, only yield tags # with that name. if self._select_debug: if tag_name: check = "[any]" else: check = tag_name print ' Default candidate generator, tag name="%s"' % check if self._select_debug: # This is redundant with later code, but it stops # a bunch of bogus tags from cluttering up the # debug log. 
def default_candidate_generator(tag): for child in tag.descendants: if not isinstance(child, Tag): continue if tag_name and not child.name == tag_name: continue yield child _use_candidate_generator = default_candidate_generator else: _use_candidate_generator = lambda tag: tag.descendants else: _use_candidate_generator = _candidate_generator count = 0 for tag in current_context: if self._select_debug: print " Running candidate generator on %s %s" % ( tag.name, repr(tag.attrs)) for candidate in _use_candidate_generator(tag): if not isinstance(candidate, Tag): continue if tag_name and candidate.name != tag_name: continue if checker is not None: try: result = checker(candidate) except StopIteration: # The checker has decided we should no longer # run the generator. break if checker is None or result: if self._select_debug: print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs)) if id(candidate) not in new_context_ids: # If a tag matches a selector more than once, # don't include it in the context more than once. new_context.append(candidate) new_context_ids.add(id(candidate)) elif self._select_debug: print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs)) current_context = new_context if limit and len(current_context) >= limit: current_context = current_context[:limit] if self._select_debug: print "Final verdict:" for i in current_context: print " %s %s" % (i.name, i.attrs) return current_context # Old names for backwards compatibility def childGenerator(self): return self.children def recursiveChildGenerator(self): return self.descendants def has_key(self, key): """This was kind of misleading because has_key() (attributes) was different from __in__ (contents). has_key() is gone in Python 3, anyway.""" warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % ( key)) return self.has_attr(key) # Next, a couple classes to represent queries and their results. 
class SoupStrainer(object): """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = self._normalize_search_value(name) if not isinstance(attrs, dict): # Treat a non-dict value for attrs as a search for the 'class' # attribute. kwargs['class'] = attrs attrs = None if 'class_' in kwargs: # Treat class_="foo" as a search for the 'class' # attribute, overriding any non-dict value for attrs. kwargs['class'] = kwargs['class_'] del kwargs['class_'] if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs normalized_attrs = {} for key, value in attrs.items(): normalized_attrs[key] = self._normalize_search_value(value) self.attrs = normalized_attrs self.text = self._normalize_search_value(text) def _normalize_search_value(self, value): # Leave it alone if it's a Unicode string, a callable, a # regular expression, a boolean, or None. if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match') or isinstance(value, bool) or value is None): return value # If it's a bytestring, convert it to Unicode, treating it as UTF-8. if isinstance(value, bytes): return value.decode("utf8") # If it's listlike, convert it into a list of strings. if hasattr(value, '__iter__'): new_value = [] for v in value: if (hasattr(v, '__iter__') and not isinstance(v, bytes) and not isinstance(v, unicode)): # This is almost certainly the user's mistake. In the # interests of avoiding infinite loops, we'll let # it through as-is rather than doing a recursive call. new_value.append(v) else: new_value.append(self._normalize_search_value(v)) return new_value # Otherwise, convert it into a Unicode string. # The unicode(str()) thing is so this will do the same thing on Python 2 # and Python 3. 
return unicode(str(value)) def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def search_tag(self, markup_name=None, markup_attrs={}): found = None markup = None if isinstance(markup_name, Tag): markup = markup_name markup_attrs = markup call_function_with_tag_data = ( isinstance(self.name, collections.Callable) and not isinstance(markup_name, Tag)) if ((not self.name) or call_function_with_tag_data or (markup and self._matches(markup, self.name)) or (not markup and self._matches(markup_name, self.name))): if call_function_with_tag_data: match = self.name(markup_name, markup_attrs) else: match = True markup_attr_map = None for attr, match_against in list(self.attrs.items()): if not markup_attr_map: if hasattr(markup_attrs, 'get'): markup_attr_map = markup_attrs else: markup_attr_map = {} for k, v in markup_attrs: markup_attr_map[k] = v attr_value = markup_attr_map.get(attr) if not self._matches(attr_value, match_against): match = False break if match: if markup: found = markup else: found = markup_name if found and self.text and not self._matches(found.string, self.text): found = None return found searchTag = search_tag def search(self, markup): # print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text or self.name or self.attrs: found = self.search_tag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isinstance(markup, basestring): if not self.name and not self.attrs and self._matches(markup, self.text): found = markup else: raise Exception( "I don't know how to match against a %s" % markup.__class__) return found def _matches(self, markup, match_against, already_tried=None): # print u"Matching %s against %s" % (markup, match_against) result = False if isinstance(markup, list) or isinstance(markup, tuple): # This should only happen when searching a multi-valued attribute # like 'class'. for item in markup: if self._matches(item, match_against): return True # We didn't match any particular value of the multivalue # attribute, but maybe we match the attribute value when # considered as a string. if self._matches(' '.join(markup), match_against): return True return False if match_against is True: # True matches any non-None value. return markup is not None if isinstance(match_against, collections.Callable): return match_against(markup) # Custom callables take the tag as an argument, but all # other ways of matching match the tag name as a string. original_markup = markup if isinstance(markup, Tag): markup = markup.name # Ensure that `markup` is either a Unicode string, or None. markup = self._normalize_search_value(markup) if markup is None: # None matches None, False, an empty string, an empty list, and so on. return not match_against if (hasattr(match_against, '__iter__') and not isinstance(match_against, basestring)): # We're asked to match against an iterable of items. # The markup must be match at least one item in the # iterable. We'll try each one in turn. # # To avoid infinite recursion we need to keep track of # items we've already seen. 
if not already_tried: already_tried = set() for item in match_against: if item.__hash__: key = item else: key = id(item) if key in already_tried: continue else: already_tried.add(key) if self._matches(original_markup, item, already_tried): return True else: return False # Beyond this point we might need to run the test twice: once against # the tag's name and once against its prefixed name. match = False if not match and isinstance(match_against, unicode): # Exact string match match = markup == match_against if not match and hasattr(match_against, 'search'): # Regexp match return match_against.search(markup) if (not match and isinstance(original_markup, Tag) and original_markup.prefix): # Try the whole thing again with the prefixed tag name. return self._matches( original_markup.prefix + ':' + original_markup.name, match_against ) return match class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source, result=()): super(ResultSet, self).__init__(result) self.source = source def __getattr__(self, key): raise AttributeError( "ResultSet object has no attribute '%s'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?" % key )
68,821
Python
.py
1,547
32.621849
175
0.574397
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,808
diagnose.py
evilhero_mylar/lib/bs4/diagnose.py
"""Diagnostic functions, mainly for use when doing tech support.""" # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __license__ = "MIT" import cProfile from StringIO import StringIO from HTMLParser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry import os import pstats import random import tempfile import time import traceback import sys import cProfile def diagnose(data): """Diagnostic suite for isolating common problems.""" print "Diagnostic running on Beautiful Soup %s" % __version__ print "Python version %s" % sys.version basic_parsers = ["html.parser", "html5lib", "lxml"] for name in basic_parsers: for builder in builder_registry.builders: if name in builder.features: break else: basic_parsers.remove(name) print ( "I noticed that %s is not installed. Installing it may help." % name) if 'lxml' in basic_parsers: basic_parsers.append(["lxml", "xml"]) try: from lxml import etree print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) except ImportError, e: print ( "lxml is not installed or couldn't be imported.") if 'html5lib' in basic_parsers: try: import html5lib print "Found html5lib version %s" % html5lib.__version__ except ImportError, e: print ( "html5lib is not installed or couldn't be imported.") if hasattr(data, 'read'): data = data.read() elif os.path.exists(data): print '"%s" looks like a filename. Reading data from the file.' % data with open(data) as fp: data = fp.read() elif data.startswith("http:") or data.startswith("https:"): print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." 
return print for parser in basic_parsers: print "Trying to parse your markup with %s" % parser success = False try: soup = BeautifulSoup(data, parser) success = True except Exception, e: print "%s could not parse the markup." % parser traceback.print_exc() if success: print "Here's what %s did with the markup:" % parser print soup.prettify() print "-" * 80 def lxml_trace(data, html=True, **kwargs): """Print out the lxml events that occur during parsing. This lets you see how lxml parses a document when no Beautiful Soup code is running. """ from lxml import etree for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): print("%s, %4s, %s" % (event, element.tag, element.text)) class AnnouncingParser(HTMLParser): """Announces HTMLParser parse events, without doing anything else.""" def _p(self, s): print(s) def handle_starttag(self, name, attrs): self._p("%s START" % name) def handle_endtag(self, name): self._p("%s END" % name) def handle_data(self, data): self._p("%s DATA" % data) def handle_charref(self, name): self._p("%s CHARREF" % name) def handle_entityref(self, name): self._p("%s ENTITYREF" % name) def handle_comment(self, data): self._p("%s COMMENT" % data) def handle_decl(self, data): self._p("%s DECL" % data) def unknown_decl(self, data): self._p("%s UNKNOWN-DECL" % data) def handle_pi(self, data): self._p("%s PI" % data) def htmlparser_trace(data): """Print out the HTMLParser events that occur during parsing. This lets you see how HTMLParser parses a document when no Beautiful Soup code is running. """ parser = AnnouncingParser() parser.feed(data) _vowels = "aeiou" _consonants = "bcdfghjklmnpqrstvwxyz" def rword(length=5): "Generate a random word-like string." s = '' for i in range(length): if i % 2 == 0: t = _consonants else: t = _vowels s += random.choice(t) return s def rsentence(length=4): "Generate a random sentence-like string." 
return " ".join(rword(random.randint(4,9)) for i in range(length)) def rdoc(num_elements=1000): """Randomly generate an invalid HTML document.""" tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] elements = [] for i in range(num_elements): choice = random.randint(0,3) if choice == 0: # New tag. tag_name = random.choice(tag_names) elements.append("<%s>" % tag_name) elif choice == 1: elements.append(rsentence(random.randint(1,4))) elif choice == 2: # Close a tag. tag_name = random.choice(tag_names) elements.append("</%s>" % tag_name) return "<html>" + "\n".join(elements) + "</html>" def benchmark_parsers(num_elements=100000): """Very basic head-to-head performance benchmark.""" print "Comparative parser benchmark on Beautiful Soup %s" % __version__ data = rdoc(num_elements) print "Generated a large invalid HTML document (%d bytes)." % len(data) for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: success = False try: a = time.time() soup = BeautifulSoup(data, parser) b = time.time() success = True except Exception, e: print "%s could not parse the markup." % parser traceback.print_exc() if success: print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) from lxml import etree a = time.time() etree.HTML(data) b = time.time() print "Raw lxml parsed the markup in %.2fs." % (b-a) import html5lib parser = html5lib.HTMLParser() a = time.time() parser.parse(data) b = time.time() print "Raw html5lib parsed the markup in %.2fs." % (b-a) def profile(num_elements=100000, parser="lxml"): filehandle = tempfile.NamedTemporaryFile() filename = filehandle.name data = rdoc(num_elements) vars = dict(bs4=bs4, data=data, parser=parser) cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) stats = pstats.Stats(filename) # stats.strip_dirs() stats.sort_stats("cumulative") stats.print_stats('_html5lib|bs4', 50) if __name__ == '__main__': diagnose(sys.stdin.read())
6,747
Python
.py
180
30.2
128
0.619091
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,809
__init__.py
evilhero_mylar/lib/bs4/__init__.py
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup uses a pluggable XML or HTML parser to parse a (possibly invalid) document into a tree representation. Beautiful Soup provides methods and Pythonic idioms that make it easy to navigate, search, and modify the parse tree. Beautiful Soup works with Python 2.7 and up. It works better if lxml and/or html5lib is installed. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ """ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "4.6.0" __copyright__ = "Copyright (c) 2004-2017 Leonard Richardson" __license__ = "MIT" __all__ = ['BeautifulSoup'] import os import re import traceback import warnings from .builder import builder_registry, ParserRejectedMarkup from .dammit import UnicodeDammit from .element import ( CData, Comment, DEFAULT_OUTPUT_ENCODING, Declaration, Doctype, NavigableString, PageElement, ProcessingInstruction, ResultSet, SoupStrainer, Tag, ) # The very first thing we do is give a useful error if someone is # running this code under Python 3 without converting it. 'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' class BeautifulSoup(Tag): """ This class defines the basic interface called by the tree builders. 
    These methods will be called by the parser:
      reset()
      feed(markup)

    The tree builder may call these methods from its feed() implementation:
      handle_starttag(name, attrs) # See note about return value
      handle_endtag(name)
      handle_data(data) # Appends to the current data node
      endData(containerClass=NavigableString) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """

    # Name of the synthetic root tag that holds the whole document.
    ROOT_TAG_NAME = u'[document]'

    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']

    # ASCII whitespace (space, LF, TAB, FF, CR); endData() may collapse
    # runs of these when whitespace is not being preserved.
    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'

    # Warning issued when the caller lets Beautiful Soup pick a parser
    # implicitly; a different system might pick a different parser.
    NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup(YOUR_MARKUP})\n\nto this:\n\n BeautifulSoup(YOUR_MARKUP, \"%(parser)s\")\n"

    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, exclude_encodings=None,
                 **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        :param markup: Markup to parse (string or file-like object).
        :param features: Desired features of the parser to use.
        :param builder: A specific TreeBuilder instance to use.
        :param parse_only: A SoupStrainer; only matching parts are kept.
        :param from_encoding: Encoding to assume for byte-string markup.
        :param exclude_encodings: Encodings that must not be tried.
        :param kwargs: Inspected only for deprecated BS3 arguments.
        """

        # Reject or translate Beautiful Soup 3 constructor arguments,
        # warning the caller in each case.
        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")

        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")

        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")

        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")

        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. Suggest you use "
                "features='lxml' for HTML and features='lxml-xml' for "
                "XML.")

        def deprecated_argument(old_name, new_name):
            # Map a renamed BS3 keyword argument onto its BS4 value,
            # warning the caller about the rename.
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None

        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")

        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")

        # from_encoding only makes sense for byte strings; Unicode
        # markup has by definition already been decoded.
        if from_encoding and isinstance(markup, unicode):
            warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
            from_encoding = None

        # Anything left in kwargs at this point is unrecognized.
        if len(kwargs) > 0:
            arg = kwargs.keys().pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)

        if builder is None:
            original_features = features
            if isinstance(features, basestring):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
            # Warn when the parser was chosen implicitly rather than by
            # explicit name, since another system may choose differently.
            if not (original_features == builder.NAME or
                    original_features in builder.ALTERNATE_NAMES):
                if builder.is_xml:
                    markup_type = "XML"
                else:
                    markup_type = "HTML"
                caller = traceback.extract_stack()[0]
                filename = caller[0]
                line_number = caller[1]
                warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
                    filename=filename,
                    line_number=line_number,
                    parser=builder.NAME,
                    markup_type=markup_type))

        self.builder = builder
        self.is_xml = builder.is_xml
        self.known_xml = self.is_xml
        self.builder.soup = self

        self.parse_only = parse_only

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256 and (
                (isinstance(markup, bytes) and not b'<' in markup)
                or (isinstance(markup, unicode) and not u'<' in markup)
        ):
            # Print out warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # just in case that's what the user really wants.
            if (isinstance(markup, unicode)
                    and not os.path.supports_unicode_filenames):
                possible_filename = markup.encode("utf8")
            else:
                possible_filename = markup
            is_file = False
            try:
                is_file = os.path.exists(possible_filename)
            except Exception, e:
                # This is almost certainly a problem involving
                # characters not valid in filenames on this
                # system. Just let it go.
                pass
            if is_file:
                if isinstance(markup, unicode):
                    markup = markup.encode("utf8")
                warnings.warn(
                    '"%s" looks like a filename, not markup. You should'
                    ' probably open this file and pass the filehandle into'
                    ' Beautiful Soup.' % markup)
            self._check_markup_is_url(markup)

        # Try each candidate (markup, encoding) combination until the
        # parser accepts one of them.
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
                 self.builder.prepare_markup(
                     markup, from_encoding, exclude_encodings=exclude_encodings)):
            self.reset()
            try:
                self._feed()
                break
            except ParserRejectedMarkup:
                pass

        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None

    def __copy__(self):
        # Copy by re-parsing an encoded version of this tree rather
        # than copying node by node.
        copy = type(self)(
            self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
        )

        # Although we encoded the tree to UTF-8, that may not have
        # been the encoding of the original markup. Set the copy's
        # .original_encoding to reflect the original object's
        # .original_encoding.
        copy.original_encoding = self.original_encoding
        return copy

    def __getstate__(self):
        # Frequently a tree builder can't be pickled.
        d = dict(self.__dict__)
        if 'builder' in d and not self.builder.picklable:
            d['builder'] = None
        return d

    @staticmethod
    def _check_markup_is_url(markup):
        """
        Check if markup looks like it's actually a url and raise a warning
        if so. Markup can be unicode or str (py2) / bytes (py3).
        """
        if isinstance(markup, bytes):
            space = b' '
            cant_start_with = (b"http:", b"https:")
        elif isinstance(markup, unicode):
            space = u' '
            cant_start_with = (u"http:", u"https:")
        else:
            return

        if any(markup.startswith(prefix) for prefix in cant_start_with):
            # A single space-free http(s) token is almost certainly a
            # URL the user meant to fetch, not markup to parse.
            if not space in markup:
                if isinstance(markup, bytes):
                    decoded_markup = markup.decode('utf-8', 'replace')
                else:
                    decoded_markup = markup
                warnings.warn(
                    '"%s" looks like a URL. Beautiful Soup is not an'
                    ' HTTP client. You should probably use an HTTP client like'
                    ' requests to get the document behind the URL, and feed'
                    ' that document to Beautiful Soup.'
                    % decoded_markup
                )

    def _feed(self):
        # Run the builder over self.markup and finish the tree.
        # Convert the document to Unicode.
        self.builder.reset()

        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def reset(self):
        # Return the soup to a pristine pre-parse state.
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        self.preserve_whitespace_tag_stack = []
        self.pushTag(self)

    def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
        """Create a new tag associated with this soup."""
        return Tag(None, self.builder, name, namespace, nsprefix, attrs)

    def new_string(self, s, subclass=NavigableString):
        """Create a new NavigableString associated with this soup."""
        return subclass(s)

    # The document root has no siblings, so sibling insertion is
    # meaningless on the BeautifulSoup object itself.
    def insert_before(self, successor):
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")

    def insert_after(self, successor):
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")

    def popTag(self):
        # Pop the innermost open tag and make its parent current.
        tag = self.tagStack.pop()
        if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
            self.preserve_whitespace_tag_stack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
        # Inside e.g. <pre>, later whitespace collapsing must be skipped.
        if tag.name in self.builder.preserve_whitespace_tags:
            self.preserve_whitespace_tag_stack.append(tag)

    def endData(self, containerClass=NavigableString):
        # Flush accumulated character data into one string node of
        # type containerClass and attach it to the tree.
        if self.current_data:
            current_data = u''.join(self.current_data)
            # If whitespace is not preserved, and this string contains
            # nothing but ASCII spaces, replace it with a single space
            # or newline.
            if not self.preserve_whitespace_tag_stack:
                strippable = True
                for i in current_data:
                    if i not in self.ASCII_SPACES:
                        strippable = False
                        break
                if strippable:
                    if '\n' in current_data:
                        current_data = '\n'
                    else:
                        current_data = ' '

            # Reset the data collector.
            self.current_data = []

            # Should we add this string to the tree at all?
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(current_data)):
                return

            o = containerClass(current_data)
            self.object_was_parsed(o)

    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Add an object to the parse tree."""
        parent = parent or self.currentTag
        previous_element = most_recent_element or self._most_recent_element

        next_element = previous_sibling = next_sibling = None
        if isinstance(o, Tag):
            next_element = o.next_element
            next_sibling = o.next_sibling
            previous_sibling = o.previous_sibling
            if not previous_element:
                previous_element = o.previous_element

        o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

        self._most_recent_element = o
        parent.contents.append(o)

        if parent.next_sibling:
            # This node is being inserted into an element that has
            # already been parsed. Deal with any dangling references.
            # Find o's position in its parent, scanning from the end
            # (o was just appended, so it is normally last).
            index = len(parent.contents)-1
            while index >= 0:
                if parent.contents[index] is o:
                    break
                index -= 1
            else:
                raise ValueError(
                    "Error building tree: supposedly %r was inserted "
                    "into %r after the fact, but I don't see it!"
                    % ( o, parent )
                    )
            if index == 0:
                previous_element = parent
                previous_sibling = None
            else:
                previous_element = previous_sibling = parent.contents[index-1]
            if index == len(parent.contents)-1:
                next_element = parent.next_sibling
                next_sibling = None
            else:
                next_element = next_sibling = parent.contents[index+1]

            # Re-link the doubly-linked element/sibling chains around
            # the inserted node.
            o.previous_element = previous_element
            if previous_element:
                previous_element.next_element = o
            o.next_element = next_element
            if next_element:
                next_element.previous_element = o
            o.next_sibling = next_sibling
            if next_sibling:
                next_sibling.previous_sibling = o
            o.previous_sibling = previous_sibling
            if previous_sibling:
                previous_sibling.next_sibling = o

    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return

        most_recently_popped = None

        stack_size = len(self.tagStack)
        # Walk the stack from innermost to outermost (never index 0,
        # which is the document root).
        for i in range(stack_size - 1, 0, -1):
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()

        return most_recently_popped

    def handle_starttag(self, name, namespace, nsprefix, attrs):
        """Push a start tag on to the stack.

        If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
""" # print "Start tag %s: %s" % (name, attrs) self.endData() if (self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs))): return None tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element) if tag is None: return tag if self._most_recent_element: self._most_recent_element.next_element = tag self._most_recent_element = tag self.pushTag(tag) return tag def handle_endtag(self, name, nsprefix=None): #print "End tag: " + name self.endData() self._popToTag(name, nsprefix) def handle_data(self, data): self.current_data.append(data) def decode(self, pretty_print=False, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Returns a string or Unicode representation of this document. To get Unicode, pass None for encoding.""" if self.is_xml: # Print the XML declaration encoding_part = '' if eventual_encoding != None: encoding_part = ' encoding="%s"' % eventual_encoding prefix = u'<?xml version="1.0"%s?>\n' % encoding_part else: prefix = u'' if not pretty_print: indent_level = None else: indent_level = 0 return prefix + super(BeautifulSoup, self).decode( indent_level, eventual_encoding, formatter) # Alias to make it easier to type import: 'from bs4 import _soup' _s = BeautifulSoup _soup = BeautifulSoup class BeautifulStoneSoup(BeautifulSoup): """Deprecated interface to an XML parser.""" def __init__(self, *args, **kwargs): kwargs['features'] = 'xml' warnings.warn( 'The BeautifulStoneSoup class is deprecated. Instead of using ' 'it, pass features="xml" into the BeautifulSoup constructor.') super(BeautifulStoneSoup, self).__init__(*args, **kwargs) class StopParsing(Exception): pass class FeatureNotFound(ValueError): pass #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
20,420
Python
.py
448
34.267857
572
0.599517
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,810
testing.py
evilhero_mylar/lib/bs4/testing.py
"""Helper classes for tests.""" # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __license__ = "MIT" import pickle import copy import functools import unittest from unittest import TestCase from bs4 import BeautifulSoup from bs4.element import ( CharsetMetaAttributeValue, Comment, ContentMetaAttributeValue, Doctype, SoupStrainer, ) from bs4.builder import HTMLParserTreeBuilder default_builder = HTMLParserTreeBuilder class SoupTest(unittest.TestCase): @property def default_builder(self): return default_builder() def soup(self, markup, **kwargs): """Build a Beautiful Soup object from markup.""" builder = kwargs.pop('builder', self.default_builder) return BeautifulSoup(markup, builder=builder, **kwargs) def document_for(self, markup): """Turn an HTML fragment into a document. The details depend on the builder. """ return self.default_builder.test_fragment_to_document(markup) def assertSoupEquals(self, to_parse, compare_parsed_to=None): builder = self.default_builder obj = BeautifulSoup(to_parse, builder=builder) if compare_parsed_to is None: compare_parsed_to = to_parse self.assertEqual(obj.decode(), self.document_for(compare_parsed_to)) def assertConnectedness(self, element): """Ensure that next_element and previous_element are properly set for all descendants of the given element. """ earlier = None for e in element.descendants: if earlier: self.assertEqual(e, earlier.next_element) self.assertEqual(earlier, e.previous_element) earlier = e class HTMLTreeBuilderSmokeTest(object): """A basic test of a treebuilder's competence. Any HTML treebuilder, present or future, should be able to pass these tests. With invalid markup, there's room for interpretation, and different parsers can handle it differently. But with the markup in these tests, there's not much room for interpretation. """ def test_empty_element_tags(self): """Verify that all HTML4 and HTML5 empty element (aka void element) tags are handled correctly. 
""" for name in [ 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', 'spacer', 'frame' ]: soup = self.soup("") new_tag = soup.new_tag(name) self.assertEqual(True, new_tag.is_empty_element) def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. tree = self.soup("<a><b>foo</a>") dumped = pickle.dumps(tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), tree.decode()) def assertDoctypeHandled(self, doctype_fragment): """Assert that a given doctype string is handled correctly.""" doctype_str, soup = self._document_with_doctype(doctype_fragment) # Make sure a Doctype object was created. doctype = soup.contents[0] self.assertEqual(doctype.__class__, Doctype) self.assertEqual(doctype, doctype_fragment) self.assertEqual(str(soup)[:len(doctype_str)], doctype_str) # Make sure that the doctype was correctly associated with the # parse tree and that the rest of the document parsed. 
self.assertEqual(soup.p.contents[0], 'foo') def _document_with_doctype(self, doctype_fragment): """Generate and parse a document with the given doctype.""" doctype = '<!DOCTYPE %s>' % doctype_fragment markup = doctype + '\n<p>foo</p>' soup = self.soup(markup) return doctype, soup def test_normal_doctypes(self): """Make sure normal, everyday HTML doctypes are handled correctly.""" self.assertDoctypeHandled("html") self.assertDoctypeHandled( 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"') def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_public_doctype_with_url(self): doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' self.assertDoctypeHandled(doctype) def test_system_doctype(self): self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') def test_namespaced_system_doctype(self): # We can handle a namespaced doctype with a system ID. self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') def test_namespaced_public_doctype(self): # Test a namespaced doctype with a public id. self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') def test_real_xhtml_document(self): """A real XHTML document should come out more or less the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8").replace(b"\n", b""), markup.replace(b"\n", b"")) def test_processing_instruction(self): # We test both Unicode and bytestring to verify that # process_markup correctly sets processing_instruction_class # even when the markup is already Unicode and there is no # need to process anything. 
markup = u"""<?PITarget PIContent?>""" soup = self.soup(markup) self.assertEqual(markup, soup.decode()) markup = b"""<?PITarget PIContent?>""" soup = self.soup(markup) self.assertEqual(markup, soup.encode("utf8")) def test_deepcopy(self): """Make sure you can copy the tree builder. This is important because the builder is part of a BeautifulSoup object, and we want to be able to copy that. """ copy.deepcopy(self.default_builder) def test_p_tag_is_never_empty_element(self): """A <p> tag is never designated as an empty-element tag. Even if the markup shows it as an empty-element tag, it shouldn't be presented that way. """ soup = self.soup("<p/>") self.assertFalse(soup.p.is_empty_element) self.assertEqual(str(soup.p), "<p></p>") def test_unclosed_tags_get_closed(self): """A tag that's not closed by the end of the document should be closed. This applies to all tags except empty-element tags. """ self.assertSoupEquals("<p>", "<p></p>") self.assertSoupEquals("<b>", "<b></b>") self.assertSoupEquals("<br>", "<br/>") def test_br_is_always_empty_element_tag(self): """A <br> tag is designated as an empty-element tag. Some parsers treat <br></br> as one <br/> tag, some parsers as two tags, but it should always be an empty-element tag. """ soup = self.soup("<br></br>") self.assertTrue(soup.br.is_empty_element) self.assertEqual(str(soup.br), "<br/>") def test_nested_formatting_elements(self): self.assertSoupEquals("<em><em></em></em>") def test_double_head(self): html = '''<!DOCTYPE html> <html> <head> <title>Ordinary HEAD element test</title> </head> <script type="text/javascript"> alert("Help!"); </script> <body> Hello, world! </body> </html> ''' soup = self.soup(html) self.assertEqual("text/javascript", soup.find('script')['type']) def test_comment(self): # Comments are represented as Comment objects. 
markup = "<p>foo<!--foobar-->baz</p>" self.assertSoupEquals(markup) soup = self.soup(markup) comment = soup.find(text="foobar") self.assertEqual(comment.__class__, Comment) # The comment is properly integrated into the tree. foo = soup.find(text="foo") self.assertEqual(comment, foo.next_element) baz = soup.find(text="baz") self.assertEqual(comment, baz.previous_element) def test_preserved_whitespace_in_pre_and_textarea(self): """Whitespace must be preserved in <pre> and <textarea> tags, even if that would mean not prettifying the markup. """ pre_markup = "<pre> </pre>" textarea_markup = "<textarea> woo\nwoo </textarea>" self.assertSoupEquals(pre_markup) self.assertSoupEquals(textarea_markup) soup = self.soup(pre_markup) self.assertEqual(soup.pre.prettify(), pre_markup) soup = self.soup(textarea_markup) self.assertEqual(soup.textarea.prettify(), textarea_markup) soup = self.soup("<textarea></textarea>") self.assertEqual(soup.textarea.prettify(), "<textarea></textarea>") def test_nested_inline_elements(self): """Inline elements can be nested indefinitely.""" b_tag = "<b>Inside a B tag</b>" self.assertSoupEquals(b_tag) nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>" self.assertSoupEquals(nested_b_tag) double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>" self.assertSoupEquals(nested_b_tag) def test_nested_block_level_elements(self): """Block elements can be nested.""" soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>') blockquote = soup.blockquote self.assertEqual(blockquote.p.b.string, 'Foo') self.assertEqual(blockquote.b.string, 'Foo') def test_correctly_nested_tables(self): """One table can go inside another one.""" markup = ('<table id="1">' '<tr>' "<td>Here's another table:" '<table id="2">' '<tr><td>foo</td></tr>' '</table></td>') self.assertSoupEquals( markup, '<table id="1"><tr><td>Here\'s another table:' '<table id="2"><tr><td>foo</td></tr></table>' '</td></tr></table>') self.assertSoupEquals( 
"<table><thead><tr><td>Foo</td></tr></thead>" "<tbody><tr><td>Bar</td></tr></tbody>" "<tfoot><tr><td>Baz</td></tr></tfoot></table>") def test_deeply_nested_multivalued_attribute(self): # html5lib can set the attributes of the same tag many times # as it rearranges the tree. This has caused problems with # multivalued attributes. markup = '<table><div><div class="css"></div></div></table>' soup = self.soup(markup) self.assertEqual(["css"], soup.div.div['class']) def test_multivalued_attribute_on_html(self): # html5lib uses a different API to set the attributes ot the # <html> tag. This has caused problems with multivalued # attributes. markup = '<html class="a b"></html>' soup = self.soup(markup) self.assertEqual(["a", "b"], soup.html['class']) def test_angle_brackets_in_attribute_values_are_escaped(self): self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>') def test_entities_in_attributes_converted_to_unicode(self): expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>' self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect) def test_entities_in_text_converted_to_unicode(self): expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>' self.assertSoupEquals("<p>pi&#241;ata</p>", expect) self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect) def test_quot_entity_converted_to_quotation_mark(self): self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>", '<p>I said "good day!"</p>') def test_out_of_range_entity(self): expect = u"\N{REPLACEMENT CHARACTER}" self.assertSoupEquals("&#10000000000000;", expect) self.assertSoupEquals("&#x10000000000000;", expect) self.assertSoupEquals("&#1000000000;", expect) def test_multipart_strings(self): "Mostly to prevent a 
recurrence of a bug in the html5lib treebuilder." soup = self.soup("<html><h2>\nfoo</h2><p></p></html>") self.assertEqual("p", soup.h2.string.next_element.name) self.assertEqual("p", soup.p.name) self.assertConnectedness(soup) def test_empty_element_tags(self): """Verify consistent handling of empty-element tags, no matter how they come in through the markup. """ self.assertSoupEquals('<br/><br/><br/>', "<br/><br/><br/>") self.assertSoupEquals('<br /><br /><br />', "<br/><br/><br/>") def test_head_tag_between_head_and_body(self): "Prevent recurrence of a bug in the html5lib treebuilder." content = """<html><head></head> <link></link> <body>foo</body> </html> """ soup = self.soup(content) self.assertNotEqual(None, soup.html.body) self.assertConnectedness(soup) def test_multiple_copies_of_a_tag(self): "Prevent recurrence of a bug in the html5lib treebuilder." content = """<!DOCTYPE html> <html> <body> <article id="a" > <div><a href="1"></div> <footer> <a href="2"></a> </footer> </article> </body> </html> """ soup = self.soup(content) self.assertConnectedness(soup.article) def test_basic_namespaces(self): """Parsers don't need to *understand* namespaces, but at the very least they should not choke on namespaces or lose data.""" markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>' soup = self.soup(markup) self.assertEqual(markup, soup.encode()) html = soup.html self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns']) self.assertEqual( 'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml']) self.assertEqual( 'http://www.w3.org/2000/svg', soup.html['xmlns:svg']) def test_multivalued_attribute_value_becomes_list(self): markup = b'<a class="foo bar">' soup = self.soup(markup) self.assertEqual(['foo', 'bar'], soup.a['class']) # # Generally speaking, tests below this point are 
more tests of # Beautiful Soup than tests of the tree builders. But parsers are # weird, so we run these tests separately for every tree builder # to detect any differences between them. # def test_can_parse_unicode_document(self): # A seemingly innocuous document... but it's in Unicode! And # it contains characters that can't be represented in the # encoding found in the declaration! The horror! markup = u'<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>' soup = self.soup(markup) self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string) def test_soupstrainer(self): """Parsers should be able to work with SoupStrainers.""" strainer = SoupStrainer("b") soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>", parse_only=strainer) self.assertEqual(soup.decode(), "<b>bold</b>") def test_single_quote_attribute_values_become_double_quotes(self): self.assertSoupEquals("<foo attr='bar'></foo>", '<foo attr="bar"></foo>') def test_attribute_values_with_nested_quotes_are_left_alone(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" self.assertSoupEquals(text) def test_attribute_values_with_double_nested_quotes_get_quoted(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" soup = self.soup(text) soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' self.assertSoupEquals( soup.foo.decode(), """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""") def test_ampersand_in_attribute_value_gets_escaped(self): self.assertSoupEquals('<this is="really messed up & stuff"></this>', '<this is="really messed up &amp; stuff"></this>') self.assertSoupEquals( '<a href="http://example.org?a=1&b=2;3">foo</a>', '<a href="http://example.org?a=1&amp;b=2;3">foo</a>') def test_escaped_ampersand_in_attribute_value_is_left_alone(self): self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>') def test_entities_in_strings_converted_during_parsing(self): # Both XML and HTML entities are converted to Unicode 
characters # during parsing. text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>" self.assertSoupEquals(text, expected) def test_smart_quotes_converted_on_the_way_in(self): # Microsoft smart quotes are converted to Unicode characters during # parsing. quote = b"<p>\x91Foo\x92</p>" soup = self.soup(quote) self.assertEqual( soup.p.string, u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") def test_non_breaking_spaces_converted_on_the_way_in(self): soup = self.soup("<a>&nbsp;&nbsp;</a>") self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2) def test_entities_converted_on_the_way_out(self): text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8") soup = self.soup(text) self.assertEqual(soup.p.encode("utf-8"), expected) def test_real_iso_latin_document(self): # Smoke test of interrelated functionality, using an # easy-to-understand document. # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>' # That's because we're going to encode it into ISO-Latin-1, and use # that to test. iso_latin_html = unicode_html.encode("iso-8859-1") # Parse the ISO-Latin-1 HTML. soup = self.soup(iso_latin_html) # Encode it to UTF-8. result = soup.encode("utf-8") # What do we expect the result to look like? Well, it would # look like unicode_html, except that the META tag would say # UTF-8 instead of ISO-Latin-1. expected = unicode_html.replace("ISO-Latin-1", "utf-8") # And, of course, it would be in UTF-8, not Unicode. expected = expected.encode("utf-8") # Ta-da! 
self.assertEqual(result, expected) def test_real_shift_jis_document(self): # Smoke test to make sure the parser can handle a document in # Shift-JIS encoding, without choking. shift_jis_html = ( b'<html><head></head><body><pre>' b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f' b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c' b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B' b'</pre></body></html>') unicode_html = shift_jis_html.decode("shift-jis") soup = self.soup(unicode_html) # Make sure the parse tree is correctly encoded to various # encodings. self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8")) self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp")) def test_real_hebrew_document(self): # A real-world test to make sure we can convert ISO-8859-9 (a # Hebrew encoding) to UTF-8. hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>' soup = self.soup( hebrew_document, from_encoding="iso8859-8") # Some tree builders call it iso8859-8, others call it iso-8859-9. # That's not a difference we really care about. assert soup.original_encoding in ('iso8859-8', 'iso-8859-8') self.assertEqual( soup.encode('utf-8'), hebrew_document.decode("iso8859-8").encode("utf-8")) def test_meta_tag_reflects_current_encoding(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ('<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>') # Here's a document incorporating that meta tag. shift_jis_html = ( '<html><head>\n%s\n' '<meta http-equiv="Content-language" content="ja"/>' '</head><body>Shift-JIS markup goes here.') % meta_tag soup = self.soup(shift_jis_html) # Parse the document, and the charset is seemingly unaffected. 
        parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
        content = parsed_meta['content']
        self.assertEqual('text/html; charset=x-sjis', content)

        # But that value is actually a ContentMetaAttributeValue object.
        self.assertTrue(isinstance(content, ContentMetaAttributeValue))

        # And it will take on a value that reflects its current
        # encoding.
        self.assertEqual('text/html; charset=utf8', content.encode("utf8"))

        # For the rest of the story, see TestSubstitutions in
        # test_tree.py.

    def test_html5_style_meta_tag_reflects_current_encoding(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta id="encoding" charset="x-sjis" />')

        # Here's a document incorporating that meta tag.
        shift_jis_html = (
            '<html><head>\n%s\n'
            '<meta http-equiv="Content-language" content="ja"/>'
            '</head><body>Shift-JIS markup goes here.') % meta_tag
        soup = self.soup(shift_jis_html)

        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', id="encoding")
        charset = parsed_meta['charset']
        self.assertEqual('x-sjis', charset)

        # But that value is actually a CharsetMetaAttributeValue object.
        self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))

        # And it will take on a value that reflects its current
        # encoding.
        self.assertEqual('utf8', charset.encode("utf8"))

    def test_tag_with_no_attributes_can_have_attributes_added(self):
        data = self.soup("<a>text</a>")
        data.a['foo'] = 'bar'
        self.assertEqual('<a foo="bar">text</a>', data.a.decode())


class XMLTreeBuilderSmokeTest(object):
    # Smoke tests that any XML tree builder should pass.

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        tree = self.soup("<a><b>foo</a>")
        dumped = pickle.dumps(tree, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(loaded.decode(), tree.decode())

    def test_docstring_generated(self):
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')

    def test_xml_declaration(self):
        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
        soup = self.soup(markup)
        self.assertEqual(markup, soup.encode("utf8"))

    def test_processing_instruction(self):
        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
        soup = self.soup(markup)
        self.assertEqual(markup, soup.encode("utf8"))

    def test_real_xhtml_document(self):
        """A real XHTML document should come out *exactly* the same as it went in."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        self.assertEqual(
            soup.encode("utf-8"), markup)

    def test_formatter_processes_script_tag_for_xml_documents(self):
        doc = """
<script type="text/javascript">
</script>
"""
        soup = BeautifulSoup(doc, "lxml-xml")
        # lxml would have stripped this while parsing, but we can add
        # it later.
        soup.script.string = 'console.log("< < hey > > ");'
        encoded = soup.encode()
        self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded)

    def test_can_parse_unicode_document(self):
        markup = u'<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
        soup = self.soup(markup)
        self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string)

    def test_popping_namespaced_tag(self):
        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
        soup = self.soup(markup)
        self.assertEqual(
            unicode(soup.rss), markup)

    def test_docstring_includes_correct_encoding(self):
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode("latin1"),
            b'<?xml version="1.0" encoding="latin1"?>\n<root/>')

    def test_large_xml_document(self):
        """A large XML document should come out the same as it went in."""
        markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
                  + b'0' * (2**12)
                  + b'</root>')
        soup = self.soup(markup)
        self.assertEqual(soup.encode("utf-8"), markup)

    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
        self.assertSoupEquals("<p>", "<p/>")
        self.assertSoupEquals("<p>foo</p>")

    def test_namespaces_are_preserved(self):
        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
        soup = self.soup(markup)
        root = soup.root
        self.assertEqual("http://example.com/", root['xmlns:a'])
        self.assertEqual("http://example.net/", root['xmlns:b'])

    def test_closing_namespaced_tag(self):
        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
        soup = self.soup(markup)
        self.assertEqual(unicode(soup.p), markup)

    def test_namespaced_attributes(self):
        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
        soup = self.soup(markup)
        self.assertEqual(unicode(soup.foo), markup)

    def test_namespaced_attributes_xml_namespace(self):
        markup = '<foo xml:lang="fr">bar</foo>'
        soup = self.soup(markup)
        self.assertEqual(unicode(soup.foo), markup)

    def test_find_by_prefixed_name(self):
        # NOTE(review): the Document root element below appears to be
        # missing a closing '>' after the xmlns:ns2 attribute — verify
        # against upstream before relying on this markup.
        doc = """<?xml version="1.0" encoding="utf-8"?>
<Document xmlns="http://example.com/ns0"
xmlns:ns1="http://example.com/ns1"
xmlns:ns2="http://example.com/ns2"
<ns1:tag>foo</ns1:tag>
<ns1:tag>bar</ns1:tag>
<ns2:tag key="value">baz</ns2:tag>
</Document>
"""
        soup = self.soup(doc)

        # There are three <tag> tags.
        self.assertEqual(3, len(soup.find_all('tag')))

        # But two of them are ns1:tag and one of them is ns2:tag.
        self.assertEqual(2, len(soup.find_all('ns1:tag')))
        self.assertEqual(1, len(soup.find_all('ns2:tag')))
        self.assertEqual(1, len(soup.find_all('ns2:tag', key='value')))
        self.assertEqual(3, len(soup.find_all(['ns1:tag', 'ns2:tag'])))

    def test_copy_tag_preserves_namespace(self):
        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<w:document xmlns:w="http://example.com/ns0"/>"""

        soup = self.soup(xml)
        tag = soup.document
        duplicate = copy.copy(tag)

        # The two tags have the same namespace prefix.
        self.assertEqual(tag.prefix, duplicate.prefix)


class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
    """Smoke test for a tree builder that supports HTML5."""

    def test_real_xhtml_document(self):
        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
        # XHTML documents in any particular way.
pass def test_html_tags_have_namespace(self): markup = "<a>" soup = self.soup(markup) self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace) def test_svg_tags_have_namespace(self): markup = '<svg><circle/></svg>' soup = self.soup(markup) namespace = "http://www.w3.org/2000/svg" self.assertEqual(namespace, soup.svg.namespace) self.assertEqual(namespace, soup.circle.namespace) def test_mathml_tags_have_namespace(self): markup = '<math><msqrt>5</msqrt></math>' soup = self.soup(markup) namespace = 'http://www.w3.org/1998/Math/MathML' self.assertEqual(namespace, soup.math.namespace) self.assertEqual(namespace, soup.msqrt.namespace) def test_xml_declaration_becomes_comment(self): markup = '<?xml version="1.0" encoding="utf-8"?><html></html>' soup = self.soup(markup) self.assertTrue(isinstance(soup.contents[0], Comment)) self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') self.assertEqual("html", soup.contents[0].next_element.name) def skipIf(condition, reason): def nothing(test, *args, **kwargs): return None def decorator(test_item): if condition: return nothing else: return test_item return decorator
30,829
Python
.py
631
40.431062
237
0.628867
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,811
dammit.py
evilhero_mylar/lib/bs4/dammit.py
# -*- coding: utf-8 -*- """Beautiful Soup bonus library: Unicode, Dammit This library converts a bytestream to Unicode through any means necessary. It is heavily based on code from Mark Pilgrim's Universal Feed Parser. It works best on XML and HTML, but it does not rewrite the XML or HTML to reflect a new encoding; that's the tree builder's job. """ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __license__ = "MIT" import codecs from htmlentitydefs import codepoint2name import re import logging import string # Import a library to autodetect character encodings. chardet_type = None try: # First try the fast C implementation. # PyPI package: cchardet import cchardet def chardet_dammit(s): return cchardet.detect(s)['encoding'] except ImportError: try: # Fall back to the pure Python implementation # Debian package: python-chardet # PyPI package: chardet import chardet def chardet_dammit(s): return chardet.detect(s)['encoding'] #import chardet.constants #chardet.constants._debug = 1 except ImportError: # No chardet available. def chardet_dammit(s): return None # Available from http://cjkpython.i18n.org/. try: import iconv_codec except ImportError: pass xml_encoding_re = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I) html_meta_re = re.compile( '<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I) class EntitySubstitution(object): """Substitute XML or HTML entities for the corresponding characters.""" def _populate_class_variables(): lookup = {} reverse_lookup = {} characters_for_re = [] for codepoint, name in list(codepoint2name.items()): character = unichr(codepoint) if codepoint != 34: # There's no point in turning the quotation mark into # &quot;, unless it happens within an attribute value, which # is handled elsewhere. characters_for_re.append(character) lookup[character] = name # But we do want to turn &quot; into the quotation mark. 
reverse_lookup[name] = character re_definition = "[%s]" % "".join(characters_for_re) return lookup, reverse_lookup, re.compile(re_definition) (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER, CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables() CHARACTER_TO_XML_ENTITY = { "'": "apos", '"': "quot", "&": "amp", "<": "lt", ">": "gt", } BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" ")") AMPERSAND_OR_BRACKET = re.compile("([<>&])") @classmethod def _substitute_html_entity(cls, matchobj): entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) return "&%s;" % entity @classmethod def _substitute_xml_entity(cls, matchobj): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)] return "&%s;" % entity @classmethod def quoted_attribute_value(self, value): """Make a value into a quoted XML attribute, possibly escaping it. Most strings will be quoted using double quotes. Bob's Bar -> "Bob's Bar" If a string contains double quotes, it will be quoted using single quotes. Welcome to "my bar" -> 'Welcome to "my bar"' If a string contains both single and double quotes, the double quotes will be escaped, and the string will be quoted using double quotes. Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot; """ quote_with = '"' if '"' in value: if "'" in value: # The string contains both single and double # quotes. Turn the double quotes into # entities. We quote the double quotes rather than # the single quotes because the entity name is # "&quot;" whether this is HTML or XML. If we # quoted the single quotes, we'd have to decide # between &apos; and &squot;. replace_with = "&quot;" value = value.replace('"', replace_with) else: # There are double quotes but no single quotes. # We can use single quotes to quote the attribute. 
quote_with = "'" return quote_with + value + quote_with @classmethod def substitute_xml(cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands will become &amp;. If you want ampersands that appear to be part of an entity definition to be left alone, use substitute_xml_containing_entities() instead. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value. """ # Escape angle brackets and ampersands. value = cls.AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value @classmethod def substitute_xml_containing_entities( cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. :param value: A string to be substituted. The less-than sign will become &lt;, the greater-than sign will become &gt;, and any ampersands that are not part of an entity defition will become &amp;. :param make_quoted_attribute: If True, then the string will be quoted, as befits an attribute value. """ # Escape angle brackets, and ampersands that aren't part of # entities. value = cls.BARE_AMPERSAND_OR_BRACKET.sub( cls._substitute_xml_entity, value) if make_quoted_attribute: value = cls.quoted_attribute_value(value) return value @classmethod def substitute_html(cls, s): """Replace certain Unicode characters with named HTML entities. This differs from data.encode(encoding, 'xmlcharrefreplace') in that the goal is to make the result more readable (to those with ASCII displays) rather than to recover from errors. There's absolutely nothing wrong with a UTF-8 string containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that character with "&eacute;" will make it more readable to some people. 
""" return cls.CHARACTER_TO_HTML_ENTITY_RE.sub( cls._substitute_html_entity, s) class EncodingDetector: """Suggests a number of possible encodings for a bytestring. Order of precedence: 1. Encodings you specifically tell EncodingDetector to try first (the override_encodings argument to the constructor). 2. An encoding declared within the bytestring itself, either in an XML declaration (if the bytestring is to be interpreted as an XML document), or in a <meta> tag (if the bytestring is to be interpreted as an HTML document.) 3. An encoding detected through textual analysis by chardet, cchardet, or a similar external library. 4. UTF-8. 5. Windows-1252. """ def __init__(self, markup, override_encodings=None, is_html=False, exclude_encodings=None): self.override_encodings = override_encodings or [] exclude_encodings = exclude_encodings or [] self.exclude_encodings = set([x.lower() for x in exclude_encodings]) self.chardet_encoding = None self.is_html = is_html self.declared_encoding = None # First order of business: strip a byte-order mark. self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) def _usable(self, encoding, tried): if encoding is not None: encoding = encoding.lower() if encoding in self.exclude_encodings: return False if encoding not in tried: tried.add(encoding) return True return False @property def encodings(self): """Yield a number of encodings that might work for this markup.""" tried = set() for e in self.override_encodings: if self._usable(e, tried): yield e # Did the document originally start with a byte-order mark # that indicated its encoding? if self._usable(self.sniffed_encoding, tried): yield self.sniffed_encoding # Look within the document for an XML or HTML encoding # declaration. 
if self.declared_encoding is None: self.declared_encoding = self.find_declared_encoding( self.markup, self.is_html) if self._usable(self.declared_encoding, tried): yield self.declared_encoding # Use third-party character set detection to guess at the # encoding. if self.chardet_encoding is None: self.chardet_encoding = chardet_dammit(self.markup) if self._usable(self.chardet_encoding, tried): yield self.chardet_encoding # As a last-ditch effort, try utf-8 and windows-1252. for e in ('utf-8', 'windows-1252'): if self._usable(e, tried): yield e @classmethod def strip_byte_order_mark(cls, data): """If a byte-order mark is present, strip it and return the encoding it implies.""" encoding = None if isinstance(data, unicode): # Unicode data cannot have a byte-order mark. return data, encoding if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == b'\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == b'\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == b'\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] return data, encoding @classmethod def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): """Given a document, tries to find its declared encoding. An XML encoding is declared at the beginning of the document. An HTML encoding is declared in a <meta> tag, hopefully near the beginning of the document. 
""" if search_entire_document: xml_endpos = html_endpos = len(markup) else: xml_endpos = 1024 html_endpos = max(2048, int(len(markup) * 0.05)) declared_encoding = None declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) if not declared_encoding_match and is_html: declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) if declared_encoding_match is not None: declared_encoding = declared_encoding_match.groups()[0].decode( 'ascii', 'replace') if declared_encoding: return declared_encoding.lower() return None class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. CHARSET_ALIASES = {"macintosh": "mac-roman", "x-sjis": "shift-jis"} ENCODINGS_WITH_SMART_QUOTES = [ "windows-1252", "iso-8859-1", "iso-8859-2", ] def __init__(self, markup, override_encodings=[], smart_quotes_to=None, is_html=False, exclude_encodings=[]): self.smart_quotes_to = smart_quotes_to self.tried_encodings = [] self.contains_replacement_characters = False self.is_html = is_html self.log = logging.getLogger(__name__) self.detector = EncodingDetector( markup, override_encodings, is_html, exclude_encodings) # Short-circuit if the data is in Unicode to begin with. if isinstance(markup, unicode) or markup == '': self.markup = markup self.unicode_markup = unicode(markup) self.original_encoding = None return # The encoding detector may have stripped a byte-order mark. # Use the stripped markup from this point on. 
self.markup = self.detector.markup u = None for encoding in self.detector.encodings: markup = self.detector.markup u = self._convert_from(encoding) if u is not None: break if not u: # None of the encodings worked. As an absolute last resort, # try them again with character replacement. for encoding in self.detector.encodings: if encoding != "ascii": u = self._convert_from(encoding, "replace") if u is not None: self.log.warning( "Some characters could not be decoded, and were " "replaced with REPLACEMENT CHARACTER." ) self.contains_replacement_characters = True break # If none of that worked, we could at this point force it to # ASCII, but that would destroy so much data that I think # giving up is better. self.unicode_markup = u if not u: self.original_encoding = None def _sub_ms_char(self, match): """Changes a MS smart quote character to an XML or HTML entity, or an ASCII character.""" orig = match.group(1) if self.smart_quotes_to == 'ascii': sub = self.MS_CHARS_TO_ASCII.get(orig).encode() else: sub = self.MS_CHARS.get(orig) if type(sub) == tuple: if self.smart_quotes_to == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub def _convert_from(self, proposed, errors="strict"): proposed = self.find_codec(proposed) if not proposed or (proposed, errors) in self.tried_encodings: return None self.tried_encodings.append((proposed, errors)) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. 
if (self.smart_quotes_to is not None and proposed in self.ENCODINGS_WITH_SMART_QUOTES): smart_quotes_re = b"([\x80-\x9f])" smart_quotes_compiled = re.compile(smart_quotes_re) markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) try: #print "Trying to convert document to %s (errors=%s)" % ( # proposed, errors) u = self._to_unicode(markup, proposed, errors) self.markup = u self.original_encoding = proposed except Exception as e: #print "That didn't work!" #print e return None #print "Correct encoding: %s" % proposed return self.markup def _to_unicode(self, data, encoding, errors="strict"): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' return unicode(data, encoding, errors) @property def declared_html_encoding(self): if not self.is_html: return None return self.detector.declared_encoding def find_codec(self, charset): value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) or (charset and self._codec(charset.replace("-", ""))) or (charset and self._codec(charset.replace("-", "_"))) or (charset and charset.lower()) or charset ) if value: return value.lower() return None def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. 
MS_CHARS = {b'\x80': ('euro', '20AC'), b'\x81': ' ', b'\x82': ('sbquo', '201A'), b'\x83': ('fnof', '192'), b'\x84': ('bdquo', '201E'), b'\x85': ('hellip', '2026'), b'\x86': ('dagger', '2020'), b'\x87': ('Dagger', '2021'), b'\x88': ('circ', '2C6'), b'\x89': ('permil', '2030'), b'\x8A': ('Scaron', '160'), b'\x8B': ('lsaquo', '2039'), b'\x8C': ('OElig', '152'), b'\x8D': '?', b'\x8E': ('#x17D', '17D'), b'\x8F': '?', b'\x90': '?', b'\x91': ('lsquo', '2018'), b'\x92': ('rsquo', '2019'), b'\x93': ('ldquo', '201C'), b'\x94': ('rdquo', '201D'), b'\x95': ('bull', '2022'), b'\x96': ('ndash', '2013'), b'\x97': ('mdash', '2014'), b'\x98': ('tilde', '2DC'), b'\x99': ('trade', '2122'), b'\x9a': ('scaron', '161'), b'\x9b': ('rsaquo', '203A'), b'\x9c': ('oelig', '153'), b'\x9d': '?', b'\x9e': ('#x17E', '17E'), b'\x9f': ('Yuml', ''),} # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains # horrors like stripping diacritical marks to turn á into a, but also # contains non-horrors like turning “ into ". MS_CHARS_TO_ASCII = { b'\x80' : 'EUR', b'\x81' : ' ', b'\x82' : ',', b'\x83' : 'f', b'\x84' : ',,', b'\x85' : '...', b'\x86' : '+', b'\x87' : '++', b'\x88' : '^', b'\x89' : '%', b'\x8a' : 'S', b'\x8b' : '<', b'\x8c' : 'OE', b'\x8d' : '?', b'\x8e' : 'Z', b'\x8f' : '?', b'\x90' : '?', b'\x91' : "'", b'\x92' : "'", b'\x93' : '"', b'\x94' : '"', b'\x95' : '*', b'\x96' : '-', b'\x97' : '--', b'\x98' : '~', b'\x99' : '(TM)', b'\x9a' : 's', b'\x9b' : '>', b'\x9c' : 'oe', b'\x9d' : '?', b'\x9e' : 'z', b'\x9f' : 'Y', b'\xa0' : ' ', b'\xa1' : '!', b'\xa2' : 'c', b'\xa3' : 'GBP', b'\xa4' : '$', #This approximation is especially parochial--this is the #generic currency symbol. 
b'\xa5' : 'YEN', b'\xa6' : '|', b'\xa7' : 'S', b'\xa8' : '..', b'\xa9' : '', b'\xaa' : '(th)', b'\xab' : '<<', b'\xac' : '!', b'\xad' : ' ', b'\xae' : '(R)', b'\xaf' : '-', b'\xb0' : 'o', b'\xb1' : '+-', b'\xb2' : '2', b'\xb3' : '3', b'\xb4' : ("'", 'acute'), b'\xb5' : 'u', b'\xb6' : 'P', b'\xb7' : '*', b'\xb8' : ',', b'\xb9' : '1', b'\xba' : '(th)', b'\xbb' : '>>', b'\xbc' : '1/4', b'\xbd' : '1/2', b'\xbe' : '3/4', b'\xbf' : '?', b'\xc0' : 'A', b'\xc1' : 'A', b'\xc2' : 'A', b'\xc3' : 'A', b'\xc4' : 'A', b'\xc5' : 'A', b'\xc6' : 'AE', b'\xc7' : 'C', b'\xc8' : 'E', b'\xc9' : 'E', b'\xca' : 'E', b'\xcb' : 'E', b'\xcc' : 'I', b'\xcd' : 'I', b'\xce' : 'I', b'\xcf' : 'I', b'\xd0' : 'D', b'\xd1' : 'N', b'\xd2' : 'O', b'\xd3' : 'O', b'\xd4' : 'O', b'\xd5' : 'O', b'\xd6' : 'O', b'\xd7' : '*', b'\xd8' : 'O', b'\xd9' : 'U', b'\xda' : 'U', b'\xdb' : 'U', b'\xdc' : 'U', b'\xdd' : 'Y', b'\xde' : 'b', b'\xdf' : 'B', b'\xe0' : 'a', b'\xe1' : 'a', b'\xe2' : 'a', b'\xe3' : 'a', b'\xe4' : 'a', b'\xe5' : 'a', b'\xe6' : 'ae', b'\xe7' : 'c', b'\xe8' : 'e', b'\xe9' : 'e', b'\xea' : 'e', b'\xeb' : 'e', b'\xec' : 'i', b'\xed' : 'i', b'\xee' : 'i', b'\xef' : 'i', b'\xf0' : 'o', b'\xf1' : 'n', b'\xf2' : 'o', b'\xf3' : 'o', b'\xf4' : 'o', b'\xf5' : 'o', b'\xf6' : 'o', b'\xf7' : '/', b'\xf8' : 'o', b'\xf9' : 'u', b'\xfa' : 'u', b'\xfb' : 'u', b'\xfc' : 'u', b'\xfd' : 'y', b'\xfe' : 'b', b'\xff' : 'y', } # A map used when removing rogue Windows-1252/ISO-8859-1 # characters in otherwise UTF-8 documents. # # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in # Windows-1252. 
# A map used when removing rogue Windows-1252/ISO-8859-1 characters from
# otherwise UTF-8 documents: Windows-1252 code point -> UTF-8 byte sequence
# for the same character.
#
# Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
# Windows-1252, so they have no entries here.
WINDOWS_1252_TO_UTF8 = {
    0x80 : b'\xe2\x82\xac', # €
    0x82 : b'\xe2\x80\x9a', # ‚
    0x83 : b'\xc6\x92',     # ƒ
    0x84 : b'\xe2\x80\x9e', # „
    0x85 : b'\xe2\x80\xa6', # …
    0x86 : b'\xe2\x80\xa0', # †
    0x87 : b'\xe2\x80\xa1', # ‡
    0x88 : b'\xcb\x86',     # ˆ
    0x89 : b'\xe2\x80\xb0', # ‰
    0x8a : b'\xc5\xa0',     # Š
    0x8b : b'\xe2\x80\xb9', # ‹
    0x8c : b'\xc5\x92',     # Œ
    0x8e : b'\xc5\xbd',     # Ž
    0x91 : b'\xe2\x80\x98', # ‘
    0x92 : b'\xe2\x80\x99', # ’
    0x93 : b'\xe2\x80\x9c', # “
    0x94 : b'\xe2\x80\x9d', # ”
    0x95 : b'\xe2\x80\xa2', # •
    0x96 : b'\xe2\x80\x93', # –
    0x97 : b'\xe2\x80\x94', # —
    0x98 : b'\xcb\x9c',     # ˜
    0x99 : b'\xe2\x84\xa2', # ™
    0x9a : b'\xc5\xa1',     # š
    0x9b : b'\xe2\x80\xba', # ›
    0x9c : b'\xc5\x93',     # œ
    0x9e : b'\xc5\xbe',     # ž
    0x9f : b'\xc5\xb8',     # Ÿ
    0xa0 : b'\xc2\xa0',     #  
    0xa1 : b'\xc2\xa1',     # ¡
    0xa2 : b'\xc2\xa2',     # ¢
    0xa3 : b'\xc2\xa3',     # £
    0xa4 : b'\xc2\xa4',     # ¤
    0xa5 : b'\xc2\xa5',     # ¥
    0xa6 : b'\xc2\xa6',     # ¦
    0xa7 : b'\xc2\xa7',     # §
    0xa8 : b'\xc2\xa8',     # ¨
    0xa9 : b'\xc2\xa9',     # ©
    0xaa : b'\xc2\xaa',     # ª
    0xab : b'\xc2\xab',     # «
    0xac : b'\xc2\xac',     # ¬
    0xad : b'\xc2\xad',     # ­ (soft hyphen)
    0xae : b'\xc2\xae',     # ®
    0xaf : b'\xc2\xaf',     # ¯
    0xb0 : b'\xc2\xb0',     # °
    0xb1 : b'\xc2\xb1',     # ±
    0xb2 : b'\xc2\xb2',     # ²
    0xb3 : b'\xc2\xb3',     # ³
    0xb4 : b'\xc2\xb4',     # ´
    0xb5 : b'\xc2\xb5',     # µ
    0xb6 : b'\xc2\xb6',     # ¶
    0xb7 : b'\xc2\xb7',     # ·
    0xb8 : b'\xc2\xb8',     # ¸
    0xb9 : b'\xc2\xb9',     # ¹
    0xba : b'\xc2\xba',     # º
    0xbb : b'\xc2\xbb',     # »
    0xbc : b'\xc2\xbc',     # ¼
    0xbd : b'\xc2\xbd',     # ½
    0xbe : b'\xc2\xbe',     # ¾
    0xbf : b'\xc2\xbf',     # ¿
    0xc0 : b'\xc3\x80',     # À
    0xc1 : b'\xc3\x81',     # Á
    0xc2 : b'\xc3\x82',     # Â
    0xc3 : b'\xc3\x83',     # Ã
    0xc4 : b'\xc3\x84',     # Ä
    0xc5 : b'\xc3\x85',     # Å
    0xc6 : b'\xc3\x86',     # Æ
    0xc7 : b'\xc3\x87',     # Ç
    0xc8 : b'\xc3\x88',     # È
    0xc9 : b'\xc3\x89',     # É
    0xca : b'\xc3\x8a',     # Ê
    0xcb : b'\xc3\x8b',     # Ë
    0xcc : b'\xc3\x8c',     # Ì
    0xcd : b'\xc3\x8d',     # Í
    0xce : b'\xc3\x8e',     # Î
    0xcf : b'\xc3\x8f',     # Ï
    0xd0 : b'\xc3\x90',     # Ð
    0xd1 : b'\xc3\x91',     # Ñ
    0xd2 : b'\xc3\x92',     # Ò
    0xd3 : b'\xc3\x93',     # Ó
    0xd4 : b'\xc3\x94',     # Ô
    0xd5 : b'\xc3\x95',     # Õ
    0xd6 : b'\xc3\x96',     # Ö
    0xd7 : b'\xc3\x97',     # ×
    0xd8 : b'\xc3\x98',     # Ø
    0xd9 : b'\xc3\x99',     # Ù
    0xda : b'\xc3\x9a',     # Ú
    0xdb : b'\xc3\x9b',     # Û
    0xdc : b'\xc3\x9c',     # Ü
    0xdd : b'\xc3\x9d',     # Ý
    0xde : b'\xc3\x9e',     # Þ
    0xdf : b'\xc3\x9f',     # ß
    0xe0 : b'\xc3\xa0',     # à
    # BUG FIX: this entry read b'\xa1' (a lone continuation byte); the
    # UTF-8 encoding of U+00E1 (á) is the two-byte sequence \xc3\xa1,
    # matching the pattern of every surrounding entry.
    0xe1 : b'\xc3\xa1',     # á
    0xe2 : b'\xc3\xa2',     # â
    0xe3 : b'\xc3\xa3',     # ã
    0xe4 : b'\xc3\xa4',     # ä
    0xe5 : b'\xc3\xa5',     # å
    0xe6 : b'\xc3\xa6',     # æ
    0xe7 : b'\xc3\xa7',     # ç
    0xe8 : b'\xc3\xa8',     # è
    0xe9 : b'\xc3\xa9',     # é
    0xea : b'\xc3\xaa',     # ê
    0xeb : b'\xc3\xab',     # ë
    0xec : b'\xc3\xac',     # ì
    0xed : b'\xc3\xad',     # í
    0xee : b'\xc3\xae',     # î
    0xef : b'\xc3\xaf',     # ï
    0xf0 : b'\xc3\xb0',     # ð
    0xf1 : b'\xc3\xb1',     # ñ
    0xf2 : b'\xc3\xb2',     # ò
    0xf3 : b'\xc3\xb3',     # ó
    0xf4 : b'\xc3\xb4',     # ô
    0xf5 : b'\xc3\xb5',     # õ
    0xf6 : b'\xc3\xb6',     # ö
    0xf7 : b'\xc3\xb7',     # ÷
    0xf8 : b'\xc3\xb8',     # ø
    0xf9 : b'\xc3\xb9',     # ù
    0xfa : b'\xc3\xba',     # ú
    0xfb : b'\xc3\xbb',     # û
    0xfc : b'\xc3\xbc',     # ü
    0xfd : b'\xc3\xbd',     # ý
    0xfe : b'\xc3\xbe',     # þ
}

# (first_byte_low, first_byte_high, total_size) ranges classifying the
# lead byte of a UTF-8 multibyte sequence.
MULTIBYTE_MARKERS_AND_SIZES = [
    (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
    (0xe0, 0xef, 3), # 3-byte characters start with E0-EF
    (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
]

FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]

@classmethod
def detwingle(cls, in_bytes, main_encoding="utf8",
              embedded_encoding="windows-1252"):
    """Fix characters from one encoding embedded in some other encoding.

    Currently the only situation supported is Windows-1252 (or its
    subset ISO-8859-1), embedded in UTF-8.

    :param in_bytes: A bytestring. If you've already converted the
        document to Unicode, you're too late.
    :param main_encoding: Must be "utf8" or "utf-8".
    :param embedded_encoding: Must be "windows-1252" (ISO-8859-1 is a
        subset, so it works too).
    :return: A bytestring in which `embedded_encoding` characters have
        been converted to their `main_encoding` equivalents; the input
        object itself if no rogue character was found.
    :raises NotImplementedError: for any other encoding pair.
    """
    # NOTE(review): after replace('_', '-') the 'windows_1252' member of
    # the tuple can never match; kept for fidelity with the original.
    if embedded_encoding.replace('_', '-').lower() not in (
        'windows-1252', 'windows_1252'):
        raise NotImplementedError(
            "Windows-1252 and ISO-8859-1 are the only currently supported "
            "embedded encodings.")

    if main_encoding.lower() not in ('utf8', 'utf-8'):
        raise NotImplementedError(
            "UTF-8 is the only currently supported main encoding.")

    byte_chunks = []

    chunk_start = 0
    pos = 0
    while pos < len(in_bytes):
        byte = in_bytes[pos]
        if not isinstance(byte, int):
            # Python 2.x: indexing a bytestring yields a 1-char string.
            byte = ord(byte)
        if (byte >= cls.FIRST_MULTIBYTE_MARKER
            and byte <= cls.LAST_MULTIBYTE_MARKER):
            # This is the start of a UTF-8 multibyte character. Skip
            # to the end.
            for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
                if byte >= start and byte <= end:
                    pos += size
                    break
        elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
            # We found a Windows-1252 character!
            # Save the string up to this point as a chunk.
            byte_chunks.append(in_bytes[chunk_start:pos])

            # Now translate the Windows-1252 character into UTF-8
            # and add it as another, one-byte chunk.
            byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
            pos += 1
            chunk_start = pos
        else:
            # Go on to the next character.
            pos += 1
    if chunk_start == 0:
        # The string is unchanged.
        return in_bytes
    else:
        # Store the final chunk.
        byte_chunks.append(in_bytes[chunk_start:])
    return b''.join(byte_chunks)
29,930
Python
.py
760
29.014474
91
0.51026
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,812
test_docs.py
evilhero_mylar/lib/bs4/tests/test_docs.py
"Test harness for doctests." # pylint: disable-msg=E0611,W0142 __metaclass__ = type __all__ = [ 'additional_tests', ] import atexit import doctest import os #from pkg_resources import ( # resource_filename, resource_exists, resource_listdir, cleanup_resources) import unittest DOCTEST_FLAGS = ( doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) # def additional_tests(): # "Run the doc tests (README.txt and docs/*, if any exist)" # doctest_files = [ # os.path.abspath(resource_filename('bs4', 'README.txt'))] # if resource_exists('bs4', 'docs'): # for name in resource_listdir('bs4', 'docs'): # if name.endswith('.txt'): # doctest_files.append( # os.path.abspath( # resource_filename('bs4', 'docs/%s' % name))) # kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS) # atexit.register(cleanup_resources) # return unittest.TestSuite(( # doctest.DocFileSuite(*doctest_files, **kwargs)))
1,067
Python
.py
30
33.7
77
0.643065
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,813
test_lxml.py
evilhero_mylar/lib/bs4/tests/test_lxml.py
"""Tests to ensure that the lxml tree builder generates good trees.""" import re import warnings try: import lxml.etree LXML_PRESENT = True LXML_VERSION = lxml.etree.LXML_VERSION except ImportError, e: LXML_PRESENT = False LXML_VERSION = (0,) if LXML_PRESENT: from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML from bs4 import ( BeautifulSoup, BeautifulStoneSoup, ) from bs4.element import Comment, Doctype, SoupStrainer from bs4.testing import skipIf from bs4.tests import test_htmlparser from bs4.testing import ( HTMLTreeBuilderSmokeTest, XMLTreeBuilderSmokeTest, SoupTest, skipIf, ) @skipIf( not LXML_PRESENT, "lxml seems not to be present, not testing its tree builder.") class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest): """See ``HTMLTreeBuilderSmokeTest``.""" @property def default_builder(self): return LXMLTreeBuilder() def test_out_of_range_entity(self): self.assertSoupEquals( "<p>foo&#10000000000000;bar</p>", "<p>foobar</p>") self.assertSoupEquals( "<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>") self.assertSoupEquals( "<p>foo&#1000000000;bar</p>", "<p>foobar</p>") # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this # test if an old version of lxml is installed. @skipIf( not LXML_PRESENT or LXML_VERSION < (2,3,5,0), "Skipping doctype test for old version of lxml to avoid segfault.") def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_beautifulstonesoup_is_xml_parser(self): # Make sure that the deprecated BSS class uses an xml builder # if one is installed. 
with warnings.catch_warnings(record=True) as w: soup = BeautifulStoneSoup("<b />") self.assertEqual(u"<b/>", unicode(soup.b)) self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message)) @skipIf( not LXML_PRESENT, "lxml seems not to be present, not testing its XML tree builder.") class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest): """See ``HTMLTreeBuilderSmokeTest``.""" @property def default_builder(self): return LXMLTreeBuilderForXML()
2,382
Python
.py
64
31.59375
86
0.692108
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,814
test_builder_registry.py
evilhero_mylar/lib/bs4/tests/test_builder_registry.py
"""Tests of the builder registry."""

import unittest
import warnings

from bs4 import BeautifulSoup
from bs4.builder import (
    builder_registry as registry,
    HTMLParserTreeBuilder,
    TreeBuilderRegistry,
)

try:
    from bs4.builder import HTML5TreeBuilder
    HTML5LIB_PRESENT = True
except ImportError:
    HTML5LIB_PRESENT = False

try:
    from bs4.builder import (
        LXMLTreeBuilderForXML,
        LXMLTreeBuilder,
    )
    LXML_PRESENT = True
except ImportError:
    LXML_PRESENT = False


class BuiltInRegistryTest(unittest.TestCase):
    """Test the built-in registry with the default builders registered."""

    def test_combination(self):
        # Feature-pair lookups resolve to the expected concrete builders,
        # depending on which optional parsers are installed.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('fast', 'html'),
                             LXMLTreeBuilder)
            self.assertEqual(registry.lookup('permissive', 'xml'),
                             LXMLTreeBuilderForXML)
        self.assertEqual(registry.lookup('strict', 'html'),
                         HTMLParserTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib', 'html'),
                             HTML5TreeBuilder)

    def test_lookup_by_markup_type(self):
        # Looking up by markup type alone picks the preferred builder.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
            self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
        else:
            self.assertEqual(registry.lookup('xml'), None)
            if HTML5LIB_PRESENT:
                self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
            else:
                self.assertEqual(registry.lookup('html'),
                                 HTMLParserTreeBuilder)

    def test_named_library(self):
        # Builders can be looked up by the name of the backing library.
        if LXML_PRESENT:
            self.assertEqual(registry.lookup('lxml', 'xml'),
                             LXMLTreeBuilderForXML)
            self.assertEqual(registry.lookup('lxml', 'html'),
                             LXMLTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib'),
                             HTML5TreeBuilder)
        self.assertEqual(registry.lookup('html.parser'),
                         HTMLParserTreeBuilder)

    def test_beautifulsoup_constructor_does_lookup(self):
        with warnings.catch_warnings(record=True) as w:
            # This will create a warning about not explicitly
            # specifying a parser, but we'll ignore it.
            BeautifulSoup("", features="html")            # a single string
            BeautifulSoup("", features=["html", "fast"])  # a list of strings

        # You'll get an exception if BS can't find an appropriate
        # builder.
        self.assertRaises(ValueError, BeautifulSoup, "",
                          features="no-such-feature")


class RegistryTest(unittest.TestCase):
    """Test the TreeBuilderRegistry class in general."""

    def setUp(self):
        self.registry = TreeBuilderRegistry()

    def builder_for_features(self, *feature_list):
        # Manufacture a throwaway builder class advertising exactly these
        # features, register it, and hand it back.
        new_builder = type('Builder_' + '_'.join(feature_list),
                           (object,), {'features': feature_list})
        self.registry.register(new_builder)
        return new_builder

    def test_register_with_no_features(self):
        featureless = self.builder_for_features()

        # Since the builder advertises no features, you can't find it
        # by looking up features.
        self.assertEqual(self.registry.lookup('foo'), None)

        # But you can find it by doing a lookup with no features, if
        # this happens to be the only registered builder.
        self.assertEqual(self.registry.lookup(), featureless)

    def test_register_with_features_makes_lookup_succeed(self):
        featured = self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('foo'), featured)
        self.assertEqual(self.registry.lookup('bar'), featured)

    def test_lookup_fails_when_no_builder_implements_feature(self):
        self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('baz'), None)

    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
        self.builder_for_features('foo')
        latest = self.builder_for_features('bar')
        self.assertEqual(self.registry.lookup(), latest)

    def test_lookup_fails_when_no_tree_builders_registered(self):
        self.assertEqual(self.registry.lookup(), None)

    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
        # Registration ORDER matters here: the registry prefers the most
        # recently registered builder that satisfies every feature.
        self.builder_for_features('foo')
        self.builder_for_features('bar')
        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
        self.builder_for_features('bar')
        self.builder_for_features('foo')

        # There are two builders featuring 'foo' and 'bar', but
        # the one that also features 'quux' was registered later.
        self.assertEqual(self.registry.lookup('foo', 'bar'),
                         has_both_late)

        # There is only one builder featuring 'foo', 'bar', and 'baz'.
        self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
                         has_both_early)

    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
        self.builder_for_features('foo', 'bar')
        self.builder_for_features('foo', 'baz')
        self.assertEqual(self.registry.lookup('bar', 'baz'), None)
5,582
Python
.py
117
37.418803
82
0.637534
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,815
test_html5lib.py
evilhero_mylar/lib/bs4/tests/test_html5lib.py
"""Tests to ensure that the html5lib tree builder generates good trees.""" import warnings try: from bs4.builder import HTML5TreeBuilder HTML5LIB_PRESENT = True except ImportError, e: HTML5LIB_PRESENT = False from bs4.element import SoupStrainer from bs4.testing import ( HTML5TreeBuilderSmokeTest, SoupTest, skipIf, ) @skipIf( not HTML5LIB_PRESENT, "html5lib seems not to be present, not testing its tree builder.") class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest): """See ``HTML5TreeBuilderSmokeTest``.""" @property def default_builder(self): return HTML5TreeBuilder() def test_soupstrainer(self): # The html5lib tree builder does not support SoupStrainers. strainer = SoupStrainer("b") markup = "<p>A <b>bold</b> statement.</p>" with warnings.catch_warnings(record=True) as w: soup = self.soup(markup, parse_only=strainer) self.assertEqual( soup.decode(), self.document_for(markup)) self.assertTrue( "the html5lib tree builder doesn't support parse_only" in str(w[0].message)) def test_correctly_nested_tables(self): """html5lib inserts <tbody> tags where other parsers don't.""" markup = ('<table id="1">' '<tr>' "<td>Here's another table:" '<table id="2">' '<tr><td>foo</td></tr>' '</table></td>') self.assertSoupEquals( markup, '<table id="1"><tbody><tr><td>Here\'s another table:' '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>' '</td></tr></tbody></table>') self.assertSoupEquals( "<table><thead><tr><td>Foo</td></tr></thead>" "<tbody><tr><td>Bar</td></tr></tbody>" "<tfoot><tr><td>Baz</td></tr></tfoot></table>") def test_xml_declaration_followed_by_doctype(self): markup = '''<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html> <html> <head> </head> <body> <p>foo</p> </body> </html>''' soup = self.soup(markup) # Verify that we can reach the <p> tag; this means the tree is connected. 
self.assertEqual(b"<p>foo</p>", soup.p.encode()) def test_reparented_markup(self): markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>' soup = self.soup(markup) self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode()) self.assertEqual(2, len(soup.find_all('p'))) def test_reparented_markup_ends_with_whitespace(self): markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n' soup = self.soup(markup) self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode()) self.assertEqual(2, len(soup.find_all('p'))) def test_reparented_markup_containing_identical_whitespace_nodes(self): """Verify that we keep the two whitespace nodes in this document distinct when reparenting the adjacent <tbody> tags. """ markup = '<table> <tbody><tbody><ims></tbody> </table>' soup = self.soup(markup) space1, space2 = soup.find_all(string=' ') tbody1, tbody2 = soup.find_all('tbody') assert space1.next_element is tbody1 assert tbody2.next_element is space2 def test_reparented_markup_containing_children(self): markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>' soup = self.soup(markup) noscript = soup.noscript self.assertEqual("target", noscript.next_element) target = soup.find(string='target') # The 'aftermath' string was duplicated; we want the second one. final_aftermath = soup.find_all(string='aftermath')[-1] # The <noscript> tag was moved beneath a copy of the <a> tag, # but the 'target' string within is still connected to the # (second) 'aftermath' string. 
self.assertEqual(final_aftermath, target.next_element) self.assertEqual(target, final_aftermath.previous_element) def test_processing_instruction(self): """Processing instructions become comments.""" markup = b"""<?PITarget PIContent?>""" soup = self.soup(markup) assert str(soup).startswith("<!--?PITarget PIContent?-->") def test_cloned_multivalue_node(self): markup = b"""<a class="my_class"><p></a>""" soup = self.soup(markup) a1, a2 = soup.find_all('a') self.assertEqual(a1, a2) assert a1 is not a2 def test_foster_parenting(self): markup = b"""<table><td></tbody>A""" soup = self.soup(markup) self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
4,908
Python
.py
110
36.409091
120
0.609853
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,816
test_htmlparser.py
evilhero_mylar/lib/bs4/tests/test_htmlparser.py
"""Tests to ensure that the html.parser tree builder generates good trees.""" from pdb import set_trace import pickle from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest from bs4.builder import HTMLParserTreeBuilder class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest): @property def default_builder(self): return HTMLParserTreeBuilder() def test_namespaced_system_doctype(self): # html.parser can't handle namespaced doctypes, so skip this one. pass def test_namespaced_public_doctype(self): # html.parser can't handle namespaced doctypes, so skip this one. pass def test_builder_is_pickled(self): """Unlike most tree builders, HTMLParserTreeBuilder and will be restored after pickling. """ tree = self.soup("<a><b>foo</a>") dumped = pickle.dumps(tree, 2) loaded = pickle.loads(dumped) self.assertTrue(isinstance(loaded.builder, type(tree.builder))) def test_redundant_empty_element_closing_tags(self): self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>") self.assertSoupEquals('</br></br></br>', "")
1,191
Python
.py
27
37.814815
79
0.698358
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,817
test_soup.py
evilhero_mylar/lib/bs4/tests/test_soup.py
# -*- coding: utf-8 -*- """Tests of Beautiful Soup as a whole.""" from pdb import set_trace import logging import unittest import sys import tempfile from bs4 import ( BeautifulSoup, BeautifulStoneSoup, ) from bs4.element import ( CharsetMetaAttributeValue, ContentMetaAttributeValue, SoupStrainer, NamespacedAttribute, ) import bs4.dammit from bs4.dammit import ( EntitySubstitution, UnicodeDammit, EncodingDetector, ) from bs4.testing import ( SoupTest, skipIf, ) import warnings try: from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML LXML_PRESENT = True except ImportError, e: LXML_PRESENT = False PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2)) class TestConstructor(SoupTest): def test_short_unicode_input(self): data = u"<h1>éé</h1>" soup = self.soup(data) self.assertEqual(u"éé", soup.h1.string) def test_embedded_null(self): data = u"<h1>foo\0bar</h1>" soup = self.soup(data) self.assertEqual(u"foo\0bar", soup.h1.string) def test_exclude_encodings(self): utf8_data = u"Räksmörgås".encode("utf-8") soup = self.soup(utf8_data, exclude_encodings=["utf-8"]) self.assertEqual("windows-1252", soup.original_encoding) class TestWarnings(SoupTest): def _no_parser_specified(self, s, is_there=True): v = s.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:80]) self.assertTrue(v) def test_warning_if_no_parser_specified(self): with warnings.catch_warnings(record=True) as w: soup = self.soup("<a><b></b></a>") msg = str(w[0].message) self._assert_no_parser_specified(msg) def test_warning_if_parser_specified_too_vague(self): with warnings.catch_warnings(record=True) as w: soup = self.soup("<a><b></b></a>", "html") msg = str(w[0].message) self._assert_no_parser_specified(msg) def test_no_warning_if_explicit_parser_specified(self): with warnings.catch_warnings(record=True) as w: soup = self.soup("<a><b></b></a>", "html.parser") self.assertEqual([], w) def test_parseOnlyThese_renamed_to_parse_only(self): with warnings.catch_warnings(record=True) as 
w: soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b")) msg = str(w[0].message) self.assertTrue("parseOnlyThese" in msg) self.assertTrue("parse_only" in msg) self.assertEqual(b"<b></b>", soup.encode()) def test_fromEncoding_renamed_to_from_encoding(self): with warnings.catch_warnings(record=True) as w: utf8 = b"\xc3\xa9" soup = self.soup(utf8, fromEncoding="utf8") msg = str(w[0].message) self.assertTrue("fromEncoding" in msg) self.assertTrue("from_encoding" in msg) self.assertEqual("utf8", soup.original_encoding) def test_unrecognized_keyword_argument(self): self.assertRaises( TypeError, self.soup, "<a>", no_such_argument=True) class TestWarnings(SoupTest): def test_disk_file_warning(self): filehandle = tempfile.NamedTemporaryFile() filename = filehandle.name try: with warnings.catch_warnings(record=True) as w: soup = self.soup(filename) msg = str(w[0].message) self.assertTrue("looks like a filename" in msg) finally: filehandle.close() # The file no longer exists, so Beautiful Soup will no longer issue the warning. with warnings.catch_warnings(record=True) as w: soup = self.soup(filename) self.assertEqual(0, len(w)) def test_url_warning_with_bytes_url(self): with warnings.catch_warnings(record=True) as warning_list: soup = self.soup(b"http://www.crummybytes.com/") # Be aware this isn't the only warning that can be raised during # execution.. 
self.assertTrue(any("looks like a URL" in str(w.message) for w in warning_list)) def test_url_warning_with_unicode_url(self): with warnings.catch_warnings(record=True) as warning_list: # note - this url must differ from the bytes one otherwise # python's warnings system swallows the second warning soup = self.soup(u"http://www.crummyunicode.com/") self.assertTrue(any("looks like a URL" in str(w.message) for w in warning_list)) def test_url_warning_with_bytes_and_space(self): with warnings.catch_warnings(record=True) as warning_list: soup = self.soup(b"http://www.crummybytes.com/ is great") self.assertFalse(any("looks like a URL" in str(w.message) for w in warning_list)) def test_url_warning_with_unicode_and_space(self): with warnings.catch_warnings(record=True) as warning_list: soup = self.soup(u"http://www.crummyuncode.com/ is great") self.assertFalse(any("looks like a URL" in str(w.message) for w in warning_list)) class TestSelectiveParsing(SoupTest): def test_parse_with_soupstrainer(self): markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>" strainer = SoupStrainer("b") soup = self.soup(markup, parse_only=strainer) self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>") class TestEntitySubstitution(unittest.TestCase): """Standalone tests of the EntitySubstitution class.""" def setUp(self): self.sub = EntitySubstitution def test_simple_html_substitution(self): # Unicode characters corresponding to named HTML entites # are substituted, and no others. s = u"foo\u2200\N{SNOWMAN}\u00f5bar" self.assertEqual(self.sub.substitute_html(s), u"foo&forall;\N{SNOWMAN}&otilde;bar") def test_smart_quote_substitution(self): # MS smart quotes are a common source of frustration, so we # give them a special test. 
quotes = b"\x91\x92foo\x93\x94" dammit = UnicodeDammit(quotes) self.assertEqual(self.sub.substitute_html(dammit.markup), "&lsquo;&rsquo;foo&ldquo;&rdquo;") def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self): s = 'Welcome to "my bar"' self.assertEqual(self.sub.substitute_xml(s, False), s) def test_xml_attribute_quoting_normally_uses_double_quotes(self): self.assertEqual(self.sub.substitute_xml("Welcome", True), '"Welcome"') self.assertEqual(self.sub.substitute_xml("Bob's Bar", True), '"Bob\'s Bar"') def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self): s = 'Welcome to "my bar"' self.assertEqual(self.sub.substitute_xml(s, True), "'Welcome to \"my bar\"'") def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self): s = 'Welcome to "Bob\'s Bar"' self.assertEqual( self.sub.substitute_xml(s, True), '"Welcome to &quot;Bob\'s Bar&quot;"') def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self): quoted = 'Welcome to "Bob\'s Bar"' self.assertEqual(self.sub.substitute_xml(quoted), quoted) def test_xml_quoting_handles_angle_brackets(self): self.assertEqual( self.sub.substitute_xml("foo<bar>"), "foo&lt;bar&gt;") def test_xml_quoting_handles_ampersands(self): self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&amp;T") def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self): self.assertEqual( self.sub.substitute_xml("&Aacute;T&T"), "&amp;Aacute;T&amp;T") def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self): self.assertEqual( self.sub.substitute_xml_containing_entities("&Aacute;T&T"), "&Aacute;T&amp;T") def test_quotes_not_html_substituted(self): """There's no need to do this except inside attribute values.""" text = 'Bob\'s "bar"' self.assertEqual(self.sub.substitute_html(text), text) class TestEncodingConversion(SoupTest): # Test Beautiful Soup's ability to decode and encode from various # 
encodings. def setUp(self): super(TestEncodingConversion, self).setUp() self.unicode_data = u'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>' self.utf8_data = self.unicode_data.encode("utf-8") # Just so you know what it looks like. self.assertEqual( self.utf8_data, b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>') def test_ascii_in_unicode_out(self): # ASCII input is converted to Unicode. The original_encoding # attribute is set to 'utf-8', a superset of ASCII. chardet = bs4.dammit.chardet_dammit logging.disable(logging.WARNING) try: def noop(str): return None # Disable chardet, which will realize that the ASCII is ASCII. bs4.dammit.chardet_dammit = noop ascii = b"<foo>a</foo>" soup_from_ascii = self.soup(ascii) unicode_output = soup_from_ascii.decode() self.assertTrue(isinstance(unicode_output, unicode)) self.assertEqual(unicode_output, self.document_for(ascii.decode())) self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8") finally: logging.disable(logging.NOTSET) bs4.dammit.chardet_dammit = chardet def test_unicode_in_unicode_out(self): # Unicode input is left alone. The original_encoding attribute # is not set. soup_from_unicode = self.soup(self.unicode_data) self.assertEqual(soup_from_unicode.decode(), self.unicode_data) self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!') self.assertEqual(soup_from_unicode.original_encoding, None) def test_utf8_in_unicode_out(self): # UTF-8 input is converted to Unicode. The original_encoding # attribute is set. soup_from_utf8 = self.soup(self.utf8_data) self.assertEqual(soup_from_utf8.decode(), self.unicode_data) self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!') def test_utf8_out(self): # The internal data structures can be encoded as UTF-8. 
soup_from_unicode = self.soup(self.unicode_data) self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data) @skipIf( PYTHON_3_PRE_3_2, "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.") def test_attribute_name_containing_unicode_characters(self): markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>' self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8")) class TestUnicodeDammit(unittest.TestCase): """Standalone tests of UnicodeDammit.""" def test_unicode_input(self): markup = u"I'm already Unicode! \N{SNOWMAN}" dammit = UnicodeDammit(markup) self.assertEqual(dammit.unicode_markup, markup) def test_smart_quotes_to_unicode(self): markup = b"<foo>\x91\x92\x93\x94</foo>" dammit = UnicodeDammit(markup) self.assertEqual( dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>") def test_smart_quotes_to_xml_entities(self): markup = b"<foo>\x91\x92\x93\x94</foo>" dammit = UnicodeDammit(markup, smart_quotes_to="xml") self.assertEqual( dammit.unicode_markup, "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>") def test_smart_quotes_to_html_entities(self): markup = b"<foo>\x91\x92\x93\x94</foo>" dammit = UnicodeDammit(markup, smart_quotes_to="html") self.assertEqual( dammit.unicode_markup, "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>") def test_smart_quotes_to_ascii(self): markup = b"<foo>\x91\x92\x93\x94</foo>" dammit = UnicodeDammit(markup, smart_quotes_to="ascii") self.assertEqual( dammit.unicode_markup, """<foo>''""</foo>""") def test_detect_utf8(self): utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83" dammit = UnicodeDammit(utf8) self.assertEqual(dammit.original_encoding.lower(), 'utf-8') self.assertEqual(dammit.unicode_markup, u'Sacr\xe9 bleu! 
\N{SNOWMAN}') def test_convert_hebrew(self): hebrew = b"\xed\xe5\xec\xf9" dammit = UnicodeDammit(hebrew, ["iso-8859-8"]) self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8') self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9') def test_dont_see_smart_quotes_where_there_are_none(self): utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch" dammit = UnicodeDammit(utf_8) self.assertEqual(dammit.original_encoding.lower(), 'utf-8') self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8) def test_ignore_inappropriate_codecs(self): utf8_data = u"Räksmörgås".encode("utf-8") dammit = UnicodeDammit(utf8_data, ["iso-8859-8"]) self.assertEqual(dammit.original_encoding.lower(), 'utf-8') def test_ignore_invalid_codecs(self): utf8_data = u"Räksmörgås".encode("utf-8") for bad_encoding in ['.utf8', '...', 'utF---16.!']: dammit = UnicodeDammit(utf8_data, [bad_encoding]) self.assertEqual(dammit.original_encoding.lower(), 'utf-8') def test_exclude_encodings(self): # This is UTF-8. utf8_data = u"Räksmörgås".encode("utf-8") # But if we exclude UTF-8 from consideration, the guess is # Windows-1252. dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"]) self.assertEqual(dammit.original_encoding.lower(), 'windows-1252') # And if we exclude that, there is no valid guess at all. 
dammit = UnicodeDammit( utf8_data, exclude_encodings=["utf-8", "windows-1252"]) self.assertEqual(dammit.original_encoding, None) def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self): detected = EncodingDetector( b'<?xml version="1.0" encoding="UTF-\xdb" ?>') encodings = list(detected.encodings) assert u'utf-\N{REPLACEMENT CHARACTER}' in encodings def test_detect_html5_style_meta_tag(self): for data in ( b'<html><meta charset="euc-jp" /></html>', b"<html><meta charset='euc-jp' /></html>", b"<html><meta charset=euc-jp /></html>", b"<html><meta charset=euc-jp/></html>"): dammit = UnicodeDammit(data, is_html=True) self.assertEqual( "euc-jp", dammit.original_encoding) def test_last_ditch_entity_replacement(self): # This is a UTF-8 document that contains bytestrings # completely incompatible with UTF-8 (ie. encoded with some other # encoding). # # Since there is no consistent encoding for the document, # Unicode, Dammit will eventually encode the document as UTF-8 # and encode the incompatible characters as REPLACEMENT # CHARACTER. # # If chardet is installed, it will detect that the document # can be converted into ISO-8859-1 without errors. This happens # to be the wrong encoding, but it is a consistent encoding, so the # code we're testing here won't run. # # So we temporarily disable chardet if it's present. 
doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?> <html><b>\330\250\330\252\330\261</b> <i>\310\322\321\220\312\321\355\344</i></html>""" chardet = bs4.dammit.chardet_dammit logging.disable(logging.WARNING) try: def noop(str): return None bs4.dammit.chardet_dammit = noop dammit = UnicodeDammit(doc) self.assertEqual(True, dammit.contains_replacement_characters) self.assertTrue(u"\ufffd" in dammit.unicode_markup) soup = BeautifulSoup(doc, "html.parser") self.assertTrue(soup.contains_replacement_characters) finally: logging.disable(logging.NOTSET) bs4.dammit.chardet_dammit = chardet def test_byte_order_mark_removed(self): # A document written in UTF-16LE will have its byte order marker stripped. data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00' dammit = UnicodeDammit(data) self.assertEqual(u"<a>áé</a>", dammit.unicode_markup) self.assertEqual("utf-16le", dammit.original_encoding) def test_detwingle(self): # Here's a UTF8 document. utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8") # Here's a Windows-1252 document. windows_1252 = ( u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!" u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252") # Through some unholy alchemy, they've been stuck together. doc = utf8 + windows_1252 + utf8 # The document can't be turned into UTF-8: self.assertRaises(UnicodeDecodeError, doc.decode, "utf8") # Unicode, Dammit thinks the whole document is Windows-1252, # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃" # But if we run it through fix_embedded_windows_1252, it's fixed: fixed = UnicodeDammit.detwingle(doc) self.assertEqual( u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8")) def test_detwingle_ignores_multibyte_characters(self): # Each of these characters has a UTF-8 representation ending # in \x93. \x93 is a smart quote if interpreted as # Windows-1252. But our code knows to skip over multibyte # UTF-8 characters, so they'll survive the process unscathed. 
for tricky_unicode_char in ( u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93' u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93' u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one. ): input = tricky_unicode_char.encode("utf8") self.assertTrue(input.endswith(b'\x93')) output = UnicodeDammit.detwingle(input) self.assertEqual(output, input) class TestNamedspacedAttribute(SoupTest): def test_name_may_be_none(self): a = NamespacedAttribute("xmlns", None) self.assertEqual(a, "xmlns") def test_attribute_is_equivalent_to_colon_separated_string(self): a = NamespacedAttribute("a", "b") self.assertEqual("a:b", a) def test_attributes_are_equivalent_if_prefix_and_name_identical(self): a = NamespacedAttribute("a", "b", "c") b = NamespacedAttribute("a", "b", "c") self.assertEqual(a, b) # The actual namespace is not considered. c = NamespacedAttribute("a", "b", None) self.assertEqual(a, c) # But name and prefix are important. d = NamespacedAttribute("a", "z", "c") self.assertNotEqual(a, d) e = NamespacedAttribute("z", "b", "c") self.assertNotEqual(a, e) class TestAttributeValueWithCharsetSubstitution(unittest.TestCase): def test_content_meta_attribute_value(self): value = CharsetMetaAttributeValue("euc-jp") self.assertEqual("euc-jp", value) self.assertEqual("euc-jp", value.original_value) self.assertEqual("utf8", value.encode("utf8")) def test_content_meta_attribute_value(self): value = ContentMetaAttributeValue("text/html; charset=euc-jp") self.assertEqual("text/html; charset=euc-jp", value) self.assertEqual("text/html; charset=euc-jp", value.original_value) self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
20,345
Python
.py
411
40.423358
149
0.644027
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,818
test_tree.py
evilhero_mylar/lib/bs4/tests/test_tree.py
# -*- coding: utf-8 -*- """Tests for Beautiful Soup's tree traversal methods. The tree traversal methods are the main advantage of using Beautiful Soup over just using a parser. Different parsers will build different Beautiful Soup trees given the same markup, but all Beautiful Soup trees can be traversed with the methods tested here. """ from pdb import set_trace import copy import pickle import re import warnings from bs4 import BeautifulSoup from bs4.builder import ( builder_registry, HTMLParserTreeBuilder, ) from bs4.element import ( PY3K, CData, Comment, Declaration, Doctype, NavigableString, SoupStrainer, Tag, ) from bs4.testing import ( SoupTest, skipIf, ) XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None) LXML_PRESENT = (builder_registry.lookup("lxml") is not None) class TreeTest(SoupTest): def assertSelects(self, tags, should_match): """Make sure that the given tags have the correct text. This is used in tests that define a bunch of tags, each containing a single string, and then select certain strings by some mechanism. """ self.assertEqual([tag.string for tag in tags], should_match) def assertSelectsIDs(self, tags, should_match): """Make sure that the given tags have the correct IDs. This is used in tests that define a bunch of tags, each containing a single string, and then select certain strings by some mechanism. """ self.assertEqual([tag['id'] for tag in tags], should_match) class TestFind(TreeTest): """Basic tests of the find() method. find() just calls find_all() with limit=1, so it's not tested all that thouroughly here. 
""" def test_find_tag(self): soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>") self.assertEqual(soup.find("b").string, "2") def test_unicode_text_find(self): soup = self.soup(u'<h1>Räksmörgås</h1>') self.assertEqual(soup.find(string=u'Räksmörgås'), u'Räksmörgås') def test_unicode_attribute_find(self): soup = self.soup(u'<h1 id="Räksmörgås">here it is</h1>') str(soup) self.assertEqual("here it is", soup.find(id=u'Räksmörgås').text) def test_find_everything(self): """Test an optimization that finds all tags.""" soup = self.soup("<a>foo</a><b>bar</b>") self.assertEqual(2, len(soup.find_all())) def test_find_everything_with_name(self): """Test an optimization that finds all tags with a given name.""" soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>") self.assertEqual(2, len(soup.find_all('a'))) class TestFindAll(TreeTest): """Basic tests of the find_all() method.""" def test_find_all_text_nodes(self): """You can search the tree for text nodes.""" soup = self.soup("<html>Foo<b>bar</b>\xbb</html>") # Exact match. self.assertEqual(soup.find_all(string="bar"), [u"bar"]) self.assertEqual(soup.find_all(text="bar"), [u"bar"]) # Match any of a number of strings. self.assertEqual( soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"]) # Match a regular expression. self.assertEqual(soup.find_all(text=re.compile('.*')), [u"Foo", u"bar", u'\xbb']) # Match anything. self.assertEqual(soup.find_all(text=True), [u"Foo", u"bar", u'\xbb']) def test_find_all_limit(self): """You can limit the number of items returned by find_all.""" soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>") self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"]) self.assertSelects(soup.find_all('a', limit=1), ["1"]) self.assertSelects( soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"]) # A limit of 0 means no limit. 
self.assertSelects( soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"]) def test_calling_a_tag_is_calling_findall(self): soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>") self.assertSelects(soup('a', limit=1), ["1"]) self.assertSelects(soup.b(id="foo"), ["3"]) def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self): soup = self.soup("<a></a>") # Create a self-referential list. l = [] l.append(l) # Without special code in _normalize_search_value, this would cause infinite # recursion. self.assertEqual([], soup.find_all(l)) def test_find_all_resultset(self): """All find_all calls return a ResultSet""" soup = self.soup("<a></a>") result = soup.find_all("a") self.assertTrue(hasattr(result, "source")) result = soup.find_all(True) self.assertTrue(hasattr(result, "source")) result = soup.find_all(text="foo") self.assertTrue(hasattr(result, "source")) class TestFindAllBasicNamespaces(TreeTest): def test_find_by_namespaced_name(self): soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">') self.assertEqual("4", soup.find("mathml:msqrt").string) self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name) class TestFindAllByName(TreeTest): """Test ways of finding tags by tag name.""" def setUp(self): super(TreeTest, self).setUp() self.tree = self.soup("""<a>First tag.</a> <b>Second tag.</b> <c>Third <a>Nested tag.</a> tag.</c>""") def test_find_all_by_tag_name(self): # Find all the <a> tags. self.assertSelects( self.tree.find_all('a'), ['First tag.', 'Nested tag.']) def test_find_all_by_name_and_text(self): self.assertSelects( self.tree.find_all('a', text='First tag.'), ['First tag.']) self.assertSelects( self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.']) self.assertSelects( self.tree.find_all('a', text=re.compile("tag")), ['First tag.', 'Nested tag.']) def test_find_all_on_non_root_element(self): # You can call find_all on any node, not just the root. 
self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.']) def test_calling_element_invokes_find_all(self): self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.']) def test_find_all_by_tag_strainer(self): self.assertSelects( self.tree.find_all(SoupStrainer('a')), ['First tag.', 'Nested tag.']) def test_find_all_by_tag_names(self): self.assertSelects( self.tree.find_all(['a', 'b']), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_by_tag_dict(self): self.assertSelects( self.tree.find_all({'a' : True, 'b' : True}), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_by_tag_re(self): self.assertSelects( self.tree.find_all(re.compile('^[ab]$')), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_with_tags_matching_method(self): # You can define an oracle method that determines whether # a tag matches the search. def id_matches_name(tag): return tag.name == tag.get('id') tree = self.soup("""<a id="a">Match 1.</a> <a id="1">Does not match.</a> <b id="b">Match 2.</a>""") self.assertSelects( tree.find_all(id_matches_name), ["Match 1.", "Match 2."]) def test_find_with_multi_valued_attribute(self): soup = self.soup( "<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>" ) r1 = soup.find('div', 'a d'); r2 = soup.find('div', re.compile(r'a d')); r3, r4 = soup.find_all('div', ['a b', 'a d']); self.assertEqual('3', r1.string) self.assertEqual('3', r2.string) self.assertEqual('1', r3.string) self.assertEqual('3', r4.string) class TestFindAllByAttribute(TreeTest): def test_find_all_by_attribute_name(self): # You can pass in keyword arguments to find_all to search by # attribute. tree = self.soup(""" <a id="first">Matching a.</a> <a id="second"> Non-matching <b id="first">Matching b.</b>a. 
</a>""") self.assertSelects(tree.find_all(id='first'), ["Matching a.", "Matching b."]) def test_find_all_by_utf8_attribute_value(self): peace = u"םולש".encode("utf8") data = u'<a title="םולש"></a>'.encode("utf8") soup = self.soup(data) self.assertEqual([soup.a], soup.find_all(title=peace)) self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8"))) self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"])) def test_find_all_by_attribute_dict(self): # You can pass in a dictionary as the argument 'attrs'. This # lets you search for attributes like 'name' (a fixed argument # to find_all) and 'class' (a reserved word in Python.) tree = self.soup(""" <a name="name1" class="class1">Name match.</a> <a name="name2" class="class2">Class match.</a> <a name="name3" class="class3">Non-match.</a> <name1>A tag called 'name1'.</name1> """) # This doesn't do what you want. self.assertSelects(tree.find_all(name='name1'), ["A tag called 'name1'."]) # This does what you want. self.assertSelects(tree.find_all(attrs={'name' : 'name1'}), ["Name match."]) self.assertSelects(tree.find_all(attrs={'class' : 'class2'}), ["Class match."]) def test_find_all_by_class(self): tree = self.soup(""" <a class="1">Class 1.</a> <a class="2">Class 2.</a> <b class="1">Class 1.</b> <c class="3 4">Class 3 and 4.</c> """) # Passing in the class_ keyword argument will search against # the 'class' attribute. self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.']) self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.']) self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.']) # Passing in a string to 'attrs' will also search the CSS class. 
self.assertSelects(tree.find_all('a', '1'), ['Class 1.']) self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.']) self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.']) self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.']) def test_find_by_class_when_multiple_classes_present(self): tree = self.soup("<gar class='foo bar'>Found it</gar>") f = tree.find_all("gar", class_=re.compile("o")) self.assertSelects(f, ["Found it"]) f = tree.find_all("gar", class_=re.compile("a")) self.assertSelects(f, ["Found it"]) # If the search fails to match the individual strings "foo" and "bar", # it will be tried against the combined string "foo bar". f = tree.find_all("gar", class_=re.compile("o b")) self.assertSelects(f, ["Found it"]) def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self): soup = self.soup("<a class='bar'>Found it</a>") self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"]) def big_attribute_value(value): return len(value) > 3 self.assertSelects(soup.find_all("a", big_attribute_value), []) def small_attribute_value(value): return len(value) <= 3 self.assertSelects( soup.find_all("a", small_attribute_value), ["Found it"]) def test_find_all_with_string_for_attrs_finds_multiple_classes(self): soup = self.soup('<a class="foo bar"></a><a class="foo"></a>') a, a2 = soup.find_all("a") self.assertEqual([a, a2], soup.find_all("a", "foo")) self.assertEqual([a], soup.find_all("a", "bar")) # If you specify the class as a string that contains a # space, only that specific value will be found. 
self.assertEqual([a], soup.find_all("a", class_="foo bar")) self.assertEqual([a], soup.find_all("a", "foo bar")) self.assertEqual([], soup.find_all("a", "bar foo")) def test_find_all_by_attribute_soupstrainer(self): tree = self.soup(""" <a id="first">Match.</a> <a id="second">Non-match.</a>""") strainer = SoupStrainer(attrs={'id' : 'first'}) self.assertSelects(tree.find_all(strainer), ['Match.']) def test_find_all_with_missing_attribute(self): # You can pass in None as the value of an attribute to find_all. # This will match tags that do not have that attribute set. tree = self.soup("""<a id="1">ID present.</a> <a>No ID present.</a> <a id="">ID is empty.</a>""") self.assertSelects(tree.find_all('a', id=None), ["No ID present."]) def test_find_all_with_defined_attribute(self): # You can pass in None as the value of an attribute to find_all. # This will match tags that have that attribute set to any value. tree = self.soup("""<a id="1">ID present.</a> <a>No ID present.</a> <a id="">ID is empty.</a>""") self.assertSelects( tree.find_all(id=True), ["ID present.", "ID is empty."]) def test_find_all_with_numeric_attribute(self): # If you search for a number, it's treated as a string. tree = self.soup("""<a id=1>Unquoted attribute.</a> <a id="1">Quoted attribute.</a>""") expected = ["Unquoted attribute.", "Quoted attribute."] self.assertSelects(tree.find_all(id=1), expected) self.assertSelects(tree.find_all(id="1"), expected) def test_find_all_with_list_attribute_values(self): # You can pass a list of attribute values instead of just one, # and you'll get tags that match any of the values. tree = self.soup("""<a id="1">1</a> <a id="2">2</a> <a id="3">3</a> <a>No ID.</a>""") self.assertSelects(tree.find_all(id=["1", "3", "4"]), ["1", "3"]) def test_find_all_with_regular_expression_attribute_value(self): # You can pass a regular expression as an attribute value, and # you'll get tags whose values for that attribute match the # regular expression. 
tree = self.soup("""<a id="a">One a.</a> <a id="aa">Two as.</a> <a id="ab">Mixed as and bs.</a> <a id="b">One b.</a> <a>No ID.</a>""") self.assertSelects(tree.find_all(id=re.compile("^a+$")), ["One a.", "Two as."]) def test_find_by_name_and_containing_string(self): soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>") a = soup.a self.assertEqual([a], soup.find_all("a", text="foo")) self.assertEqual([], soup.find_all("a", text="bar")) self.assertEqual([], soup.find_all("a", text="bar")) def test_find_by_name_and_containing_string_when_string_is_buried(self): soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>") self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo")) def test_find_by_attribute_and_containing_string(self): soup = self.soup('<b id="1">foo</b><a id="2">foo</a>') a = soup.a self.assertEqual([a], soup.find_all(id=2, text="foo")) self.assertEqual([], soup.find_all(id=1, text="bar")) class TestIndex(TreeTest): """Test Tag.index""" def test_index(self): tree = self.soup("""<div> <a>Identical</a> <b>Not identical</b> <a>Identical</a> <c><d>Identical with child</d></c> <b>Also not identical</b> <c><d>Identical with child</d></c> </div>""") div = tree.div for i, element in enumerate(div.contents): self.assertEqual(i, div.index(element)) self.assertRaises(ValueError, tree.index, 1) class TestParentOperations(TreeTest): """Test navigation and searching through an element's parents.""" def setUp(self): super(TestParentOperations, self).setUp() self.tree = self.soup('''<ul id="empty"></ul> <ul id="top"> <ul id="middle"> <ul id="bottom"> <b>Start here</b> </ul> </ul>''') self.start = self.tree.b def test_parent(self): self.assertEqual(self.start.parent['id'], 'bottom') self.assertEqual(self.start.parent.parent['id'], 'middle') self.assertEqual(self.start.parent.parent.parent['id'], 'top') def test_parent_of_top_tag_is_soup_object(self): top_tag = self.tree.contents[0] self.assertEqual(top_tag.parent, self.tree) def test_soup_object_has_no_parent(self): 
self.assertEqual(None, self.tree.parent) def test_find_parents(self): self.assertSelectsIDs( self.start.find_parents('ul'), ['bottom', 'middle', 'top']) self.assertSelectsIDs( self.start.find_parents('ul', id="middle"), ['middle']) def test_find_parent(self): self.assertEqual(self.start.find_parent('ul')['id'], 'bottom') self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top') def test_parent_of_text_element(self): text = self.tree.find(text="Start here") self.assertEqual(text.parent.name, 'b') def test_text_element_find_parent(self): text = self.tree.find(text="Start here") self.assertEqual(text.find_parent('ul')['id'], 'bottom') def test_parent_generator(self): parents = [parent['id'] for parent in self.start.parents if parent is not None and 'id' in parent.attrs] self.assertEqual(parents, ['bottom', 'middle', 'top']) class ProximityTest(TreeTest): def setUp(self): super(TreeTest, self).setUp() self.tree = self.soup( '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>') class TestNextOperations(ProximityTest): def setUp(self): super(TestNextOperations, self).setUp() self.start = self.tree.b def test_next(self): self.assertEqual(self.start.next_element, "One") self.assertEqual(self.start.next_element.next_element['id'], "2") def test_next_of_last_item_is_none(self): last = self.tree.find(text="Three") self.assertEqual(last.next_element, None) def test_next_of_root_is_none(self): # The document root is outside the next/previous chain. 
self.assertEqual(self.tree.next_element, None) def test_find_all_next(self): self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"]) self.start.find_all_next(id=3) self.assertSelects(self.start.find_all_next(id=3), ["Three"]) def test_find_next(self): self.assertEqual(self.start.find_next('b')['id'], '2') self.assertEqual(self.start.find_next(text="Three"), "Three") def test_find_next_for_text_element(self): text = self.tree.find(text="One") self.assertEqual(text.find_next("b").string, "Two") self.assertSelects(text.find_all_next("b"), ["Two", "Three"]) def test_next_generator(self): start = self.tree.find(text="Two") successors = [node for node in start.next_elements] # There are two successors: the final <b> tag and its text contents. tag, contents = successors self.assertEqual(tag['id'], '3') self.assertEqual(contents, "Three") class TestPreviousOperations(ProximityTest): def setUp(self): super(TestPreviousOperations, self).setUp() self.end = self.tree.find(text="Three") def test_previous(self): self.assertEqual(self.end.previous_element['id'], "3") self.assertEqual(self.end.previous_element.previous_element, "Two") def test_previous_of_first_item_is_none(self): first = self.tree.find('html') self.assertEqual(first.previous_element, None) def test_previous_of_root_is_none(self): # The document root is outside the next/previous chain. # XXX This is broken! #self.assertEqual(self.tree.previous_element, None) pass def test_find_all_previous(self): # The <b> tag containing the "Three" node is the predecessor # of the "Three" node itself, which is why "Three" shows up # here. 
self.assertSelects( self.end.find_all_previous('b'), ["Three", "Two", "One"]) self.assertSelects(self.end.find_all_previous(id=1), ["One"]) def test_find_previous(self): self.assertEqual(self.end.find_previous('b')['id'], '3') self.assertEqual(self.end.find_previous(text="One"), "One") def test_find_previous_for_text_element(self): text = self.tree.find(text="Three") self.assertEqual(text.find_previous("b").string, "Three") self.assertSelects( text.find_all_previous("b"), ["Three", "Two", "One"]) def test_previous_generator(self): start = self.tree.find(text="One") predecessors = [node for node in start.previous_elements] # There are four predecessors: the <b> tag containing "One" # the <body> tag, the <head> tag, and the <html> tag. b, body, head, html = predecessors self.assertEqual(b['id'], '1') self.assertEqual(body.name, "body") self.assertEqual(head.name, "head") self.assertEqual(html.name, "html") class SiblingTest(TreeTest): def setUp(self): super(SiblingTest, self).setUp() markup = '''<html> <span id="1"> <span id="1.1"></span> </span> <span id="2"> <span id="2.1"></span> </span> <span id="3"> <span id="3.1"></span> </span> <span id="4"></span> </html>''' # All that whitespace looks good but makes the tests more # difficult. Get rid of it. markup = re.compile("\n\s*").sub("", markup) self.tree = self.soup(markup) class TestNextSibling(SiblingTest): def setUp(self): super(TestNextSibling, self).setUp() self.start = self.tree.find(id="1") def test_next_sibling_of_root_is_none(self): self.assertEqual(self.tree.next_sibling, None) def test_next_sibling(self): self.assertEqual(self.start.next_sibling['id'], '2') self.assertEqual(self.start.next_sibling.next_sibling['id'], '3') # Note the difference between next_sibling and next_element. 
self.assertEqual(self.start.next_element['id'], '1.1') def test_next_sibling_may_not_exist(self): self.assertEqual(self.tree.html.next_sibling, None) nested_span = self.tree.find(id="1.1") self.assertEqual(nested_span.next_sibling, None) last_span = self.tree.find(id="4") self.assertEqual(last_span.next_sibling, None) def test_find_next_sibling(self): self.assertEqual(self.start.find_next_sibling('span')['id'], '2') def test_next_siblings(self): self.assertSelectsIDs(self.start.find_next_siblings("span"), ['2', '3', '4']) self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3']) def test_next_sibling_for_text_element(self): soup = self.soup("Foo<b>bar</b>baz") start = soup.find(text="Foo") self.assertEqual(start.next_sibling.name, 'b') self.assertEqual(start.next_sibling.next_sibling, 'baz') self.assertSelects(start.find_next_siblings('b'), ['bar']) self.assertEqual(start.find_next_sibling(text="baz"), "baz") self.assertEqual(start.find_next_sibling(text="nonesuch"), None) class TestPreviousSibling(SiblingTest): def setUp(self): super(TestPreviousSibling, self).setUp() self.end = self.tree.find(id="4") def test_previous_sibling_of_root_is_none(self): self.assertEqual(self.tree.previous_sibling, None) def test_previous_sibling(self): self.assertEqual(self.end.previous_sibling['id'], '3') self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2') # Note the difference between previous_sibling and previous_element. 
self.assertEqual(self.end.previous_element['id'], '3.1') def test_previous_sibling_may_not_exist(self): self.assertEqual(self.tree.html.previous_sibling, None) nested_span = self.tree.find(id="1.1") self.assertEqual(nested_span.previous_sibling, None) first_span = self.tree.find(id="1") self.assertEqual(first_span.previous_sibling, None) def test_find_previous_sibling(self): self.assertEqual(self.end.find_previous_sibling('span')['id'], '3') def test_previous_siblings(self): self.assertSelectsIDs(self.end.find_previous_siblings("span"), ['3', '2', '1']) self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1']) def test_previous_sibling_for_text_element(self): soup = self.soup("Foo<b>bar</b>baz") start = soup.find(text="baz") self.assertEqual(start.previous_sibling.name, 'b') self.assertEqual(start.previous_sibling.previous_sibling, 'Foo') self.assertSelects(start.find_previous_siblings('b'), ['bar']) self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo") self.assertEqual(start.find_previous_sibling(text="nonesuch"), None) class TestTagCreation(SoupTest): """Test the ability to create new tags.""" def test_new_tag(self): soup = self.soup("") new_tag = soup.new_tag("foo", bar="baz") self.assertTrue(isinstance(new_tag, Tag)) self.assertEqual("foo", new_tag.name) self.assertEqual(dict(bar="baz"), new_tag.attrs) self.assertEqual(None, new_tag.parent) def test_tag_inherits_self_closing_rules_from_builder(self): if XML_BUILDER_PRESENT: xml_soup = BeautifulSoup("", "lxml-xml") xml_br = xml_soup.new_tag("br") xml_p = xml_soup.new_tag("p") # Both the <br> and <p> tag are empty-element, just because # they have no contents. self.assertEqual(b"<br/>", xml_br.encode()) self.assertEqual(b"<p/>", xml_p.encode()) html_soup = BeautifulSoup("", "html.parser") html_br = html_soup.new_tag("br") html_p = html_soup.new_tag("p") # The HTML builder users HTML's rules about which tags are # empty-element tags, and the new tags reflect these rules. 
self.assertEqual(b"<br/>", html_br.encode()) self.assertEqual(b"<p></p>", html_p.encode()) def test_new_string_creates_navigablestring(self): soup = self.soup("") s = soup.new_string("foo") self.assertEqual("foo", s) self.assertTrue(isinstance(s, NavigableString)) def test_new_string_can_create_navigablestring_subclass(self): soup = self.soup("") s = soup.new_string("foo", Comment) self.assertEqual("foo", s) self.assertTrue(isinstance(s, Comment)) class TestTreeModification(SoupTest): def test_attribute_modification(self): soup = self.soup('<a id="1"></a>') soup.a['id'] = 2 self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>')) del(soup.a['id']) self.assertEqual(soup.decode(), self.document_for('<a></a>')) soup.a['id2'] = 'foo' self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>')) def test_new_tag_creation(self): builder = builder_registry.lookup('html')() soup = self.soup("<body></body>", builder=builder) a = Tag(soup, builder, 'a') ol = Tag(soup, builder, 'ol') a['href'] = 'http://foo.com/' soup.body.insert(0, a) soup.body.insert(1, ol) self.assertEqual( soup.body.encode(), b'<body><a href="http://foo.com/"></a><ol></ol></body>') def test_append_to_contents_moves_tag(self): doc = """<p id="1">Don't leave me <b>here</b>.</p> <p id="2">Don\'t leave!</p>""" soup = self.soup(doc) second_para = soup.find(id='2') bold = soup.b # Move the <b> tag to the end of the second paragraph. soup.find(id='2').append(soup.b) # The <b> tag is now a child of the second paragraph. 
self.assertEqual(bold.parent, second_para) self.assertEqual( soup.decode(), self.document_for( '<p id="1">Don\'t leave me .</p>\n' '<p id="2">Don\'t leave!<b>here</b></p>')) def test_replace_with_returns_thing_that_was_replaced(self): text = "<a></a><b><c></c></b>" soup = self.soup(text) a = soup.a new_a = a.replace_with(soup.c) self.assertEqual(a, new_a) def test_unwrap_returns_thing_that_was_replaced(self): text = "<a><b></b><c></c></a>" soup = self.soup(text) a = soup.a new_a = a.unwrap() self.assertEqual(a, new_a) def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self): soup = self.soup("<a><b>Foo</b></a><c>Bar</c>") a = soup.a a.extract() self.assertEqual(None, a.parent) self.assertRaises(ValueError, a.unwrap) self.assertRaises(ValueError, a.replace_with, soup.c) def test_replace_tag_with_itself(self): text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>" soup = self.soup(text) c = soup.c soup.c.replace_with(c) self.assertEqual(soup.decode(), self.document_for(text)) def test_replace_tag_with_its_parent_raises_exception(self): text = "<a><b></b></a>" soup = self.soup(text) self.assertRaises(ValueError, soup.b.replace_with, soup.a) def test_insert_tag_into_itself_raises_exception(self): text = "<a><b></b></a>" soup = self.soup(text) self.assertRaises(ValueError, soup.a.insert, 0, soup.a) def test_replace_with_maintains_next_element_throughout(self): soup = self.soup('<p><a>one</a><b>three</b></p>') a = soup.a b = a.contents[0] # Make it so the <a> tag has two text children. a.insert(1, "two") # Now replace each one with the empty string. left, right = a.contents left.replaceWith('') right.replaceWith('') # The <b> tag is still connected to the tree. 
self.assertEqual("three", soup.b.string) def test_replace_final_node(self): soup = self.soup("<b>Argh!</b>") soup.find(text="Argh!").replace_with("Hooray!") new_text = soup.find(text="Hooray!") b = soup.b self.assertEqual(new_text.previous_element, b) self.assertEqual(new_text.parent, b) self.assertEqual(new_text.previous_element.next_element, new_text) self.assertEqual(new_text.next_element, None) def test_consecutive_text_nodes(self): # A builder should never create two consecutive text nodes, # but if you insert one next to another, Beautiful Soup will # handle it correctly. soup = self.soup("<a><b>Argh!</b><c></c></a>") soup.b.insert(1, "Hooray!") self.assertEqual( soup.decode(), self.document_for( "<a><b>Argh!Hooray!</b><c></c></a>")) new_text = soup.find(text="Hooray!") self.assertEqual(new_text.previous_element, "Argh!") self.assertEqual(new_text.previous_element.next_element, new_text) self.assertEqual(new_text.previous_sibling, "Argh!") self.assertEqual(new_text.previous_sibling.next_sibling, new_text) self.assertEqual(new_text.next_sibling, None) self.assertEqual(new_text.next_element, soup.c) def test_insert_string(self): soup = self.soup("<a></a>") soup.a.insert(0, "bar") soup.a.insert(0, "foo") # The string were added to the tag. self.assertEqual(["foo", "bar"], soup.a.contents) # And they were converted to NavigableStrings. self.assertEqual(soup.a.contents[0].next_element, "bar") def test_insert_tag(self): builder = self.default_builder soup = self.soup( "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder) magic_tag = Tag(soup, builder, 'magictag') magic_tag.insert(0, "the") soup.a.insert(1, magic_tag) self.assertEqual( soup.decode(), self.document_for( "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>")) # Make sure all the relationships are hooked up correctly. 
b_tag = soup.b self.assertEqual(b_tag.next_sibling, magic_tag) self.assertEqual(magic_tag.previous_sibling, b_tag) find = b_tag.find(text="Find") self.assertEqual(find.next_element, magic_tag) self.assertEqual(magic_tag.previous_element, find) c_tag = soup.c self.assertEqual(magic_tag.next_sibling, c_tag) self.assertEqual(c_tag.previous_sibling, magic_tag) the = magic_tag.find(text="the") self.assertEqual(the.parent, magic_tag) self.assertEqual(the.next_element, c_tag) self.assertEqual(c_tag.previous_element, the) def test_append_child_thats_already_at_the_end(self): data = "<a><b></b></a>" soup = self.soup(data) soup.a.append(soup.b) self.assertEqual(data, soup.decode()) def test_move_tag_to_beginning_of_parent(self): data = "<a><b></b><c></c><d></d></a>" soup = self.soup(data) soup.a.insert(0, soup.d) self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode()) def test_insert_works_on_empty_element_tag(self): # This is a little strange, since most HTML parsers don't allow # markup like this to come through. But in general, we don't # know what the parser would or wouldn't have allowed, so # I'm letting this succeed for now. 
soup = self.soup("<br/>") soup.br.insert(1, "Contents") self.assertEqual(str(soup.br), "<br>Contents</br>") def test_insert_before(self): soup = self.soup("<a>foo</a><b>bar</b>") soup.b.insert_before("BAZ") soup.a.insert_before("QUUX") self.assertEqual( soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>")) soup.a.insert_before(soup.b) self.assertEqual( soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")) def test_insert_after(self): soup = self.soup("<a>foo</a><b>bar</b>") soup.b.insert_after("BAZ") soup.a.insert_after("QUUX") self.assertEqual( soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ")) soup.b.insert_after(soup.a) self.assertEqual( soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")) def test_insert_after_raises_exception_if_after_has_no_meaning(self): soup = self.soup("") tag = soup.new_tag("a") string = soup.new_string("") self.assertRaises(ValueError, string.insert_after, tag) self.assertRaises(NotImplementedError, soup.insert_after, tag) self.assertRaises(ValueError, tag.insert_after, tag) def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self): soup = self.soup("") tag = soup.new_tag("a") string = soup.new_string("") self.assertRaises(ValueError, string.insert_before, tag) self.assertRaises(NotImplementedError, soup.insert_before, tag) self.assertRaises(ValueError, tag.insert_before, tag) def test_replace_with(self): soup = self.soup( "<p>There's <b>no</b> business like <b>show</b> business</p>") no, show = soup.find_all('b') show.replace_with(no) self.assertEqual( soup.decode(), self.document_for( "<p>There's business like <b>no</b> business</p>")) self.assertEqual(show.parent, None) self.assertEqual(no.parent, soup.p) self.assertEqual(no.next_element, "no") self.assertEqual(no.next_sibling, " business") def test_replace_first_child(self): data = "<a><b></b><c></c></a>" soup = self.soup(data) soup.b.replace_with(soup.c) self.assertEqual("<a><c></c></a>", soup.decode()) def 
test_replace_last_child(self): data = "<a><b></b><c></c></a>" soup = self.soup(data) soup.c.replace_with(soup.b) self.assertEqual("<a><b></b></a>", soup.decode()) def test_nested_tag_replace_with(self): soup = self.soup( """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""") # Replace the entire <b> tag and its contents ("reserve the # right") with the <f> tag ("refuse"). remove_tag = soup.b move_tag = soup.f remove_tag.replace_with(move_tag) self.assertEqual( soup.decode(), self.document_for( "<a>We<f>refuse</f></a><e>to<g>service</g></e>")) # The <b> tag is now an orphan. self.assertEqual(remove_tag.parent, None) self.assertEqual(remove_tag.find(text="right").next_element, None) self.assertEqual(remove_tag.previous_element, None) self.assertEqual(remove_tag.next_sibling, None) self.assertEqual(remove_tag.previous_sibling, None) # The <f> tag is now connected to the <a> tag. self.assertEqual(move_tag.parent, soup.a) self.assertEqual(move_tag.previous_element, "We") self.assertEqual(move_tag.next_element.next_element, soup.e) self.assertEqual(move_tag.next_sibling, None) # The gap where the <f> tag used to be has been mended, and # the word "to" is now connected to the <g> tag. 
to_text = soup.find(text="to") g_tag = soup.g self.assertEqual(to_text.next_element, g_tag) self.assertEqual(to_text.next_sibling, g_tag) self.assertEqual(g_tag.previous_element, to_text) self.assertEqual(g_tag.previous_sibling, to_text) def test_unwrap(self): tree = self.soup(""" <p>Unneeded <em>formatting</em> is unneeded</p> """) tree.em.unwrap() self.assertEqual(tree.em, None) self.assertEqual(tree.p.text, "Unneeded formatting is unneeded") def test_wrap(self): soup = self.soup("I wish I was bold.") value = soup.string.wrap(soup.new_tag("b")) self.assertEqual(value.decode(), "<b>I wish I was bold.</b>") self.assertEqual( soup.decode(), self.document_for("<b>I wish I was bold.</b>")) def test_wrap_extracts_tag_from_elsewhere(self): soup = self.soup("<b></b>I wish I was bold.") soup.b.next_sibling.wrap(soup.b) self.assertEqual( soup.decode(), self.document_for("<b>I wish I was bold.</b>")) def test_wrap_puts_new_contents_at_the_end(self): soup = self.soup("<b>I like being bold.</b>I wish I was bold.") soup.b.next_sibling.wrap(soup.b) self.assertEqual(2, len(soup.b.contents)) self.assertEqual( soup.decode(), self.document_for( "<b>I like being bold.I wish I was bold.</b>")) def test_extract(self): soup = self.soup( '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>') self.assertEqual(len(soup.body.contents), 3) extracted = soup.find(id="nav").extract() self.assertEqual( soup.decode(), "<html><body>Some content. More content.</body></html>") self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>') # The extracted tag is now an orphan. self.assertEqual(len(soup.body.contents), 2) self.assertEqual(extracted.parent, None) self.assertEqual(extracted.previous_element, None) self.assertEqual(extracted.next_element.next_element, None) # The gap where the extracted tag used to be has been mended. content_1 = soup.find(text="Some content. 
") content_2 = soup.find(text=" More content.") self.assertEqual(content_1.next_element, content_2) self.assertEqual(content_1.next_sibling, content_2) self.assertEqual(content_2.previous_element, content_1) self.assertEqual(content_2.previous_sibling, content_1) def test_extract_distinguishes_between_identical_strings(self): soup = self.soup("<a>foo</a><b>bar</b>") foo_1 = soup.a.string bar_1 = soup.b.string foo_2 = soup.new_string("foo") bar_2 = soup.new_string("bar") soup.a.append(foo_2) soup.b.append(bar_2) # Now there are two identical strings in the <a> tag, and two # in the <b> tag. Let's remove the first "foo" and the second # "bar". foo_1.extract() bar_2.extract() self.assertEqual(foo_2, soup.a.string) self.assertEqual(bar_2, soup.b.string) def test_extract_multiples_of_same_tag(self): soup = self.soup(""" <html> <head> <script>foo</script> </head> <body> <script>bar</script> <a></a> </body> <script>baz</script> </html>""") [soup.script.extract() for i in soup.find_all("script")] self.assertEqual("<body>\n\n<a></a>\n</body>", unicode(soup.body)) def test_extract_works_when_element_is_surrounded_by_identical_strings(self): soup = self.soup( '<html>\n' '<body>hi</body>\n' '</html>') soup.find('body').extract() self.assertEqual(None, soup.find('body')) def test_clear(self): """Tag.clear()""" soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>") # clear using extract() a = soup.a soup.p.clear() self.assertEqual(len(soup.p.contents), 0) self.assertTrue(hasattr(a, "contents")) # clear using decompose() em = a.em a.clear(decompose=True) self.assertEqual(0, len(em.contents)) def test_string_set(self): """Tag.string = 'string'""" soup = self.soup("<a></a> <b><c></c></b>") soup.a.string = "foo" self.assertEqual(soup.a.contents, ["foo"]) soup.b.string = "bar" self.assertEqual(soup.b.contents, ["bar"]) def test_string_set_does_not_affect_original_string(self): soup = self.soup("<a><b>foo</b><c>bar</c>") soup.b.string = soup.c.string 
self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>") def test_set_string_preserves_class_of_string(self): soup = self.soup("<a></a>") cdata = CData("foo") soup.a.string = cdata self.assertTrue(isinstance(soup.a.string, CData)) class TestElementObjects(SoupTest): """Test various features of element objects.""" def test_len(self): """The length of an element is its number of children.""" soup = self.soup("<top>1<b>2</b>3</top>") # The BeautifulSoup object itself contains one element: the # <top> tag. self.assertEqual(len(soup.contents), 1) self.assertEqual(len(soup), 1) # The <top> tag contains three elements: the text node "1", the # <b> tag, and the text node "3". self.assertEqual(len(soup.top), 3) self.assertEqual(len(soup.top.contents), 3) def test_member_access_invokes_find(self): """Accessing a Python member .foo invokes find('foo')""" soup = self.soup('<b><i></i></b>') self.assertEqual(soup.b, soup.find('b')) self.assertEqual(soup.b.i, soup.find('b').find('i')) self.assertEqual(soup.a, None) def test_deprecated_member_access(self): soup = self.soup('<b><i></i></b>') with warnings.catch_warnings(record=True) as w: tag = soup.bTag self.assertEqual(soup.b, tag) self.assertEqual( '.bTag is deprecated, use .find("b") instead.', str(w[0].message)) def test_has_attr(self): """has_attr() checks for the presence of an attribute. Please note note: has_attr() is different from __in__. has_attr() checks the tag's attributes and __in__ checks the tag's chidlren. """ soup = self.soup("<foo attr='bar'>") self.assertTrue(soup.foo.has_attr('attr')) self.assertFalse(soup.foo.has_attr('attr2')) def test_attributes_come_out_in_alphabetical_order(self): markup = '<b a="1" z="5" m="3" f="2" y="4"></b>' self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>') def test_string(self): # A tag that contains only a text node makes that node # available as .string. 
        soup = self.soup("<b>foo</b>")
        self.assertEqual(soup.b.string, 'foo')

    def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
        soup = self.soup("<b></b>")
        self.assertEqual(soup.b.string, None)

    def test_tag_with_multiple_children_has_no_string(self):
        # A tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        self.assertEqual(soup.b.string, None)

        soup = self.soup("<a>foo<b></b>bar</b>")
        self.assertEqual(soup.b.string, None)

        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        self.assertEqual(soup.a.string, None)

    def test_tag_with_recursive_string_has_string(self):
        # A tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        self.assertEqual(soup.a.string, "foo")
        self.assertEqual(soup.string, "foo")

    def test_lack_of_string(self):
        """Only a tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        self.assertFalse(soup.b.string)

        soup = self.soup("<b></b>")
        self.assertFalse(soup.b.string)

    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        self.assertEqual(soup.a.text, "ar t ")
        self.assertEqual(soup.a.get_text(strip=True), "art")
        self.assertEqual(soup.a.get_text(","), "a,r, , t ")
        self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")

    def test_get_text_ignores_comments(self):
        # By default, Comment nodes are excluded from get_text().
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(soup.get_text(), "foobar")

        # Passing an explicit types tuple (or types=None) includes
        # comment text as well.
        self.assertEqual(
            soup.get_text(types=(NavigableString, Comment)),
            "fooIGNOREbar")
        self.assertEqual(
            soup.get_text(types=None),
            "fooIGNOREbar")

    def test_all_strings_ignores_comments(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        self.assertEqual(['foo', 'bar'], list(soup.strings))

class TestCDAtaListAttributes(SoupTest):

    """Testing cdata-list attributes like 'class'.
""" def test_single_value_becomes_list(self): soup = self.soup("<a class='foo'>") self.assertEqual(["foo"],soup.a['class']) def test_multiple_values_becomes_list(self): soup = self.soup("<a class='foo bar'>") self.assertEqual(["foo", "bar"], soup.a['class']) def test_multiple_values_separated_by_weird_whitespace(self): soup = self.soup("<a class='foo\tbar\nbaz'>") self.assertEqual(["foo", "bar", "baz"],soup.a['class']) def test_attributes_joined_into_string_on_output(self): soup = self.soup("<a class='foo\tbar'>") self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode()) def test_get_attribute_list(self): soup = self.soup("<a id='abc def'>") self.assertEqual(['abc def'], soup.a.get_attribute_list('id')) def test_accept_charset(self): soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">') self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset']) def test_cdata_attribute_applying_only_to_one_tag(self): data = '<a accept-charset="ISO-8859-1 UTF-8"></a>' soup = self.soup(data) # We saw in another test that accept-charset is a cdata-list # attribute for the <form> tag. But it's not a cdata-list # attribute for any other tag. self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset']) def test_string_has_immutable_name_property(self): string = self.soup("s").string self.assertEqual(None, string.name) def t(): string.name = 'foo' self.assertRaises(AttributeError, t) class TestPersistence(SoupTest): "Testing features like pickle and deepcopy." 
def setUp(self): super(TestPersistence, self).setUp() self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/transitional.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Beautiful Soup: We called him Tortoise because he taught us.</title> <link rev="made" href="mailto:leonardr@segfault.org"> <meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping."> <meta name="generator" content="Markov Approximation 1.4 (module: leonardr)"> <meta name="author" content="Leonard Richardson"> </head> <body> <a href="foo">foo</a> <a href="foo"><b>bar</b></a> </body> </html>""" self.tree = self.soup(self.page) def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. dumped = pickle.dumps(self.tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), self.tree.decode()) def test_deepcopy_identity(self): # Making a deepcopy of a tree yields an identical tree. copied = copy.deepcopy(self.tree) self.assertEqual(copied.decode(), self.tree.decode()) def test_copy_preserves_encoding(self): soup = BeautifulSoup(b'<p>&nbsp;</p>', 'html.parser') encoding = soup.original_encoding copy = soup.__copy__() self.assertEqual(u"<p> </p>", unicode(copy)) self.assertEqual(encoding, copy.original_encoding) def test_unicode_pickle(self): # A tree containing Unicode characters can be pickled. 
html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL) loaded = pickle.loads(dumped) self.assertEqual(loaded.decode(), soup.decode()) def test_copy_navigablestring_is_not_attached_to_tree(self): html = u"<b>Foo<a></a></b><b>Bar</b>" soup = self.soup(html) s1 = soup.find(string="Foo") s2 = copy.copy(s1) self.assertEqual(s1, s2) self.assertEqual(None, s2.parent) self.assertEqual(None, s2.next_element) self.assertNotEqual(None, s1.next_sibling) self.assertEqual(None, s2.next_sibling) self.assertEqual(None, s2.previous_element) def test_copy_navigablestring_subclass_has_same_type(self): html = u"<b><!--Foo--></b>" soup = self.soup(html) s1 = soup.string s2 = copy.copy(s1) self.assertEqual(s1, s2) self.assertTrue(isinstance(s2, Comment)) def test_copy_entire_soup(self): html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end" soup = self.soup(html) soup_copy = copy.copy(soup) self.assertEqual(soup, soup_copy) def test_copy_tag_copies_contents(self): html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end" soup = self.soup(html) div = soup.div div_copy = copy.copy(div) # The two tags look the same, and evaluate to equal. self.assertEqual(unicode(div), unicode(div_copy)) self.assertEqual(div, div_copy) # But they're not the same object. self.assertFalse(div is div_copy) # And they don't have the same relation to the parse tree. The # copy is not associated with a parse tree at all. self.assertEqual(None, div_copy.parent) self.assertEqual(None, div_copy.previous_element) self.assertEqual(None, div_copy.find(string='Bar').next_element) self.assertNotEqual(None, div.find(string='Bar').next_element) class TestSubstitutions(SoupTest): def test_default_formatter_is_minimal(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="minimal") # The < is converted back into &lt; but the e-with-acute is left alone. 
self.assertEqual( decoded, self.document_for( u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>")) def test_formatter_html(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="html") self.assertEqual( decoded, self.document_for("<b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>")) def test_formatter_minimal(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="minimal") # The < is converted back into &lt; but the e-with-acute is left alone. self.assertEqual( decoded, self.document_for( u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>")) def test_formatter_null(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter=None) # Neither the angle brackets nor the e-with-acute are converted. # This is not valid HTML, but it's what the user wanted. self.assertEqual(decoded, self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>")) def test_formatter_custom(self): markup = u"<b>&lt;foo&gt;</b><b>bar</b>" soup = self.soup(markup) decoded = soup.decode(formatter = lambda x: x.upper()) # Instead of normal entity conversion code, the custom # callable is called on every string. 
self.assertEqual( decoded, self.document_for(u"<b><FOO></b><b>BAR</b>")) def test_formatter_is_run_on_attribute_values(self): markup = u'<a href="http://a.com?a=b&c=é">e</a>' soup = self.soup(markup) a = soup.a expect_minimal = u'<a href="http://a.com?a=b&amp;c=é">e</a>' self.assertEqual(expect_minimal, a.decode()) self.assertEqual(expect_minimal, a.decode(formatter="minimal")) expect_html = u'<a href="http://a.com?a=b&amp;c=&eacute;">e</a>' self.assertEqual(expect_html, a.decode(formatter="html")) self.assertEqual(markup, a.decode(formatter=None)) expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>' self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper())) def test_formatter_skips_script_tag_for_html_documents(self): doc = """ <script type="text/javascript"> console.log("< < hey > > "); </script> """ encoded = BeautifulSoup(doc, 'html.parser').encode() self.assertTrue(b"< < hey > >" in encoded) def test_formatter_skips_style_tag_for_html_documents(self): doc = """ <style type="text/css"> console.log("< < hey > > "); </style> """ encoded = BeautifulSoup(doc, 'html.parser').encode() self.assertTrue(b"< < hey > >" in encoded) def test_prettify_leaves_preformatted_text_alone(self): soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz ") # Everything outside the <pre> tag is reformatted, but everything # inside is left alone. 
self.assertEqual( u'<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n</div>', soup.div.prettify()) def test_prettify_accepts_formatter(self): soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser') pretty = soup.prettify(formatter = lambda x: x.upper()) self.assertTrue("FOO" in pretty) def test_prettify_outputs_unicode_by_default(self): soup = self.soup("<a></a>") self.assertEqual(unicode, type(soup.prettify())) def test_prettify_can_encode_data(self): soup = self.soup("<a></a>") self.assertEqual(bytes, type(soup.prettify("utf-8"))) def test_html_entity_substitution_off_by_default(self): markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>" soup = self.soup(markup) encoded = soup.b.encode("utf-8") self.assertEqual(encoded, markup.encode('utf-8')) def test_encoding_substitution(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ('<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>') soup = self.soup(meta_tag) # Parse the document, and the charset apprears unchanged. self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis') # Encode the document into some encoding, and the encoding is # substituted into the meta tag. utf_8 = soup.encode("utf-8") self.assertTrue(b"charset=utf-8" in utf_8) euc_jp = soup.encode("euc_jp") self.assertTrue(b"charset=euc_jp" in euc_jp) shift_jis = soup.encode("shift-jis") self.assertTrue(b"charset=shift-jis" in shift_jis) utf_16_u = soup.encode("utf-16").decode("utf-16") self.assertTrue("charset=utf-16" in utf_16_u) def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self): markup = ('<head><meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/></head><pre>foo</pre>') # Beautiful Soup used to try to rewrite the meta tag even if the # meta tag got filtered out by the strainer. This test makes # sure that doesn't happen. 
strainer = SoupStrainer('pre') soup = self.soup(markup, parse_only=strainer) self.assertEqual(soup.contents[0].name, 'pre') class TestEncoding(SoupTest): """Test the ability to encode objects into strings.""" def test_unicode_string_can_be_encoded(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(soup.b.string.encode("utf-8"), u"\N{SNOWMAN}".encode("utf-8")) def test_tag_containing_unicode_string_can_be_encoded(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( soup.b.encode("utf-8"), html.encode("utf-8")) def test_encoding_substitutes_unrecognized_characters_by_default(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(soup.b.encode("ascii"), b"<b>&#9731;</b>") def test_encoding_can_be_made_strict(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertRaises( UnicodeEncodeError, soup.encode, "ascii", errors="strict") def test_decode_contents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents()) def test_encode_contents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents( encoding="utf8")) def test_deprecated_renderContents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents()) def test_repr(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) if PY3K: self.assertEqual(html, repr(soup)) else: self.assertEqual(b'<b>\\u2603</b>', repr(soup)) class TestNavigableStringSubclasses(SoupTest): def test_cdata(self): # None of the current builders turn CDATA sections into CData # objects, but you can create them manually. 
soup = self.soup("") cdata = CData("foo") soup.insert(1, cdata) self.assertEqual(str(soup), "<![CDATA[foo]]>") self.assertEqual(soup.find(text="foo"), "foo") self.assertEqual(soup.contents[0], "foo") def test_cdata_is_never_formatted(self): """Text inside a CData object is passed into the formatter. But the return value is ignored. """ self.count = 0 def increment(*args): self.count += 1 return "BITTER FAILURE" soup = self.soup("") cdata = CData("<><><>") soup.insert(1, cdata) self.assertEqual( b"<![CDATA[<><><>]]>", soup.encode(formatter=increment)) self.assertEqual(1, self.count) def test_doctype_ends_in_newline(self): # Unlike other NavigableString subclasses, a DOCTYPE always ends # in a newline. doctype = Doctype("foo") soup = self.soup("") soup.insert(1, doctype) self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n") def test_declaration(self): d = Declaration("foo") self.assertEqual("<?foo?>", d.output_ready()) class TestSoupSelector(TreeTest): HTML = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <title>The title</title> <link rel="stylesheet" href="blah.css" type="text/css" id="l1"> </head> <body> <custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag> <div id="main" class="fancy"> <div id="inner"> <h1 id="header1">An H1</h1> <p>Some text</p> <p class="onep" id="p1">Some more text</p> <h2 id="header2">An H2</h2> <p class="class1 class2 class3" id="pmulti">Another</p> <a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a> <h2 id="header3">Another H2</h2> <a id="me" href="http://simonwillison.net/" rel="me">me</a> <span class="s1"> <a href="#" id="s1a1">span1a1</a> <a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a> <span class="span2"> <a href="#" id="s2a1">span2a1</a> </span> <span class="span3"></span> <custom-dashed-tag class="dashed" id="dash2"/> <div data-tag="dashedvalue" id="data1"/> </span> </div> <x id="xid"> <z id="zida"/> <z id="zidab"/> <z 
id="zidac"/> </x> <y id="yid"> <z id="zidb"/> </y> <p lang="en" id="lang-en">English</p> <p lang="en-gb" id="lang-en-gb">English UK</p> <p lang="en-us" id="lang-en-us">English US</p> <p lang="fr" id="lang-fr">French</p> </div> <div id="footer"> </div> """ def setUp(self): self.soup = BeautifulSoup(self.HTML, 'html.parser') def assertSelects(self, selector, expected_ids, **kwargs): el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)] el_ids.sort() expected_ids.sort() self.assertEqual(expected_ids, el_ids, "Selector %s, expected [%s], got [%s]" % ( selector, ', '.join(expected_ids), ', '.join(el_ids) ) ) assertSelect = assertSelects def assertSelectMultiple(self, *tests): for selector, expected_ids in tests: self.assertSelect(selector, expected_ids) def test_one_tag_one(self): els = self.soup.select('title') self.assertEqual(len(els), 1) self.assertEqual(els[0].name, 'title') self.assertEqual(els[0].contents, [u'The title']) def test_one_tag_many(self): els = self.soup.select('div') self.assertEqual(len(els), 4) for div in els: self.assertEqual(div.name, 'div') el = self.soup.select_one('div') self.assertEqual('main', el['id']) def test_select_one_returns_none_if_no_match(self): match = self.soup.select_one('nonexistenttag') self.assertEqual(None, match) def test_tag_in_tag_one(self): els = self.soup.select('div div') self.assertSelects('div div', ['inner', 'data1']) def test_tag_in_tag_many(self): for selector in ('html div', 'html body div', 'body div'): self.assertSelects(selector, ['data1', 'main', 'inner', 'footer']) def test_limit(self): self.assertSelects('html div', ['main'], limit=1) self.assertSelects('html body div', ['inner', 'main'], limit=2) self.assertSelects('body div', ['data1', 'main', 'inner', 'footer'], limit=10) def test_tag_no_match(self): self.assertEqual(len(self.soup.select('del')), 0) def test_invalid_tag(self): self.assertRaises(ValueError, self.soup.select, 'tag%t') def test_select_dashed_tag_ids(self): 
self.assertSelects('custom-dashed-tag', ['dash1', 'dash2']) def test_select_dashed_by_id(self): dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]') self.assertEqual(dashed[0].name, 'custom-dashed-tag') self.assertEqual(dashed[0]['id'], 'dash2') def test_dashed_tag_text(self): self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, u'Hello there.') def test_select_dashed_matches_find_all(self): self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag')) def test_header_tags(self): self.assertSelectMultiple( ('h1', ['header1']), ('h2', ['header2', 'header3']), ) def test_class_one(self): for selector in ('.onep', 'p.onep', 'html p.onep'): els = self.soup.select(selector) self.assertEqual(len(els), 1) self.assertEqual(els[0].name, 'p') self.assertEqual(els[0]['class'], ['onep']) def test_class_mismatched_tag(self): els = self.soup.select('div.onep') self.assertEqual(len(els), 0) def test_one_id(self): for selector in ('div#inner', '#inner', 'div div#inner'): self.assertSelects(selector, ['inner']) def test_bad_id(self): els = self.soup.select('#doesnotexist') self.assertEqual(len(els), 0) def test_items_in_id(self): els = self.soup.select('div#inner p') self.assertEqual(len(els), 3) for el in els: self.assertEqual(el.name, 'p') self.assertEqual(els[1]['class'], ['onep']) self.assertFalse(els[0].has_attr('class')) def test_a_bunch_of_emptys(self): for selector in ('div#main del', 'div#main div.oops', 'div div#main'): self.assertEqual(len(self.soup.select(selector)), 0) def test_multi_class_support(self): for selector in ('.class1', 'p.class1', '.class2', 'p.class2', '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'): self.assertSelects(selector, ['pmulti']) def test_multi_class_selection(self): for selector in ('.class1.class3', '.class3.class2', '.class1.class2.class3'): self.assertSelects(selector, ['pmulti']) def test_child_selector(self): self.assertSelects('.s1 > a', ['s1a1', 's1a2']) 
self.assertSelects('.s1 > a span', ['s1a2s1']) def test_child_selector_id(self): self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1']) def test_attribute_equals(self): self.assertSelectMultiple( ('p[class="onep"]', ['p1']), ('p[id="p1"]', ['p1']), ('[class="onep"]', ['p1']), ('[id="p1"]', ['p1']), ('link[rel="stylesheet"]', ['l1']), ('link[type="text/css"]', ['l1']), ('link[href="blah.css"]', ['l1']), ('link[href="no-blah.css"]', []), ('[rel="stylesheet"]', ['l1']), ('[type="text/css"]', ['l1']), ('[href="blah.css"]', ['l1']), ('[href="no-blah.css"]', []), ('p[href="no-blah.css"]', []), ('[href="no-blah.css"]', []), ) def test_attribute_tilde(self): self.assertSelectMultiple( ('p[class~="class1"]', ['pmulti']), ('p[class~="class2"]', ['pmulti']), ('p[class~="class3"]', ['pmulti']), ('[class~="class1"]', ['pmulti']), ('[class~="class2"]', ['pmulti']), ('[class~="class3"]', ['pmulti']), ('a[rel~="friend"]', ['bob']), ('a[rel~="met"]', ['bob']), ('[rel~="friend"]', ['bob']), ('[rel~="met"]', ['bob']), ) def test_attribute_startswith(self): self.assertSelectMultiple( ('[rel^="style"]', ['l1']), ('link[rel^="style"]', ['l1']), ('notlink[rel^="notstyle"]', []), ('[rel^="notstyle"]', []), ('link[rel^="notstyle"]', []), ('link[href^="bla"]', ['l1']), ('a[href^="http://"]', ['bob', 'me']), ('[href^="http://"]', ['bob', 'me']), ('[id^="p"]', ['pmulti', 'p1']), ('[id^="m"]', ['me', 'main']), ('div[id^="m"]', ['main']), ('a[id^="m"]', ['me']), ('div[data-tag^="dashed"]', ['data1']) ) def test_attribute_endswith(self): self.assertSelectMultiple( ('[href$=".css"]', ['l1']), ('link[href$=".css"]', ['l1']), ('link[id$="1"]', ['l1']), ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']), ('div[id$="1"]', ['data1']), ('[id$="noending"]', []), ) def test_attribute_contains(self): self.assertSelectMultiple( # From test_attribute_startswith ('[rel*="style"]', ['l1']), ('link[rel*="style"]', ['l1']), ('notlink[rel*="notstyle"]', []), ('[rel*="notstyle"]', 
[]), ('link[rel*="notstyle"]', []), ('link[href*="bla"]', ['l1']), ('[href*="http://"]', ['bob', 'me']), ('[id*="p"]', ['pmulti', 'p1']), ('div[id*="m"]', ['main']), ('a[id*="m"]', ['me']), # From test_attribute_endswith ('[href*=".css"]', ['l1']), ('link[href*=".css"]', ['l1']), ('link[id*="1"]', ['l1']), ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']), ('div[id*="1"]', ['data1']), ('[id*="noending"]', []), # New for this test ('[href*="."]', ['bob', 'me', 'l1']), ('a[href*="."]', ['bob', 'me']), ('link[href*="."]', ['l1']), ('div[id*="n"]', ['main', 'inner']), ('div[id*="nn"]', ['inner']), ('div[data-tag*="edval"]', ['data1']) ) def test_attribute_exact_or_hypen(self): self.assertSelectMultiple( ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), ('p[lang|="fr"]', ['lang-fr']), ('p[lang|="gb"]', []), ) def test_attribute_exists(self): self.assertSelectMultiple( ('[rel]', ['l1', 'bob', 'me']), ('link[rel]', ['l1']), ('a[rel]', ['bob', 'me']), ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']), ('p[class]', ['p1', 'pmulti']), ('[blah]', []), ('p[blah]', []), ('div[data-tag]', ['data1']) ) def test_quoted_space_in_selector_name(self): html = """<div style="display: wrong">nope</div> <div style="display: right">yes</div> """ soup = BeautifulSoup(html, 'html.parser') [chosen] = soup.select('div[style="display: right"]') self.assertEqual("yes", chosen.string) def test_unsupported_pseudoclass(self): self.assertRaises( NotImplementedError, self.soup.select, "a:no-such-pseudoclass") self.assertRaises( NotImplementedError, self.soup.select, "a:nth-of-type(a)") def test_nth_of_type(self): # Try to select first paragraph els = self.soup.select('div#inner p:nth-of-type(1)') self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Some text') # Try to select third paragraph els = self.soup.select('div#inner p:nth-of-type(3)') 
self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Another') # Try to select (non-existent!) fourth paragraph els = self.soup.select('div#inner p:nth-of-type(4)') self.assertEqual(len(els), 0) # Pass in an invalid value. self.assertRaises( ValueError, self.soup.select, 'div p:nth-of-type(0)') def test_nth_of_type_direct_descendant(self): els = self.soup.select('div#inner > p:nth-of-type(1)') self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Some text') def test_id_child_selector_nth_of_type(self): self.assertSelects('#inner > p:nth-of-type(2)', ['p1']) def test_select_on_element(self): # Other tests operate on the tree; this operates on an element # within the tree. inner = self.soup.find("div", id="main") selected = inner.select("div") # The <div id="inner"> tag was selected. The <div id="footer"> # tag was not. self.assertSelectsIDs(selected, ['inner', 'data1']) def test_overspecified_child_id(self): self.assertSelects(".fancy #inner", ['inner']) self.assertSelects(".normal #inner", []) def test_adjacent_sibling_selector(self): self.assertSelects('#p1 + h2', ['header2']) self.assertSelects('#p1 + h2 + p', ['pmulti']) self.assertSelects('#p1 + #header2 + .class1', ['pmulti']) self.assertEqual([], self.soup.select('#p1 + p')) def test_general_sibling_selector(self): self.assertSelects('#p1 ~ h2', ['header2', 'header3']) self.assertSelects('#p1 ~ #header2', ['header2']) self.assertSelects('#p1 ~ h2 + a', ['me']) self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me']) self.assertEqual([], self.soup.select('#inner ~ h2')) def test_dangling_combinator(self): self.assertRaises(ValueError, self.soup.select, 'h1 >') def test_sibling_combinator_wont_select_same_tag_twice(self): self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr']) # Test the selector grouping operator (the comma) def test_multiple_select(self): self.assertSelects('x, y', ['xid', 'yid']) def test_multiple_select_with_no_space(self): self.assertSelects('x,y', 
['xid', 'yid']) def test_multiple_select_with_more_space(self): self.assertSelects('x, y', ['xid', 'yid']) def test_multiple_select_duplicated(self): self.assertSelects('x, x', ['xid']) def test_multiple_select_sibling(self): self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr']) def test_multiple_select_tag_and_direct_descendant(self): self.assertSelects('x, y > z', ['xid', 'zidb']) def test_multiple_select_direct_descendant_and_tags(self): self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac']) def test_multiple_select_indirect_descendant(self): self.assertSelects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac']) def test_invalid_multiple_select(self): self.assertRaises(ValueError, self.soup.select, ',x, y') self.assertRaises(ValueError, self.soup.select, 'x,,y') def test_multiple_select_attrs(self): self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb']) def test_multiple_select_ids(self): self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab']) def test_multiple_select_nested(self): self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])
78,279
Python
.py
1,660
37.827108
118
0.585619
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,819
_htmlparser.py
evilhero_mylar/lib/bs4/builder/_htmlparser.py
"""Use the HTMLParser library to parse HTML files that aren't too bad.""" # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __all__ = [ 'HTMLParserTreeBuilder', ] from HTMLParser import HTMLParser try: from HTMLParser import HTMLParseError except ImportError, e: # HTMLParseError is removed in Python 3.5. Since it can never be # thrown in 3.5, we can just define our own class as a placeholder. class HTMLParseError(Exception): pass import sys import warnings # Starting in Python 3.2, the HTMLParser constructor takes a 'strict' # argument, which we'd like to set to False. Unfortunately, # http://bugs.python.org/issue13273 makes strict=True a better bet # before Python 3.2.3. # # At the end of this file, we monkeypatch HTMLParser so that # strict=True works well on Python 3.2.2. major, minor, release = sys.version_info[:3] CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3 CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3 CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4 from bs4.element import ( CData, Comment, Declaration, Doctype, ProcessingInstruction, ) from bs4.dammit import EntitySubstitution, UnicodeDammit from bs4.builder import ( HTML, HTMLTreeBuilder, STRICT, ) HTMLPARSER = 'html.parser' class BeautifulSoupHTMLParser(HTMLParser): def __init__(self, *args, **kwargs): HTMLParser.__init__(self, *args, **kwargs) # Keep a list of empty-element tags that were encountered # without an explicit closing tag. If we encounter a closing tag # of this type, we'll associate it with one of those entries. # # This isn't a stack because we don't care about the # order. It's a list of closing tags we've already handled and # will ignore, assuming they ever show up. self.already_closed_empty_element = [] def handle_startendtag(self, name, attrs): # This is only called when the markup looks like # <tag/>. 
# is_startend() tells handle_starttag not to close the tag # just because its name matches a known empty-element tag. We # know that this is an empty-element tag and we want to call # handle_endtag ourselves. tag = self.handle_starttag(name, attrs, handle_empty_element=False) self.handle_endtag(name) def handle_starttag(self, name, attrs, handle_empty_element=True): # XXX namespace attr_dict = {} for key, value in attrs: # Change None attribute values to the empty string # for consistency with the other tree builders. if value is None: value = '' attr_dict[key] = value attrvalue = '""' #print "START", name tag = self.soup.handle_starttag(name, None, None, attr_dict) if tag and tag.is_empty_element and handle_empty_element: # Unlike other parsers, html.parser doesn't send separate end tag # events for empty-element tags. (It's handled in # handle_startendtag, but only if the original markup looked like # <tag/>.) # # So we need to call handle_endtag() ourselves. Since we # know the start event is identical to the end event, we # don't want handle_endtag() to cross off any previous end # events for tags of this name. self.handle_endtag(name, check_already_closed=False) # But we might encounter an explicit closing tag for this tag # later on. If so, we want to ignore it. self.already_closed_empty_element.append(name) def handle_endtag(self, name, check_already_closed=True): #print "END", name if check_already_closed and name in self.already_closed_empty_element: # This is a redundant end tag for an empty-element tag. # We've already called handle_endtag() for it, so just # check it off the list. # print "ALREADY CLOSED", name self.already_closed_empty_element.remove(name) else: self.soup.handle_endtag(name) def handle_data(self, data): self.soup.handle_data(data) def handle_charref(self, name): # XXX workaround for a bug in HTMLParser. Remove this once # it's fixed in all supported versions. 
# http://bugs.python.org/issue13633 if name.startswith('x'): real_name = int(name.lstrip('x'), 16) elif name.startswith('X'): real_name = int(name.lstrip('X'), 16) else: real_name = int(name) try: data = unichr(real_name) except (ValueError, OverflowError), e: data = u"\N{REPLACEMENT CHARACTER}" self.handle_data(data) def handle_entityref(self, name): character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) if character is not None: data = character else: data = "&%s;" % name self.handle_data(data) def handle_comment(self, data): self.soup.endData() self.soup.handle_data(data) self.soup.endData(Comment) def handle_decl(self, data): self.soup.endData() if data.startswith("DOCTYPE "): data = data[len("DOCTYPE "):] elif data == 'DOCTYPE': # i.e. "<!DOCTYPE>" data = '' self.soup.handle_data(data) self.soup.endData(Doctype) def unknown_decl(self, data): if data.upper().startswith('CDATA['): cls = CData data = data[len('CDATA['):] else: cls = Declaration self.soup.endData() self.soup.handle_data(data) self.soup.endData(cls) def handle_pi(self, data): self.soup.endData() self.soup.handle_data(data) self.soup.endData(ProcessingInstruction) class HTMLParserTreeBuilder(HTMLTreeBuilder): is_xml = False picklable = True NAME = HTMLPARSER features = [NAME, HTML, STRICT] def __init__(self, *args, **kwargs): if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED: kwargs['strict'] = False if CONSTRUCTOR_TAKES_CONVERT_CHARREFS: kwargs['convert_charrefs'] = False self.parser_args = (args, kwargs) def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None, exclude_encodings=None): """ :return: A 4-tuple (markup, original encoding, encoding declared within markup, whether any characters had to be replaced with REPLACEMENT CHARACTER). 
""" if isinstance(markup, unicode): yield (markup, None, None, False) return try_encodings = [user_specified_encoding, document_declared_encoding] dammit = UnicodeDammit(markup, try_encodings, is_html=True, exclude_encodings=exclude_encodings) yield (dammit.markup, dammit.original_encoding, dammit.declared_html_encoding, dammit.contains_replacement_characters) def feed(self, markup): args, kwargs = self.parser_args parser = BeautifulSoupHTMLParser(*args, **kwargs) parser.soup = self.soup try: parser.feed(markup) except HTMLParseError, e: warnings.warn(RuntimeWarning( "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) raise e parser.already_closed_empty_element = [] # Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some # 3.2.3 code. This ensures they don't treat markup like <p></p> as a # string. # # XXX This code can be removed once most Python 3 users are on 3.2.3. if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: import re attrfind_tolerant = re.compile( r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant locatestarttagend = re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name (?:\s+ # whitespace before attribute name (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name (?:\s*=\s* # value indicator (?:'[^']*' # LITA-enclosed value |\"[^\"]*\" # LIT-enclosed value |[^'\">\s]+ # bare value ) )? 
) )* \s* # trailing whitespace """, re.VERBOSE) BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend from html.parser import tagfind, attrfind def parse_starttag(self, i): self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: return endpos rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between i+1 and j into a tag and attrs attrs = [] match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() self.lasttag = tag = rawdata[i+1:k].lower() while k < endpos: if self.strict: m = attrfind.match(rawdata, k) else: m = attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = self.unescape(attrvalue) attrs.append((attrname.lower(), attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") else: offset = offset + len(self.__starttag_text) if self.strict: self.error("junk characters in start tag: %r" % (rawdata[k:endpos][:20],)) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: <span attr="value" /> self.handle_startendtag(tag, attrs) else: self.handle_starttag(tag, attrs) if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) return endpos def set_cdata_mode(self, elem): self.cdata_elem = elem.lower() self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I) BeautifulSoupHTMLParser.parse_starttag = parse_starttag BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode CONSTRUCTOR_TAKES_STRICT = True
11,609
Python
.py
270
33.8
318
0.602165
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,820
_lxml.py
evilhero_mylar/lib/bs4/builder/_lxml.py
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __all__ = [ 'LXMLTreeBuilderForXML', 'LXMLTreeBuilder', ] from io import BytesIO from StringIO import StringIO import collections from lxml import etree from bs4.element import ( Comment, Doctype, NamespacedAttribute, ProcessingInstruction, XMLProcessingInstruction, ) from bs4.builder import ( FAST, HTML, HTMLTreeBuilder, PERMISSIVE, ParserRejectedMarkup, TreeBuilder, XML) from bs4.dammit import EncodingDetector LXML = 'lxml' class LXMLTreeBuilderForXML(TreeBuilder): DEFAULT_PARSER_CLASS = etree.XMLParser is_xml = True processing_instruction_class = XMLProcessingInstruction NAME = "lxml-xml" ALTERNATE_NAMES = ["xml"] # Well, it's permissive by XML parser standards. features = [NAME, LXML, XML, FAST, PERMISSIVE] CHUNK_SIZE = 512 # This namespace mapping is specified in the XML Namespace # standard. DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"} def default_parser(self, encoding): # This can either return a parser object or a class, which # will be instantiated with default arguments. if self._default_parser is not None: return self._default_parser return etree.XMLParser( target=self, strip_cdata=False, recover=True, encoding=encoding) def parser_for(self, encoding): # Use the default parser. parser = self.default_parser(encoding) if isinstance(parser, collections.Callable): # Instantiate the parser with default arguments parser = parser(target=self, strip_cdata=False, encoding=encoding) return parser def __init__(self, parser=None, empty_element_tags=None): # TODO: Issue a warning if parser is present but not a # callable, since that means there's no way to create new # parsers for different encodings. 
self._default_parser = parser if empty_element_tags is not None: self.empty_element_tags = set(empty_element_tags) self.soup = None self.nsmaps = [self.DEFAULT_NSMAPS] def _getNsTag(self, tag): # Split the namespace URL out of a fully-qualified lxml tag # name. Copied from lxml's src/lxml/sax.py. if tag[0] == '{': return tuple(tag[1:].split('}', 1)) else: return (None, tag) def prepare_markup(self, markup, user_specified_encoding=None, exclude_encodings=None, document_declared_encoding=None): """ :yield: A series of 4-tuples. (markup, encoding, declared encoding, has undergone character replacement) Each 4-tuple represents a strategy for parsing the document. """ # Instead of using UnicodeDammit to convert the bytestring to # Unicode using different encodings, use EncodingDetector to # iterate over the encodings, and tell lxml to try to parse # the document as each one in turn. is_html = not self.is_xml if is_html: self.processing_instruction_class = ProcessingInstruction else: self.processing_instruction_class = XMLProcessingInstruction if isinstance(markup, unicode): # We were given Unicode. Maybe lxml can parse Unicode on # this system? yield markup, None, document_declared_encoding, False if isinstance(markup, unicode): # No, apparently not. Convert the Unicode to UTF-8 and # tell lxml to parse it as UTF-8. yield (markup.encode("utf8"), "utf8", document_declared_encoding, False) try_encodings = [user_specified_encoding, document_declared_encoding] detector = EncodingDetector( markup, try_encodings, is_html, exclude_encodings) for encoding in detector.encodings: yield (detector.markup, encoding, document_declared_encoding, False) def feed(self, markup): if isinstance(markup, bytes): markup = BytesIO(markup) elif isinstance(markup, unicode): markup = StringIO(markup) # Call feed() at least once, even if the markup is empty, # or the parser won't be initialized. 
data = markup.read(self.CHUNK_SIZE) try: self.parser = self.parser_for(self.soup.original_encoding) self.parser.feed(data) while len(data) != 0: # Now call feed() on the rest of the data, chunk by chunk. data = markup.read(self.CHUNK_SIZE) if len(data) != 0: self.parser.feed(data) self.parser.close() except (UnicodeDecodeError, LookupError, etree.ParserError), e: raise ParserRejectedMarkup(str(e)) def close(self): self.nsmaps = [self.DEFAULT_NSMAPS] def start(self, name, attrs, nsmap={}): # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. attrs = dict(attrs) nsprefix = None # Invert each namespace map as it comes in. if len(self.nsmaps) > 1: # There are no new namespaces for this tag, but # non-default namespaces are in play, so we need a # separate tag stack to know when they end. self.nsmaps.append(None) elif len(nsmap) > 0: # A new namespace mapping has come into play. inverted_nsmap = dict((value, key) for key, value in nsmap.items()) self.nsmaps.append(inverted_nsmap) # Also treat the namespace mapping as a set of attributes on the # tag, so we can recreate it later. attrs = attrs.copy() for prefix, namespace in nsmap.items(): attribute = NamespacedAttribute( "xmlns", prefix, "http://www.w3.org/2000/xmlns/") attrs[attribute] = namespace # Namespaces are in play. Find any attributes that came in # from lxml with namespaces attached to their names, and # turn then into NamespacedAttribute objects. 
new_attrs = {} for attr, value in attrs.items(): namespace, attr = self._getNsTag(attr) if namespace is None: new_attrs[attr] = value else: nsprefix = self._prefix_for_namespace(namespace) attr = NamespacedAttribute(nsprefix, attr, namespace) new_attrs[attr] = value attrs = new_attrs namespace, name = self._getNsTag(name) nsprefix = self._prefix_for_namespace(namespace) self.soup.handle_starttag(name, namespace, nsprefix, attrs) def _prefix_for_namespace(self, namespace): """Find the currently active prefix for the given namespace.""" if namespace is None: return None for inverted_nsmap in reversed(self.nsmaps): if inverted_nsmap is not None and namespace in inverted_nsmap: return inverted_nsmap[namespace] return None def end(self, name): self.soup.endData() completed_tag = self.soup.tagStack[-1] namespace, name = self._getNsTag(name) nsprefix = None if namespace is not None: for inverted_nsmap in reversed(self.nsmaps): if inverted_nsmap is not None and namespace in inverted_nsmap: nsprefix = inverted_nsmap[namespace] break self.soup.handle_endtag(name, nsprefix) if len(self.nsmaps) > 1: # This tag, or one of its parents, introduced a namespace # mapping, so pop it off the stack. self.nsmaps.pop() def pi(self, target, data): self.soup.endData() self.soup.handle_data(target + ' ' + data) self.soup.endData(self.processing_instruction_class) def data(self, content): self.soup.handle_data(content) def doctype(self, name, pubid, system): self.soup.endData() doctype = Doctype.for_name_and_ids(name, pubid, system) self.soup.object_was_parsed(doctype) def comment(self, content): "Handle comments as Comment objects." 
self.soup.endData() self.soup.handle_data(content) self.soup.endData(Comment) def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): NAME = LXML ALTERNATE_NAMES = ["lxml-html"] features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE] is_xml = False processing_instruction_class = ProcessingInstruction def default_parser(self, encoding): return etree.HTMLParser def feed(self, markup): encoding = self.soup.original_encoding try: self.parser = self.parser_for(encoding) self.parser.feed(markup) self.parser.close() except (UnicodeDecodeError, LookupError, etree.ParserError), e: raise ParserRejectedMarkup(str(e)) def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" return u'<html><body>%s</body></html>' % fragment
9,468
Python
.py
219
33.762557
82
0.63279
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,821
__init__.py
evilhero_mylar/lib/bs4/builder/__init__.py
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from collections import defaultdict import itertools import sys from bs4.element import ( CharsetMetaAttributeValue, ContentMetaAttributeValue, HTMLAwareEntitySubstitution, whitespace_re ) __all__ = [ 'HTMLTreeBuilder', 'SAXTreeBuilder', 'TreeBuilder', 'TreeBuilderRegistry', ] # Some useful features for a TreeBuilder to have. FAST = 'fast' PERMISSIVE = 'permissive' STRICT = 'strict' XML = 'xml' HTML = 'html' HTML_5 = 'html5' class TreeBuilderRegistry(object): def __init__(self): self.builders_for_feature = defaultdict(list) self.builders = [] def register(self, treebuilder_class): """Register a treebuilder based on its advertised features.""" for feature in treebuilder_class.features: self.builders_for_feature[feature].insert(0, treebuilder_class) self.builders.insert(0, treebuilder_class) def lookup(self, *features): if len(self.builders) == 0: # There are no builders at all. return None if len(features) == 0: # They didn't ask for any features. Give them the most # recently registered builder. return self.builders[0] # Go down the list of features in order, and eliminate any builders # that don't match every feature. features = list(features) features.reverse() candidates = None candidate_set = None while len(features) > 0: feature = features.pop() we_have_the_feature = self.builders_for_feature.get(feature, []) if len(we_have_the_feature) > 0: if candidates is None: candidates = we_have_the_feature candidate_set = set(candidates) else: # Eliminate any candidates that don't have this feature. candidate_set = candidate_set.intersection( set(we_have_the_feature)) # The only valid candidates are the ones in candidate_set. # Go through the original list of candidates and pick the first one # that's in candidate_set. 
if candidate_set is None: return None for candidate in candidates: if candidate in candidate_set: return candidate return None # The BeautifulSoup class will take feature lists from developers and use them # to look up builders in this registry. builder_registry = TreeBuilderRegistry() class TreeBuilder(object): """Turn a document into a Beautiful Soup object tree.""" NAME = "[Unknown tree builder]" ALTERNATE_NAMES = [] features = [] is_xml = False picklable = False preserve_whitespace_tags = set() empty_element_tags = None # A tag will be considered an empty-element # tag when and only when it has no contents. # A value for these tag/attribute combinations is a space- or # comma-separated list of CDATA, rather than a single CDATA. cdata_list_attributes = {} def __init__(self): self.soup = None def reset(self): pass def can_be_empty_element(self, tag_name): """Might a tag with this name be an empty-element tag? The final markup may or may not actually present this tag as self-closing. For instance: an HTMLBuilder does not consider a <p> tag to be an empty-element tag (it's not in HTMLBuilder.empty_element_tags). This means an empty <p> tag will be presented as "<p></p>", not "<p />". The default implementation has no opinion about which tags are empty-element tags, so a tag will be presented as an empty-element tag if and only if it has no contents. "<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will be left alone. """ if self.empty_element_tags is None: return True return tag_name in self.empty_element_tags def feed(self, markup): raise NotImplementedError() def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None): return markup, None, None, False def test_fragment_to_document(self, fragment): """Wrap an HTML fragment to make it look like a document. Different parsers do this differently. For instance, lxml introduces an empty <head> tag, and html5lib doesn't. 
Abstracting this away lets us write simple tests which run HTML fragments through the parser and compare the results against other HTML fragments. This method should not be used outside of tests. """ return fragment def set_up_substitutions(self, tag): return False def _replace_cdata_list_attribute_values(self, tag_name, attrs): """Replaces class="foo bar" with class=["foo", "bar"] Modifies its input in place. """ if not attrs: return attrs if self.cdata_list_attributes: universal = self.cdata_list_attributes.get('*', []) tag_specific = self.cdata_list_attributes.get( tag_name.lower(), None) for attr in attrs.keys(): if attr in universal or (tag_specific and attr in tag_specific): # We have a "class"-type attribute whose string # value is a whitespace-separated list of # values. Split it into a list. value = attrs[attr] if isinstance(value, basestring): values = whitespace_re.split(value) else: # html5lib sometimes calls setAttributes twice # for the same tag when rearranging the parse # tree. On the second call the attribute value # here is already a list. If this happens, # leave the value alone rather than trying to # split it again. values = value attrs[attr] = values return attrs class SAXTreeBuilder(TreeBuilder): """A Beautiful Soup treebuilder that listens for SAX events.""" def feed(self, markup): raise NotImplementedError() def close(self): pass def startElement(self, name, attrs): attrs = dict((key[1], value) for key, value in list(attrs.items())) #print "Start %s, %r" % (name, attrs) self.soup.handle_starttag(name, attrs) def endElement(self, name): #print "End %s" % name self.soup.handle_endtag(name) def startElementNS(self, nsTuple, nodeName, attrs): # Throw away (ns, nodeName) for now. self.startElement(nodeName, attrs) def endElementNS(self, nsTuple, nodeName): # Throw away (ns, nodeName) for now. 
self.endElement(nodeName) #handler.endElementNS((ns, node.nodeName), node.nodeName) def startPrefixMapping(self, prefix, nodeValue): # Ignore the prefix for now. pass def endPrefixMapping(self, prefix): # Ignore the prefix for now. # handler.endPrefixMapping(prefix) pass def characters(self, content): self.soup.handle_data(content) def startDocument(self): pass def endDocument(self): pass class HTMLTreeBuilder(TreeBuilder): """This TreeBuilder knows facts about HTML. Such as which tags are empty-element tags. """ preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags empty_element_tags = set([ # These are from HTML5. 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', # These are from HTML4, removed in HTML5. 'spacer', 'frame' ]) # The HTML standard defines these attributes as containing a # space-separated list of values, not a single value. That is, # class="foo bar" means that the 'class' attribute has two values, # 'foo' and 'bar', not the single value 'foo bar'. When we # encounter one of these attributes, we will parse its value into # a list of values if possible. Upon output, the list will be # converted back into a string. cdata_list_attributes = { "*" : ['class', 'accesskey', 'dropzone'], "a" : ['rel', 'rev'], "link" : ['rel', 'rev'], "td" : ["headers"], "th" : ["headers"], "td" : ["headers"], "form" : ["accept-charset"], "object" : ["archive"], # These are HTML5 specific, as are *.accesskey and *.dropzone above. "area" : ["rel"], "icon" : ["sizes"], "iframe" : ["sandbox"], "output" : ["for"], } def set_up_substitutions(self, tag): # We are only interested in <meta> tags if tag.name != 'meta': return False http_equiv = tag.get('http-equiv') content = tag.get('content') charset = tag.get('charset') # We are interested in <meta> tags that say what encoding the # document was originally in. 
This means HTML 5-style <meta> # tags that provide the "charset" attribute. It also means # HTML 4-style <meta> tags that provide the "content" # attribute and have "http-equiv" set to "content-type". # # In both cases we will replace the value of the appropriate # attribute with a standin object that can take on any # encoding. meta_encoding = None if charset is not None: # HTML 5 style: # <meta charset="utf8"> meta_encoding = charset tag['charset'] = CharsetMetaAttributeValue(charset) elif (content is not None and http_equiv is not None and http_equiv.lower() == 'content-type'): # HTML 4 style: # <meta http-equiv="content-type" content="text/html; charset=utf8"> tag['content'] = ContentMetaAttributeValue(content) return (meta_encoding is not None) def register_treebuilders_from(module): """Copy TreeBuilders from the given module into this module.""" # I'm fairly sure this is not the best way to do this. this_module = sys.modules['bs4.builder'] for name in module.__all__: obj = getattr(module, name) if issubclass(obj, TreeBuilder): setattr(this_module, name, obj) this_module.__all__.append(name) # Register the builder while we're at it. this_module.builder_registry.register(obj) class ParserRejectedMarkup(Exception): pass # Builders are registered in reverse order of priority, so that custom # builder registrations will take precedence. In general, we want lxml # to take precedence over html5lib, because it's faster. And we only # want to use HTMLParser as a last result. from . import _htmlparser register_treebuilders_from(_htmlparser) try: from . import _html5lib register_treebuilders_from(_html5lib) except ImportError: # They don't have html5lib installed. pass try: from . import _lxml register_treebuilders_from(_lxml) except ImportError: # They don't have lxml installed. pass
11,553
Python
.py
274
33.244526
140
0.623797
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,822
_html5lib.py
evilhero_mylar/lib/bs4/builder/_html5lib.py
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __all__ = [ 'HTML5TreeBuilder', ] import warnings import re from bs4.builder import ( PERMISSIVE, HTML, HTML_5, HTMLTreeBuilder, ) from bs4.element import ( NamespacedAttribute, whitespace_re, ) import html5lib from html5lib.constants import ( namespaces, prefixes, ) from bs4.element import ( Comment, Doctype, NavigableString, Tag, ) try: # Pre-0.99999999 from html5lib.treebuilders import _base as treebuilder_base new_html5lib = False except ImportError, e: # 0.99999999 and up from html5lib.treebuilders import base as treebuilder_base new_html5lib = True class HTML5TreeBuilder(HTMLTreeBuilder): """Use html5lib to build a tree.""" NAME = "html5lib" features = [NAME, PERMISSIVE, HTML_5, HTML] def prepare_markup(self, markup, user_specified_encoding, document_declared_encoding=None, exclude_encodings=None): # Store the user-specified encoding for use later on. self.user_specified_encoding = user_specified_encoding # document_declared_encoding and exclude_encodings aren't used # ATM because the html5lib TreeBuilder doesn't use # UnicodeDammit. if exclude_encodings: warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.") yield (markup, None, None, False) # These methods are defined by Beautiful Soup. def feed(self, markup): if self.soup.parse_only is not None: warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.") parser = html5lib.HTMLParser(tree=self.create_treebuilder) extra_kwargs = dict() if not isinstance(markup, unicode): if new_html5lib: extra_kwargs['override_encoding'] = self.user_specified_encoding else: extra_kwargs['encoding'] = self.user_specified_encoding doc = parser.parse(markup, **extra_kwargs) # Set the character encoding detected by the tokenizer. 
if isinstance(markup, unicode): # We need to special-case this because html5lib sets # charEncoding to UTF-8 if it gets Unicode input. doc.original_encoding = None else: original_encoding = parser.tokenizer.stream.charEncoding[0] if not isinstance(original_encoding, basestring): # In 0.99999999 and up, the encoding is an html5lib # Encoding object. We want to use a string for compatibility # with other tree builders. original_encoding = original_encoding.name doc.original_encoding = original_encoding def create_treebuilder(self, namespaceHTMLElements): self.underlying_builder = TreeBuilderForHtml5lib( namespaceHTMLElements, self.soup) return self.underlying_builder def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" return u'<html><head></head><body>%s</body></html>' % fragment class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder): def __init__(self, namespaceHTMLElements, soup=None): if soup: self.soup = soup else: from bs4 import BeautifulSoup self.soup = BeautifulSoup("", "html.parser") super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements) def documentClass(self): self.soup.reset() return Element(self.soup, self.soup, None) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] doctype = Doctype.for_name_and_ids(name, publicId, systemId) self.soup.object_was_parsed(doctype) def elementClass(self, name, namespace): tag = self.soup.new_tag(name, namespace) return Element(tag, self.soup, namespace) def commentClass(self, data): return TextNode(Comment(data), self.soup) def fragmentClass(self): from bs4 import BeautifulSoup self.soup = BeautifulSoup("", "html.parser") self.soup.name = "[document_fragment]" return Element(self.soup, self.soup, None) def appendChild(self, node): # XXX This code is not covered by the BS4 tests. 
self.soup.append(node.element) def getDocument(self): return self.soup def getFragment(self): return treebuilder_base.TreeBuilder.getFragment(self).element def testSerializer(self, element): from bs4 import BeautifulSoup rv = [] doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$') def serializeElement(element, indent=0): if isinstance(element, BeautifulSoup): pass if isinstance(element, Doctype): m = doctype_re.match(element) if m: name = m.group(1) if m.lastindex > 1: publicId = m.group(2) or "" systemId = m.group(3) or m.group(4) or "" rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % (' ' * indent, name, publicId, systemId)) else: rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name)) else: rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) elif isinstance(element, Comment): rv.append("|%s<!-- %s -->" % (' ' * indent, element)) elif isinstance(element, NavigableString): rv.append("|%s\"%s\"" % (' ' * indent, element)) else: if element.namespace: name = "%s %s" % (prefixes[element.namespace], element.name) else: name = element.name rv.append("|%s<%s>" % (' ' * indent, name)) if element.attrs: attributes = [] for name, value in element.attrs.items(): if isinstance(name, NamespacedAttribute): name = "%s %s" % (prefixes[name.namespace], name.name) if isinstance(value, list): value = " ".join(value) attributes.append((name, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) indent += 2 for child in element.children: serializeElement(child, indent) serializeElement(element, 0) return "\n".join(rv) class AttrList(object): def __init__(self, element): self.element = element self.attrs = dict(self.element.attrs) def __iter__(self): return list(self.attrs.items()).__iter__() def __setitem__(self, name, value): # If this attribute is a multi-valued attribute for this element, # turn its value into a list. 
list_attr = HTML5TreeBuilder.cdata_list_attributes if (name in list_attr['*'] or (self.element.name in list_attr and name in list_attr[self.element.name])): # A node that is being cloned may have already undergone # this procedure. if not isinstance(value, list): value = whitespace_re.split(value) self.element[name] = value def items(self): return list(self.attrs.items()) def keys(self): return list(self.attrs.keys()) def __len__(self): return len(self.attrs) def __getitem__(self, name): return self.attrs[name] def __contains__(self, name): return name in list(self.attrs.keys()) class Element(treebuilder_base.Node): def __init__(self, element, soup, namespace): treebuilder_base.Node.__init__(self, element.name) self.element = element self.soup = soup self.namespace = namespace def appendChild(self, node): string_child = child = None if isinstance(node, basestring): # Some other piece of code decided to pass in a string # instead of creating a TextElement object to contain the # string. string_child = child = node elif isinstance(node, Tag): # Some other piece of code decided to pass in a Tag # instead of creating an Element object to contain the # Tag. child = node elif node.element.__class__ == NavigableString: string_child = child = node.element node.parent = self else: child = node.element node.parent = self if not isinstance(child, basestring) and child.parent is not None: node.element.extract() if (string_child and self.element.contents and self.element.contents[-1].__class__ == NavigableString): # We are appending a string onto another string. # TODO This has O(n^2) performance, for input like # "a</a>a</a>a</a>..." old_element = self.element.contents[-1] new_element = self.soup.new_string(old_element + string_child) old_element.replace_with(new_element) self.soup._most_recent_element = new_element else: if isinstance(node, basestring): # Create a brand new NavigableString from this string. 
child = self.soup.new_string(node) # Tell Beautiful Soup to act as if it parsed this element # immediately after the parent's last descendant. (Or # immediately after the parent, if it has no children.) if self.element.contents: most_recent_element = self.element._last_descendant(False) elif self.element.next_element is not None: # Something from further ahead in the parse tree is # being inserted into this earlier element. This is # very annoying because it means an expensive search # for the last element in the tree. most_recent_element = self.soup._last_descendant() else: most_recent_element = self.element self.soup.object_was_parsed( child, parent=self.element, most_recent_element=most_recent_element) def getAttributes(self): if isinstance(self.element, Comment): return {} return AttrList(self.element) def setAttributes(self, attributes): if attributes is not None and len(attributes) > 0: converted_attributes = [] for name, value in list(attributes.items()): if isinstance(name, tuple): new_name = NamespacedAttribute(*name) del attributes[name] attributes[new_name] = value self.soup.builder._replace_cdata_list_attribute_values( self.name, attributes) for name, value in attributes.items(): self.element[name] = value # The attributes may contain variables that need substitution. # Call set_up_substitutions manually. # # The Tag constructor called this method when the Tag was created, # but we just set/changed the attributes, so call it again. 
self.soup.builder.set_up_substitutions(self.element) attributes = property(getAttributes, setAttributes) def insertText(self, data, insertBefore=None): text = TextNode(self.soup.new_string(data), self.soup) if insertBefore: self.insertBefore(text, insertBefore) else: self.appendChild(text) def insertBefore(self, node, refNode): index = self.element.index(refNode.element) if (node.element.__class__ == NavigableString and self.element.contents and self.element.contents[index-1].__class__ == NavigableString): # (See comments in appendChild) old_node = self.element.contents[index-1] new_str = self.soup.new_string(old_node + node.element) old_node.replace_with(new_str) else: self.element.insert(index, node.element) node.parent = self def removeChild(self, node): node.element.extract() def reparentChildren(self, new_parent): """Move all of this tag's children into another tag.""" # print "MOVE", self.element.contents # print "FROM", self.element # print "TO", new_parent.element element = self.element new_parent_element = new_parent.element # Determine what this tag's next_element will be once all the children # are removed. final_next_element = element.next_sibling new_parents_last_descendant = new_parent_element._last_descendant(False, False) if len(new_parent_element.contents) > 0: # The new parent already contains children. We will be # appending this tag's children to the end. new_parents_last_child = new_parent_element.contents[-1] new_parents_last_descendant_next_element = new_parents_last_descendant.next_element else: # The new parent contains no children. 
new_parents_last_child = None new_parents_last_descendant_next_element = new_parent_element.next_element to_append = element.contents if len(to_append) > 0: # Set the first child's previous_element and previous_sibling # to elements within the new parent first_child = to_append[0] if new_parents_last_descendant: first_child.previous_element = new_parents_last_descendant else: first_child.previous_element = new_parent_element first_child.previous_sibling = new_parents_last_child if new_parents_last_descendant: new_parents_last_descendant.next_element = first_child else: new_parent_element.next_element = first_child if new_parents_last_child: new_parents_last_child.next_sibling = first_child # Find the very last element being moved. It is now the # parent's last descendant. It has no .next_sibling and # its .next_element is whatever the previous last # descendant had. last_childs_last_descendant = to_append[-1]._last_descendant(False, True) last_childs_last_descendant.next_element = new_parents_last_descendant_next_element if new_parents_last_descendant_next_element: # TODO: This code has no test coverage and I'm not sure # how to get html5lib to go through this path, but it's # just the other side of the previous line. new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant last_childs_last_descendant.next_sibling = None for child in to_append: child.parent = new_parent_element new_parent_element.contents.append(child) # Now that this element has no children, change its .next_element. 
element.contents = [] element.next_element = final_next_element # print "DONE WITH MOVE" # print "FROM", self.element # print "TO", new_parent_element def cloneNode(self): tag = self.soup.new_tag(self.element.name, self.namespace) node = Element(tag, self.soup, self.namespace) for key,value in self.attributes: node.attributes[key] = value return node def hasContent(self): return self.element.contents def getNameTuple(self): if self.namespace == None: return namespaces["html"], self.name else: return self.namespace, self.name nameTuple = property(getNameTuple) class TextNode(Element): def __init__(self, element, soup): treebuilder_base.Node.__init__(self, None) self.element = element self.soup = soup def cloneNode(self): raise NotImplementedError
16,711
Python
.py
365
34.580822
159
0.603746
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,823
evaljs.py
evilhero_mylar/lib/js2py/evaljs.py
# coding=utf-8 """ This module is still experimental! """ from .translators import translate_js, DEFAULT_HEADER import sys import time import json import six import os import hashlib import codecs import pyjs __all__ = ['EvalJs', 'translate_js', 'import_js', 'eval_js', 'translate_file', 'run_file'] DEBUG = False def path_as_local(path): if os.path.isabs(path): return path # relative to cwd return os.path.join(os.getcwd(), path) def import_js(path, lib_name, globals): """Imports from javascript source file. globals is your globals()""" with codecs.open(path_as_local(path), "r", "utf-8") as f: js = f.read() e = EvalJs() e.execute(js) var = e.context['var'] globals[lib_name] = var.to_python() def get_file_contents(path_or_file): if hasattr(path_or_file, 'read'): js = path_or_file.read() else: with codecs.open(path_as_local(path_or_file), "r", "utf-8") as f: js = f.read() return js def write_file_contents(path_or_file, contents): if hasattr(path_or_file, 'write'): path_or_file.write(contents) else: with open(path_as_local(path_or_file), 'w') as f: f.write(contents) def translate_file(input_path, output_path): ''' Translates input JS file to python and saves the it to the output path. It appends some convenience code at the end so that it is easy to import JS objects. For example we have a file 'example.js' with: var a = function(x) {return x} translate_file('example.js', 'example.py') Now example.py can be easily importend and used: >>> from example import example >>> example.a(30) 30 ''' js = get_file_contents(input_path) py_code = translate_js(js) lib_name = os.path.basename(output_path).split('.')[0] head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(lib_name) tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name out = head + py_code + tail write_file_contents(output_path, out) def run_file(path_or_file, context=None): ''' Context must be EvalJS object. 
Runs given path as a JS program. Returns (eval_value, context). ''' if context is None: context = EvalJs() if not isinstance(context, EvalJs): raise TypeError('context must be the instance of EvalJs') eval_value = context.eval(get_file_contents(path_or_file)) return eval_value, context def eval_js(js): """Just like javascript eval. Translates javascript to python, executes and returns python object. js is javascript source code EXAMPLE: >>> import js2py >>> add = js2py.eval_js('function add(a, b) {return a + b}') >>> add(1, 2) + 3 6 >>> add('1', 2, 3) u'12' >>> add.constructor function Function() { [python code] } NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type. For Js functions and objects, returns Python wrapper - basically behaves like normal python object. If you really want to convert object to python dict you can use to_dict method. """ e = EvalJs() return e.eval(js) class EvalJs(object): """This class supports continuous execution of javascript under same context. >>> js = EvalJs() >>> js.execute('var a = 10;function f(x) {return x*x};') >>> js.f(9) 81 >>> js.a 10 context is a python dict or object that contains python variables that should be available to JavaScript For example: >>> js = EvalJs({'a': 30}) >>> js.execute('var x = a') >>> js.x 30 You can run interactive javascript console with console method!""" def __init__(self, context={}): self.__dict__['_context'] = {} exec(DEFAULT_HEADER, self._context) self.__dict__['_var'] = self._context['var'].to_python() if not isinstance(context, dict): try: context = context.__dict__ except: raise TypeError('context has to be either a dict or have __dict__ attr') for k, v in six.iteritems(context): setattr(self._var, k, v) def execute(self, js=None, use_compilation_plan=False): """executes javascript js in current context During initial execute() the converted js is cached for re-use. 
That means next time you run the same javascript snippet you save many instructions needed to parse and convert the js code to python code. This cache causes minor overhead (a cache dicts is updated) but the Js=>Py conversion process is typically expensive compared to actually running the generated python code. Note that the cache is just a dict, it has no expiration or cleanup so when running this in automated situations with vast amounts of snippets it might increase memory usage. """ try: cache = self.__dict__['cache'] except KeyError: cache = self.__dict__['cache'] = {} hashkey = hashlib.md5(js.encode('utf-8')).digest() try: compiled = cache[hashkey] except KeyError: code = translate_js(js, '', use_compilation_plan=use_compilation_plan) compiled = cache[hashkey] = compile(code, '<EvalJS snippet>', 'exec') exec(compiled, self._context) def eval(self, expression, use_compilation_plan=False): """evaluates expression in current context and returns its value""" code = 'PyJsEvalResult = eval(%s)'%json.dumps(expression) self.execute(code, use_compilation_plan=use_compilation_plan) return self['PyJsEvalResult'] def execute_debug(self, js): """executes javascript js in current context as opposed to the (faster) self.execute method, you can use your regular debugger to set breakpoints and inspect the generated python code """ code = translate_js(js, '') # make sure you have a temp folder: filename = 'temp' + os.sep + '_' + hashlib.md5(code).hexdigest() + '.py' try: with open(filename, mode='w') as f: f.write(code) execfile(filename, self._context) except Exception as err: raise err finally: os.remove(filename) try: os.remove(filename + 'c') except: pass def eval_debug(self, expression): """evaluates expression in current context and returns its value as opposed to the (faster) self.execute method, you can use your regular debugger to set breakpoints and inspect the generated python code """ code = 'PyJsEvalResult = eval(%s)'%json.dumps(expression) 
self.execute_debug(code) return self['PyJsEvalResult'] def __getattr__(self, var): return getattr(self._var, var) def __getitem__(self, var): return getattr(self._var, var) def __setattr__(self, var, val): return setattr(self._var, var, val) def __setitem__(self, var, val): return setattr(self._var, var, val) def console(self): """starts to interact (starts interactive console) Something like code.InteractiveConsole""" while True: if six.PY2: code = raw_input('>>> ') else: code = input('>>>') try: print(self.eval(code)) except KeyboardInterrupt: break except Exception as e: import traceback if DEBUG: sys.stderr.write(traceback.format_exc()) else: sys.stderr.write('EXCEPTION: '+str(e)+'\n') time.sleep(0.01) #print x if __name__=='__main__': #with open('C:\Users\Piotrek\Desktop\esprima.js', 'rb') as f: # x = f.read() e = EvalJs() e.execute('square(x)') #e.execute(x) e.console()
8,198
Python
.py
201
32.626866
126
0.612481
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,824
pyjs.py
evilhero_mylar/lib/js2py/pyjs.py
from .base import * from .constructors.jsmath import Math from .constructors.jsdate import Date from .constructors.jsobject import Object from .constructors.jsfunction import Function from .constructors.jsstring import String from .constructors.jsnumber import Number from .constructors.jsboolean import Boolean from .constructors.jsregexp import RegExp from .constructors.jsarray import Array from .prototypes.jsjson import JSON from .host.console import console from .host.jseval import Eval from .host.jsfunctions import parseFloat, parseInt, isFinite, isNaN # Now we have all the necessary items to create global environment for script __all__ = ['Js', 'PyJsComma', 'PyJsStrictEq', 'PyJsStrictNeq', 'PyJsException', 'PyJsBshift', 'Scope', 'PyExceptionToJs', 'JsToPyException', 'JS_BUILTINS', 'appengine', 'set_global_object', 'JsRegExp', 'PyJsException', 'PyExceptionToJs', 'JsToPyException', 'PyJsSwitchException'] # these were defined in base.py builtins = ('true','false','null','undefined','Infinity', 'NaN', 'console', 'String', 'Number', 'Boolean', 'RegExp', 'Math', 'Date', 'Object', 'Function', 'Array', 'parseFloat', 'parseInt', 'isFinite', 'isNaN') #Array, Function, JSON, Error is done later :) # also some built in functions like eval... def set_global_object(obj): obj.IS_CHILD_SCOPE = False this = This({}) this.own = obj.own this.prototype = obj.prototype PyJs.GlobalObject = this # make this available obj.register('this') obj.put('this', this) scope = dict(zip(builtins, [globals()[e] for e in builtins])) # Now add errors: for name, error in ERRORS.items(): scope[name] = error #add eval scope['eval'] = Eval scope['JSON'] = JSON JS_BUILTINS = {k:v for k,v in scope.items()}
1,836
Python
.py
43
38.511628
100
0.708123
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,825
__init__.py
evilhero_mylar/lib/js2py/__init__.py
# The MIT License # # Copyright 2014, 2015 Piotr Dabkowski # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, subject # to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT # LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE """ This module allows you to translate and execute Javascript in pure python. Basically its implementation of ECMAScript 5.1 in pure python. Use eval_js method to execute javascript code and get resulting python object (builtin if possible). EXAMPLE: >>> import js2py >>> add = js2py.eval_js('function add(a, b) {return a + b}') >>> add(1, 2) + 3 6 >>> add('1', 2, 3) u'12' >>> add.constructor function Function() { [python code] } Or use EvalJs to execute many javascript code fragments under same context - you would be able to get any variable from the context! >>> js = js2py.EvalJs() >>> js.execute('var a = 10; function f(x) {return x*x};') >>> js.f(9) 81 >>> js.a 10 Also you can use its console method to play with interactive javascript console. Use parse_js to parse (syntax tree is just like in esprima.js) and translate_js to trasnlate JavaScript. 
Finally, you can use pyimport statement from inside JS code to import and use python libraries. >>> js2py.eval_js('pyimport urllib; urllib.urlopen("https://www.google.com")') NOTE: This module is still not fully finished: Date and JSON builtin objects are not implemented Array prototype is not fully finished (will be soon) Other than that everything should work fine. """ __author__ = 'Piotr Dabkowski' __all__ = ['EvalJs', 'translate_js', 'import_js', 'eval_js', 'parse_js', 'translate_file', 'run_file'] from .evaljs import * from .translators import parse as parse_js
2,684
Python
.py
52
47.980769
110
0.731166
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,826
base.py
evilhero_mylar/lib/js2py/base.py
'''Most important file in Js2Py implementation: PyJs class - father of all PyJs objects''' from copy import copy import re #from translators import translator from .translators.friendly_nodes import REGEXP_CONVERTER from .utils.injector import fix_js_args from types import FunctionType, ModuleType, GeneratorType, BuiltinFunctionType, MethodType, BuiltinMethodType import traceback # python 3 support import six if six.PY3: basestring = str long = int xrange = range unicode = str def str_repr(s): if six.PY2: return repr(s.encode('utf-8')) else: return repr(s) def MakeError(name, message): """Returns PyJsException with PyJsError inside""" return JsToPyException(ERRORS[name](Js(message))) def to_python(val): if not isinstance(val, PyJs): return val if isinstance(val, PyJsUndefined) or isinstance(val, PyJsNull): return None elif isinstance(val, PyJsNumber): # this can be either float or long/int better to assume its int/long when a whole number... v = val.value try: i = int(v) if v==v else v # nan... 
return v if i!=v else i except: return v elif isinstance(val, (PyJsString, PyJsBoolean)): return val.value elif isinstance(val, PyObjectWrapper): return val.__dict__['obj'] return JsObjectWrapper(val) def to_dict(js_obj, known=None): # fixed recursion error in self referencing objects res = {} if known is None: known = {} if js_obj in known: return known[js_obj] known[js_obj] = res for k in js_obj: name = k.value input = js_obj.get(name) output = to_python(input) if isinstance(output, JsObjectWrapper): if output._obj.Class=='Object': output = to_dict(output._obj, known) known[input] = output elif output._obj.Class=='Array': output = to_list(output._obj) known[input] = output res[name] = output return res def to_list(js_obj, known=None): res = len(js_obj)*[None] if known is None: known = {} if js_obj in known: return known[js_obj] known[js_obj] = res for k in js_obj: try: name = int(k.value) except: continue input = js_obj.get(str(name)) output = to_python(input) if isinstance(output, JsObjectWrapper): if output._obj.Class in ['Array', 'Arguments']: output = to_list(output._obj, known) known[input] = output elif output._obj.Class in ['Object']: output = to_dict(output._obj) known[input] = output res[name] = output return res def HJs(val): if hasattr(val, '__call__'): # @Js def PyWrapper(this, arguments, var=None): args = tuple(to_python(e) for e in arguments.to_list()) try: py_res = val.__call__(*args) except Exception as e: message = 'your Python function failed! 
' try: message += e.message except: pass raise MakeError('Error', message) return py_wrap(py_res) try: PyWrapper.func_name = val.__name__ except: pass return PyWrapper if isinstance(val, tuple): val = list(val) return Js(val) def Js(val): '''Converts Py type to PyJs type''' if isinstance(val, PyJs): return val elif val is None: return undefined elif isinstance(val, basestring): return PyJsString(val, StringPrototype) elif isinstance(val, bool): return true if val else false elif isinstance(val, float) or isinstance(val, int) or isinstance(val, long): # This is supposed to speed things up. may not be the case if val in NUM_BANK: return NUM_BANK[val] return PyJsNumber(float(val), NumberPrototype) elif isinstance(val, FunctionType): return PyJsFunction(val, FunctionPrototype) #elif isinstance(val, ModuleType): # mod = {} # for name in dir(val): # value = getattr(val, name) # if isinstance(value, ModuleType): # continue # prevent recursive module conversion # try: # jsval = HJs(value) # except RuntimeError: # print 'Could not convert %s to PyJs object!' 
% name # continue # mod[name] = jsval # return Js(mod) #elif isintance(val, ClassType): elif isinstance(val, dict): # convert to object temp = PyJsObject({}, ObjectPrototype) for k, v in six.iteritems(val): temp.put(Js(k), Js(v)) return temp elif isinstance(val, (list, tuple)): #Convert to array return PyJsArray(val, ArrayPrototype) elif isinstance(val, JsObjectWrapper): return val.__dict__['_obj'] else: # try to convert to js object return py_wrap(val) #raise RuntimeError('Cant convert python type to js (%s)' % repr(val)) #try: # obj = {} # for name in dir(val): # if name.startswith('_'): #dont wrap attrs that start with _ # continue # value = getattr(val, name) # import types # if not isinstance(value, (FunctionType, BuiltinFunctionType, MethodType, BuiltinMethodType, # dict, int, basestring, bool, float, long, list, tuple)): # continue # obj[name] = HJs(value) # return Js(obj) #except: # raise RuntimeError('Cant convert python type to js (%s)' % repr(val)) def Type(val): try: return val.TYPE except: raise RuntimeError('Invalid type: '+str(val)) def is_data_descriptor(desc): return desc and ('value' in desc or 'writable' in desc) def is_accessor_descriptor(desc): return desc and ('get' in desc or 'set' in desc) def is_generic_descriptor(desc): return desc and not (is_data_descriptor(desc) or is_accessor_descriptor(desc)) ############################################################################## class PyJs(object): PRIMITIVES = {'String', 'Number', 'Boolean', 'Undefined', 'Null'} TYPE = 'Object' Class = None extensible = True prototype = None own = {} GlobalObject = None IS_CHILD_SCOPE = False value = None def __init__(self, value=None, prototype=None, extensible=False): '''Constructor for Number String and Boolean''' # I dont think this is needed anymore # if self.Class=='String' and not isinstance(value, basestring): # raise TypeError # if self.Class=='Number': # if not isinstance(value, float): # if not (isinstance(value, int) or isinstance(value, long)): # 
raise TypeError # value = float(value) # if self.Class=='Boolean' and not isinstance(value, bool): # raise TypeError self.value = value self.extensible = extensible self.prototype = prototype self.own = {} def is_undefined(self): return self.Class=='Undefined' def is_null(self): return self.Class=='Null' def is_primitive(self): return self.TYPE in self.PRIMITIVES def is_object(self): return not self.is_primitive() def _type(self): return Type(self) def is_callable(self): return hasattr(self, 'call') def get_own_property(self, prop): return self.own.get(prop) def get_property(self, prop): cand = self.get_own_property(prop) if cand: return cand if self.prototype is not None: return self.prototype.get_property(prop) def get(self, prop): #external use! #prop = prop.value if self.Class=='Undefined' or self.Class=='Null': raise MakeError('TypeError', 'Undefiend and null dont have properties!') if not isinstance(prop, basestring): prop = prop.to_string().value if not isinstance(prop, basestring): raise RuntimeError('Bug') cand = self.get_property(prop) if cand is None: return Js(None) if is_data_descriptor(cand): return cand['value'] if cand['get'].is_undefined(): return cand['get'] return cand['get'].call(self) def can_put(self, prop): #to check desc = self.get_own_property(prop) if desc: #if we have this property if is_accessor_descriptor(desc): return desc['set'].is_callable() # Check if setter method is defined else: #data desc return desc['writable'] if self.prototype is not None: return self.extensible inherited = self.get_property(prop) if inherited is None: return self.extensible if is_accessor_descriptor(inherited): return not inherited['set'].is_undefined() elif self.extensible: return inherited['writable'] return False def put(self, prop, val, op=None): #external use! 
        '''Just like in js: self.prop op= val. For example, when op is '+'
        it will perform self.prop += val.
        op can be either None for simple assignment or one of:
        * / % + - << >> & ^ |'''
        if self.Class=='Undefined' or self.Class=='Null':
            raise MakeError('TypeError', 'Undefiend and null dont have properties!')
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        #we need to set the value to the incremented one
        if op is not None:
            val = getattr(self.get(prop), OP_METHODS[op])(val)
        if not self.can_put(prop):
            return val  # silent failure, as in non-strict JS
        own_desc = self.get_own_property(prop)
        if is_data_descriptor(own_desc):
            if self.Class=='Array': #only array has different define_own_prop
                self.define_own_property(prop, {'value':val})
            else:
                self.own[prop]['value'] = val
            return val
        desc = self.get_property(prop)
        if is_accessor_descriptor(desc):
            # inherited or own setter — invoke it with the new value
            desc['set'].call(self, (val,))
        else:
            # create a fresh data property (default JS attributes: all True)
            new = {'value' : val,
                   'writable' : True,
                   'configurable' : True,
                   'enumerable' : True}
            if self.Class=='Array':
                self.define_own_property(prop, new)
            else:
                self.own[prop] = new
        return val

    def has_property(self, prop):
        # [[HasProperty]]: own or inherited.
        return self.get_property(prop) is not None

    def delete(self, prop):
        # [[Delete]]: returns Js(True) on success or when absent,
        # Js(False) when the property exists but is non-configurable.
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        desc = self.get_own_property(prop)
        if desc is None:
            return Js(True)
        if desc['configurable']:
            del self.own[prop]
            return Js(True)
        return Js(False)

    def default_value(self, hint=None):
        # [[DefaultValue]] (ES5 8.12.8): try valueOf/toString in hint order.
        # made a mistake at the very early stage and made it to prefer string... caused lots! of problems
        order = ('valueOf', 'toString')
        if hint=='String' or (hint is None and self.Class=='Date'):
            order = ('toString', 'valueOf')  # Date prefers string conversion
        for meth_name in order:
            method = self.get(meth_name)
            if method is not None and method.is_callable():
                cand = method.call(self)
                if cand.is_primitive():
                    return cand
        raise MakeError('TypeError', 'Cannot convert object to primitive value')

    def define_own_property(self, prop, desc): #Internal use only. External through Object
        # [[DefineOwnProperty]] (ES5 8.12.9). Returns True on success,
        # False on rejection (caller decides whether to throw).
        # prop must be a Py string. Desc is either a descriptor or accessor.
        #Messy method - raw translation from Ecma spec to prevent any bugs.
        # todo check this
        current = self.get_own_property(prop)
        extensible = self.extensible
        if not current: #We are creating a new property
            if not extensible:
                return False
            if is_data_descriptor(desc) or is_generic_descriptor(desc):
                DEFAULT_DATA_DESC = {'value': undefined, #undefined
                                     'writable': False,
                                     'enumerable': False,
                                     'configurable': False}
                DEFAULT_DATA_DESC.update(desc)
                self.own[prop] = DEFAULT_DATA_DESC
            else:
                DEFAULT_ACCESSOR_DESC = {'get': undefined, #undefined
                                         'set': undefined, #undefined
                                         'enumerable': False,
                                         'configurable': False}
                DEFAULT_ACCESSOR_DESC.update(desc)
                self.own[prop] = DEFAULT_ACCESSOR_DESC
            return True
        if not desc or desc==current: #We dont need to change anything.
            return True
        configurable = current['configurable']
        if not configurable: #Prevent changing configurable or enumerable
            if desc.get('configurable'):
                return False
            if 'enumerable' in desc and desc['enumerable']!=current['enumerable']:
                return False
        if is_generic_descriptor(desc):
            pass
        elif is_data_descriptor(current)!=is_data_descriptor(desc):
            # converting between data and accessor property
            if not configurable:
                return False
            if is_data_descriptor(current):
                del current['value']
                del current['writable']
                current['set'] = undefined #undefined
                current['get'] = undefined #undefined
            else:
                del current['set']
                del current['get']
                current['value'] = undefined #undefined
                current['writable'] = False
        elif is_data_descriptor(current) and is_data_descriptor(desc):
            if not configurable:
                if not current['writable'] and desc.get('writable'):
                    return False
                if not current['writable'] and 'value' in desc and current['value']!=desc['value']:
                    return False
        elif is_accessor_descriptor(current) and is_accessor_descriptor(desc):
            if not configurable:
                if 'set' in desc and desc['set'] is not current['set']:
                    return False
                if 'get' in desc and desc['get'] is not current['get']:
                    return False
        current.update(desc)
        return True

    #these methods will work only for Number class
    def is_infinity(self):
        assert self.Class=='Number'
        return self.value==float('inf') or self.value==-float('inf')

    def is_nan(self):
        assert self.Class=='Number'
        return self.value!=self.value #nan!=nan evaluates to true

    def is_finite(self):
        return not (self.is_nan() or self.is_infinity())

    #Type Conversions. to_type. All must return pyjs subclass instance

    def to_primitive(self, hint=None):
        # ToPrimitive (ES5 9.1).
        if self.is_primitive():
            return self
        if hint is None and (self.Class=='Number' or self.Class=='Boolean'):
            # favour number for Class== Number or Boolean default = String
            hint = 'Number'
        return self.default_value(hint)

    def to_boolean(self):
        # ToBoolean (ES5 9.2).
        typ = Type(self)
        if typ=='Boolean': #no need to convert
            return self
        elif typ=='Null' or typ=='Undefined': #they are both always false
            return false
        elif typ=='Number' or typ=='String': #false only for 0, '' and NaN
            return Js(bool(self.value and self.value==self.value)) # test for nan (nan -> false)
        else: #object - always true
            return true

    def to_number(self):
        # ToNumber (ES5 9.3), including JS string-to-number parsing quirks.
        typ = Type(self)
        if typ=='Null': #null is 0
            return Js(0)
        elif typ=='Undefined': # undefined is NaN
            return NaN
        elif typ=='Boolean': # 1 for true 0 for false
            return Js(int(self.value))
        elif typ=='Number':# or self.Class=='Number': # no need to convert
            return self
        elif typ=='String':
            s = self.value.strip() #Strip white space
            if not s: # '' is simply 0
                return Js(0)
            if 'x' in s or 'X' in s[:3]: #hex (positive only)
                try: # try to convert
                    num = int(s, 16)
                except ValueError: # could not convert > NaN
                    return NaN
                return Js(num)
            sign = 1 #get sign
            if s[0] in '+-':
                if s[0]=='-':
                    sign = -1
                s = s[1:]
            if s=='Infinity': #Check for infinity keyword. 'NaN' will be NaN anyway.
                return Js(sign*float('inf'))
            try: #decimal try
                num = sign*float(s) # Converted
            except ValueError:
                return NaN # could not convert to decimal > return NaN
            return Js(num)
        else: #object - most likely it will be NaN.
            return self.to_primitive('Number').to_number()

    def to_string(self):
        # ToString (ES5 9.8) — must match JS number formatting rules.
        typ = Type(self)
        if typ=='Null':
            return Js('null')
        elif typ=='Undefined':
            return Js('undefined')
        elif typ=='Boolean':
            return Js('true') if self.value else Js('false')
        elif typ=='Number': #or self.Class=='Number':
            if self.is_nan():
                return Js('NaN')
            elif self.is_infinity():
                sign = '-' if self.value<0 else ''
                return Js(sign+'Infinity')
            elif isinstance(self.value, long) or self.value.is_integer(): # dont print .0
                return Js(unicode(int(self.value)))
            return Js(unicode(self.value)) # accurate enough
        elif typ=='String':
            return self
        else: #object
            return self.to_primitive('String').to_string()

    def to_object(self):
        # ToObject (ES5 9.9): box primitives, pass objects through.
        typ = self.TYPE
        if typ=='Null' or typ=='Undefined':
            raise MakeError('TypeError', 'undefined or null can\'t be converted to object')
        elif typ=='Boolean': # Unsure here... todo repair here
            return Boolean.create(self)
        elif typ=='Number': #?
            return Number.create(self)
        elif typ=='String': #?
            return String.create(self)
        else: #object
            return self

    def to_int32(self):
        # ToInt32 (ES5 9.5): wrap into signed 32-bit range.
        num = self.to_number()
        if num.is_nan() or num.is_infinity():
            return 0
        int32 = int(num.value) % 2**32
        return int(int32 - 2**32 if int32 >= 2**31 else int32)

    def strict_equality_comparison(self, other):
        # === operator.
        return PyJsStrictEq(self, other)

    def cok(self):
        """Check object coercible"""
        if self.Class in {'Undefined', 'Null'}:
            raise MakeError('TypeError', 'undefined or null can\'t be converted to object')

    def to_int(self):
        # ToInteger (ES5 9.4); 10**20 stands in for +/- infinity.
        num = self.to_number()
        if num.is_nan():
            return 0
        elif num.is_infinity():
            return 10**20 if num.value>0 else -10**20
        return int(num.value)

    def to_uint32(self):
        # ToUint32 (ES5 9.6).
        num = self.to_number()
        if num.is_nan() or num.is_infinity():
            return 0
        return int(num.value) % 2**32

    def to_uint16(self):
        # ToUint16 (ES5 9.7).
        num = self.to_number()
        if num.is_nan() or num.is_infinity():
            return 0
        return int(num.value) % 2**16

    def to_int16(self):
        # Signed 16-bit wrap (used by typed conversions).
        num = self.to_number()
        if num.is_nan() or num.is_infinity():
            return 0
        int16 = int(num.value) % 2**16
        return int(int16 - 2**16 if int16 >= 2**15
                   else int16)

    def same_as(self, other):
        # SameValue-style comparison (identity for objects).
        typ = Type(self)
        if typ!=other.Class:
            return False
        if typ=='Undefined' or typ=='Null':
            return True
        if typ=='Boolean' or typ=='Number' or typ=='String':
            return self.value==other.value
        else: #object
            return self is other #Id compare.

    #Not to be used by translation (only internal use)
    def __getitem__(self, item):
        return self.get(str(item) if not isinstance(item, PyJs) else item.to_string().value)

    def __setitem__(self, item, value):
        self.put(str(item) if not isinstance(item, PyJs) else item.to_string().value, Js(value))

    def __len__(self):
        try:
            return self.get('length').to_uint32()
        except:
            raise TypeError('This object (%s) does not have length property'%self.Class)

    #Operators-------------
    #Unary, other will be implemented as functions. Increments and decrements
    # will be methods of Number class
    def __neg__(self): #-u
        return Js(-self.to_number().value)

    def __pos__(self): #+u
        return self.to_number()

    def __invert__(self): #~u
        return Js(Js(~self.to_int32()).to_int32())

    def neg(self): # !u cant do 'not u' :(
        return Js(not self.to_boolean().value)

    def __nonzero__(self):
        return self.to_boolean().value

    def __bool__(self):
        return self.to_boolean().value

    def typeof(self):
        # JS `typeof` operator; note typeof null === 'object'.
        if self.is_callable():
            return Js('function')
        typ = Type(self).lower()
        if typ=='null':
            typ = 'object'
        return Js(typ)

    #Bitwise operators
    #  <<, >>, &, ^, |

    # <<
    def __lshift__(self, other):
        lnum = self.to_int32()
        rnum = other.to_uint32()
        shiftCount = rnum & 0x1F  # JS uses only low 5 bits of shift count
        return Js(Js(lnum << shiftCount).to_int32())

    # >>
    def __rshift__(self, other):
        lnum = self.to_int32()
        rnum = other.to_uint32()
        shiftCount = rnum & 0x1F
        return Js(Js(lnum >> shiftCount).to_int32())

    # >>> (unsigned right shift)
    def pyjs_bshift(self, other):
        lnum = self.to_uint32()
        rnum = other.to_uint32()
        shiftCount = rnum & 0x1F
        return Js(Js(lnum >> shiftCount).to_uint32())

    # &
    def __and__(self, other):
        lnum = self.to_int32()
        rnum = other.to_int32()
        return Js(Js(lnum & rnum).to_int32())

    # ^
    def __xor__(self, other):
        lnum = self.to_int32()
        rnum = other.to_int32()
        return Js(Js(lnum ^ rnum).to_int32())

    # |
    def __or__(self, other):
        lnum = self.to_int32()
        rnum = other.to_int32()
        return Js(Js(lnum | rnum).to_int32())

    # Additive operators
    # + and - are implemented here

    # + (string concatenation when either side converts to String)
    def __add__(self, other):
        a = self.to_primitive()
        b = other.to_primitive()
        if a.TYPE=='String' or b.TYPE=='String':
            return Js(a.to_string().value+b.to_string().value)
        a = a.to_number()
        b = b.to_number()
        return Js(a.value+b.value)

    # -
    def __sub__(self, other):
        return Js(self.to_number().value-other.to_number().value)

    #Multiplicative operators
    # *, / and % are implemented here

    # *
    def __mul__(self, other):
        return Js(self.to_number().value*other.to_number().value)

    # / (division by zero yields +/-Infinity or NaN, never raises)
    def __div__(self, other):
        a = self.to_number().value
        b = other.to_number().value
        if b:
            return Js(a/b)
        if not a or a!=a:
            return NaN
        return Infinity if a>0 else -Infinity

    # % (JS remainder takes the sign of the dividend, unlike Python)
    def __mod__(self, other):
        a = self.to_number().value
        b = other.to_number().value
        if abs(a)==float('inf') or not b:
            return NaN
        if abs(b)==float('inf'):
            return Js(a)
        pyres = Js(a%b) #different signs in python and javascript
        #python has the same sign as b and js has the same
        #sign as a.
        if a<0 and pyres.value>0:
            pyres.value -= abs(b)
        elif a>0 and pyres.value<0:
            pyres.value += abs(b)
        return Js(pyres)

    #Comparisons (I dont implement === and !== here, these
    # will be implemented as external functions later)
    # <, <=, !=, ==, >=, > are implemented here.

    def abstract_relational_comparison(self, other, self_first=True):
        ''' self<other if self_first else other<self.
            Returns the result of the question: is self smaller than other?
            in case self_first is false it returns the answer of:
                                               is other smaller than self.
            result is PyJs type: bool or undefined'''
        # ES5 11.8.5 Abstract Relational Comparison.
        px = self.to_primitive('Number')
        py = other.to_primitive('Number')
        if not self_first: #reverse order
            px, py = py, px
        if not (px.Class=='String' and py.Class=='String'):
            px, py = px.to_number(), py.to_number()
            if px.is_nan() or py.is_nan():
                return undefined  # NaN makes the comparison undefined
            return Js(px.value<py.value) # same cmp algorithm
        else:
            # I am pretty sure that python has the same
            # string cmp algorithm but I have to confirm it
            return Js(px.value<py.value)

    #<
    def __lt__(self, other):
        res = self.abstract_relational_comparison(other, True)
        if res.is_undefined():
            return false
        return res

    #<= (defined as NOT (other < self), per spec)
    def __le__(self, other):
        res = self.abstract_relational_comparison(other, False)
        if res.is_undefined():
            return false
        return res.neg()

    #>= (defined as NOT (self < other))
    def __ge__(self, other):
        res = self.abstract_relational_comparison(other, True)
        if res.is_undefined():
            return false
        return res.neg()

    #>
    def __gt__(self, other):
        res = self.abstract_relational_comparison(other, False)
        if res.is_undefined():
            return false
        return res

    def abstract_equality_comparison(self, other):
        ''' returns the result of JS == compare.
            result is PyJs type: bool'''
        # ES5 11.9.3 Abstract Equality Comparison (with type coercion).
        tx, ty = self.TYPE, other.TYPE
        if tx==ty:
            if tx=='Undefined' or tx=='Null':
                return true
            if tx=='Number' or tx=='String' or tx=='Boolean':
                return Js(self.value==other.value)
            return Js(self is other) # Object
        elif (tx=='Undefined' and ty=='Null') or (ty=='Undefined' and tx=='Null'):
            return true
        elif tx=='Number' and ty=='String':
            return self.abstract_equality_comparison(other.to_number())
        elif tx=='String' and ty=='Number':
            return self.to_number().abstract_equality_comparison(other)
        elif tx=='Boolean':
            return self.to_number().abstract_equality_comparison(other)
        elif ty=='Boolean':
            return self.abstract_equality_comparison(other.to_number())
        elif (tx=='String' or tx=='Number') and other.is_object():
            return self.abstract_equality_comparison(other.to_primitive())
        elif (ty=='String' or ty=='Number') and self.is_object():
            return self.to_primitive().abstract_equality_comparison(other)
        else:
            return false

    #==
    def __eq__(self, other):
        return self.abstract_equality_comparison(other)

    #!=
    def __ne__(self, other):
        return self.abstract_equality_comparison(other).neg()

    #Other methods (instanceof)
    def instanceof(self, other):
        '''checks if self is instance of other'''
        if not hasattr(other, 'has_instance'):
            return false
        return other.has_instance(self)

    #iteration
    def __iter__(self):
        #Returns a generator of all own enumerable properties
        # since the size of self.own can change we need to use different method of iteration.
        # SLOW! New items will NOT show up.
        returned = {}
        if not self.IS_CHILD_SCOPE:
            cands = sorted(name for name in self.own if self.own[name]['enumerable'])
        else:
            cands = sorted(name for name in self.own)
        for cand in cands:
            # re-check: the property may have been deleted mid-iteration
            check = self.own.get(cand)
            if check and check['enumerable']:
                yield Js(cand)

    def contains(self, other):
        # JS `in` operator.
        if not self.is_object():
            raise MakeError('TypeError',"You can\'t use 'in' operator to search in non-objects")
        return Js(self.has_property(other.to_string().value))

    #Other Special methods
    def __call__(self, *args):
        '''Call this value as a function (`this` will be the global object).

        NOTE: dont pass this and arguments here, these will be added
        automatically!'''
        if not self.is_callable():
            raise MakeError('TypeError', '%s is not a function'%self.typeof())
        return self.call(self.GlobalObject, args)

    def create(self, *args):
        '''Generally not a constructor, raise an error'''
        raise MakeError('TypeError', '%s is not a constructor'%self.Class)

    def __unicode__(self):
        return self.to_string().value

    def __repr__(self):
        # JS-looking repr: {k: v} for objects, [..] for arrays, quoted strings.
        if self.Class=='Object':
            res = []
            for e in self:
                res.append(str_repr(e.value)+': '+str_repr(self.get(e)))
            return '{%s}'%', '.join(res)
        elif self.Class=='String':
            return str_repr(self.value)
        elif self.Class=='Array':
            res = []
            for e in self:
                res.append(repr(self.get(e)))
            return '[%s]'%', '.join(res)
        else:
            val = str_repr(self.to_string().value)
            return val

    def _fuck_python3(self): # hack to make object hashable in python 3 (__eq__ causes problems)
        return object.__hash__(self)

    def callprop(self, prop, *args):
        '''Call a property prop as a method (`this` will be self).

        NOTE: dont pass this and arguments here, these will be added
        automatically!'''
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        cand = self.get(prop)
        if not cand.is_callable():
            raise MakeError('TypeError','%s is not a function'%cand.typeof())
        return cand.call(self, args)

    def to_python(self):
        """returns equivalent python object.
         for example if this object is javascript array then this method will return equivalent python array"""
        return to_python(self)

    def to_py(self):
        """returns equivalent python object.
         for example if this object is javascript array then this method will return equivalent python array"""
        return self.to_python()

if six.PY3:
    # __eq__ is overloaded above, so hashability must be restored explicitly;
    # Python 3 also uses __truediv__ for the / operator.
    PyJs.__hash__ = PyJs._fuck_python3
    PyJs.__truediv__ = PyJs.__div__

#Define some more classes representing operators:

def PyJsStrictEq(a, b):
    '''a===b  (no type coercion)'''
    tx, ty = Type(a), Type(b)
    if tx!=ty:
        return false
    if tx=='Undefined' or tx=='Null':
        return true
    if a.is_primitive(): #string bool and number case
        return Js(a.value==b.value)
    return Js(a is b) # object comparison

def PyJsStrictNeq(a, b):
    ''' a!==b'''
    return PyJsStrictEq(a, b).neg()

def PyJsBshift(a, b):
    """a>>>b"""
    return a.pyjs_bshift(b)

def PyJsComma(a, b):
    # JS comma operator: evaluate both, return the right operand.
    return b

class PyJsException(Exception):
    # Wraps a JS value thrown with `throw`; `mes` holds the JS value.
    def __str__(self):
        if self.mes.Class=='Error':
            return self.mes.callprop('toString').value
        else:
            return unicode(self.mes)

class PyJsSwitchException(Exception):
    # Internal control-flow marker used by translated switch statements.
    pass

PyJs.MakeError = staticmethod(MakeError)

def JsToPyException(js):
    # Wrap a thrown JS value in a Python exception so it can propagate.
    temp = PyJsException()
    temp.mes = js
    return temp

def PyExceptionToJs(py):
    # Unwrap the JS value carried by a PyJsException.
    return py.mes

#Scope class it will hold all the variables accessible to user
class Scope(PyJs):
    Class = 'global'
    extensible = True
    IS_CHILD_SCOPE = True
    # todo speed up
    # in order to speed up this very important class the top scope should behave differently than
    # child scopes, child scope should not have this property descriptor thing because they cant be changed anyway
    # they are all confugurable= False

    def __init__(self, scope, closure=None):
        """Initialize a scope. With closure=None this is the global scope
        (scope maps names to values via full property descriptors);
        otherwise it is a fast child scope chained to `closure`."""
        self.prototype = closure
        if closure is None:
            # global, top level scope
            self.own = {}
            for k, v in six.iteritems(scope):
                # set all the global items
                self.define_own_property(k, {'value': v, 'configurable': False,
                                             'writable': False, 'enumerable': False})
        else:
            # not global, less powerful but faster closure.
            self.own = scope # simple dictionary which maps name directly to js object.

    def register(self, lval):
        # Declare a variable (var statement) in this scope.
        # registered keeps only global registered variables
        if self.prototype is None:
            # define in global scope
            if lval in self.own:
                self.own[lval]['configurable'] = False
            else:
                self.define_own_property(lval, {'value': undefined, 'configurable': False,
                                                'writable': True, 'enumerable': True})
        elif lval not in self.own:
            # define in local scope since it has not been defined yet
            self.own[lval] = undefined # default value

    def registers(self, lvals):
        """register multiple variables"""
        for lval in lvals:
            self.register(lval)

    def put(self, lval, val, op=None):
        # Assign lval in the nearest scope that declared it.
        if self.prototype is None:
            # global scope put, simple
            return PyJs.put(self, lval, val, op)
        else:
            # trying to put in local scope
            # we dont know yet in which scope we should place this var
            if lval in self.own:
                if op: # increment operation
                    val = getattr(self.own[lval], OP_METHODS[op])(val)
                self.own[lval] = val
                return val
            else:
                #try to put in the lower scope since we cant put in this one (var wasn't registered)
                return self.prototype.put(lval, val, op)

    def force_own_put(self, prop, val, configurable=False):
        # Unconditionally set a binding in THIS scope (bypasses lookup chain).
        if self.prototype is None: # global scope
            self.own[prop] = {'value': val, 'writable': True,
                              'enumerable':True, 'configurable':configurable}
        else:
            self.own[prop] = val

    def get(self, prop, throw=True):
        # Resolve an identifier; raises ReferenceError when undeclared
        # (unless throw=False).
        #note prop is always a Py String
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        if self.prototype is not None:
            # fast local scope
            cand = self.own.get(prop)
            if cand is None:
                return self.prototype.get(prop, throw)
            return cand
        # slow, global scope
        if prop not in self.own:
            if throw:
                raise MakeError('ReferenceError', '%s is not defined' % prop)
            return undefined
        return PyJs.get(self, prop)

    def delete(self, lval):
        # JS `delete` on an identifier: local bindings are undeletable.
        if self.prototype is not None:
            if lval in self.own:
                return false
            return self.prototype.delete(lval)
        # we are in global scope here. Must exist and be configurable to delete
        if lval not in self.own:
            # this lval does not exist, why do you want to delete it???
            return true
        if self.own[lval]['configurable']:
            del self.own[lval]
            return true
        # not configurable, cant delete
        return false

    def pyimport(self, name, module):
        # Expose a Python module to JS code under `name`.
        self.register(name)
        self.put(name, py_wrap(module))

    def __repr__(self):
        return u'[Object Global]'

    def to_python(self):
        return to_python(self)

class This(Scope):
    # Scope used to resolve `this`; lookups never throw.
    IS_CHILD_SCOPE = False
    def get(self, prop, throw=False):
        return Scope.get(self, prop, throw)

class JsObjectWrapper(object):
    # Python-side proxy around a PyJs object: attribute/item access,
    # calling and iteration are forwarded to the wrapped JS value.
    def __init__(self, obj):
        # bypass our own __setattr__
        self.__dict__['_obj'] = obj

    def __call__(self, *args):
        args = tuple(Js(e) for e in args)
        if '_prop_of' in self.__dict__:
            # we were obtained as parent.meth — call as a method of parent
            parent, meth = self.__dict__['_prop_of']
            return to_python(parent._obj.callprop(meth, *args))
        return to_python(self._obj(*args))

    def __getattr__(self, item):
        if item == 'new' and self._obj.is_callable():
            # return instance initializer
            def PyJsInstanceInit(*args):
                args = tuple(Js(e) for e in args)
                return self._obj.create(*args).to_python()
            return PyJsInstanceInit
        cand = to_python(self._obj.get(str(item)))
        # handling method calling... obj.meth(). Value of this in meth should be self
        if isinstance(cand, self.__class__):
            cand.__dict__['_prop_of'] = self, str(item)
        return cand

    def __setattr__(self, item, value):
        self._obj.put(str(item), Js(value))

    def __getitem__(self, item):
        cand = to_python(self._obj.get(str(item)))
        if isinstance(cand, self.__class__):
            cand.__dict__['_prop_of'] = self, str(item)
        return cand

    def __setitem__(self, item, value):
        self._obj.put(str(item), Js(value))

    def __iter__(self):
        if self._obj.Class=='Array':
            return iter(self.to_list())
        elif self._obj.Class=='Object':
            return iter(self.to_dict())
        else:
            raise MakeError('TypeError', '%s is not iterable in Python' % self._obj.Class)

    def __repr__(self):
        if self._obj.is_primitive() or self._obj.is_callable():
            return repr(self._obj)
        elif self._obj.Class in {'Array', 'Arguments'}:
            return repr(self.to_list())
        return repr(self.to_dict())

    def __len__(self):
        return len(self._obj)

    def __nonzero__(self):
        return bool(self._obj)

    def __bool__(self):
        return bool(self._obj)

    def to_dict(self):
        return to_dict(self.__dict__['_obj'])

    def to_list(self):
        return to_list(self.__dict__['_obj'])

class PyObjectWrapper(PyJs):
    # JS-side proxy around an arbitrary Python object (opposite direction
    # of JsObjectWrapper): JS property access maps to getattr/indexing.
    Class = 'PyObjectWrapper'
    def __init__(self, obj):
        self.obj = obj

    def get(self, prop):
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        try:
            if prop.isdigit():
                return py_wrap(self.obj[int(prop)])
            return py_wrap(getattr(self.obj, prop))
        except:
            # best-effort: missing attributes behave like JS undefined
            return undefined

    def put(self, prop, val, throw=False):
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        try:
            setattr(self.obj, prop, to_python(val))
        except AttributeError:
            raise MakeError('TypeError', 'Read only object probably...')
        return val

    def __call__(self, *args):
        py_args = tuple(to_python(e) for e in args)
        try:
            py_res = self.obj.__call__(*py_args)
        except Exception as e:
            message = 'your Python function failed! '
            try:
                message += e.message
            except:
                pass
            raise MakeError('Error', message)
        return py_wrap(py_res)

    def callprop(self, prop, *args):
        py_args = tuple(to_python(e) for e in args)
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        return self.get(prop)(*py_args)

    def delete(self, prop):
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        try:
            if prop.isdigit():
                del self.obj[int(prop)]
            else:
                delattr(self.obj, prop)
            return true
        except:
            return false

    def __repr__(self):
        return 'PyObjectWrapper(%s)' % str(self.obj)

    def to_python(self):
        return self.obj

    def to_py(self):
        return self.obj

def py_wrap(py):
    # Convert a Python value to a JS value: basic types go through HJs,
    # everything else is proxied via PyObjectWrapper.
    if isinstance(py, (FunctionType, BuiltinFunctionType, MethodType, BuiltinMethodType,
                       dict, int, str, bool, float, list, tuple, long, basestring)) or py is None:
        return HJs(py)
    return PyObjectWrapper(py)

##############################################################################
#Define types

#Object
class PyJsObject(PyJs):
    Class = 'Object'
    def __init__(self, prop_descs={}, prototype=None, extensible=True):
        self.prototype = prototype
        self.extensible = extensible
        self.own = {}
        for prop, desc in six.iteritems(prop_descs):
            self.define_own_property(prop, desc)

    def __repr__(self):
        return repr(self.to_python().to_dict())

ObjectPrototype = PyJsObject()

#Function
class PyJsFunction(PyJs):
    Class = 'Function'
    def __init__(self, func, prototype=None, extensible=True, source=None):
        cand = fix_js_args(func)
        has_scope = cand is func
        func = cand
        # JS-visible arity: python argcount minus this/arguments (and scope)
        self.argcount = six.get_function_code(func).co_argcount - 2 - has_scope
        self.code = func
        self.source = source if source else '{ [python code] }'
        self.func_name = func.__name__ if not func.__name__.startswith('PyJs_anonymous') else ''
        self.extensible = extensible
        self.prototype = prototype
        self.own = {}
        #set own property length to the number of arguments
        self.define_own_property('length', {'value': Js(self.argcount), 'writable': False,
                                            'enumerable': False, 'configurable': False})
        if self.func_name:
            self.define_own_property('name', {'value': Js(self.func_name), 'writable': False,
                                              'enumerable': False, 'configurable': True})

        # set own prototype
        proto = Js({})
        # constructor points to this function
        proto.define_own_property('constructor',{'value': self, 'writable': True,
                                                 'enumerable': False, 'configurable': True})
        self.define_own_property('prototype', {'value': proto, 'writable': True,
                                               'enumerable': False, 'configurable': False})

    def _set_name(self, name):
        '''name is py type'''
        if self.own.get('name'):
            self.func_name = name
            self.own['name']['value'] = Js(name)

    def construct(self, *args):
        # [[Construct]]: make a new object with this.prototype and call self.
        proto = self.get('prototype')
        if not proto.is_object():
            # set to standard prototype
            proto = ObjectPrototype
        obj = PyJsObject(prototype=proto)
        cand = self.call(obj, *args)
        # a constructor returning a non-object yields the new object instead
        return cand if cand.is_object() else obj

    def call(self, this, args=()):
        '''Calls this function and returns a result
        (converted to PyJs type so func can return python types)

        this must be a PyJs object and args must be a python tuple of PyJs objects.

        arguments object is passed automatically and will be equal to Js(args)
        (tuple converted to arguments object).You dont need to worry about number
        of arguments you provide if you supply less then missing ones will be set
        to undefined (but not present in arguments object).
        And if you supply too much then excess will not be passed
        (but they will be present in arguments object).
        '''
        if not hasattr(args, '__iter__'):
            #get rid of it later
            args = (args,)
        args = tuple(Js(e) for e in args) # this wont be needed later

        arguments = PyJsArguments(args, self) # tuple will be converted to arguments object.
        arglen = self.argcount #function expects this number of args.
        if len(args)>arglen:
            args = args[0:arglen]
        elif len(args)<arglen:
            # pad missing positionals with undefined
            args += (undefined,)*(arglen-len(args))
        args += this, arguments #append extra params to the arg list
        try:
            return Js(self.code(*args))
        except NotImplementedError:
            raise
        except RuntimeError as e: # maximum recursion
            raise MakeError('RangeError', e.message if not isinstance(e, NotImplementedError) else 'Not implemented!')

    def has_instance(self, other):
        # `other instanceof self`: walk other's prototype chain looking for
        # self.prototype.
        # I am not sure here so instanceof may not work lol.
        if not other.is_object():
            return false
        proto = self.get('prototype')
        if not proto.is_object():
            raise TypeError('Function has non-object prototype in instanceof check')
        while True:
            other = other.prototype
            if not other:
                # todo make sure that the condition is not None or null
                return false
            if other is proto:
                return true

    def create(self, *args):
        # `new self(...)` — like construct but args already packed.
        proto = self.get('prototype')
        if not proto.is_object():
            proto = ObjectPrototype
        new = PyJsObject(prototype=proto)
        res = self.call(new, args)
        if res.is_object():
            return res
        return new

class PyJsBoundFunction(PyJsFunction):
    # Result of Function.prototype.bind: fixed `this` and leading args.
    def __init__(self, target, bound_this, bound_args):
        self.target = target
        self.bound_this = bound_this
        self.bound_args = bound_args
        self.argcount = target.argcount
        self.code = target.code
        self.source = target.source
        self.func_name = target.func_name
        self.extensible = True
        self.prototype = FunctionPrototype
        self.own = {}
        # set own property length to the number of arguments
        self.define_own_property('length', {'value': target.get('length')-Js(len(self.bound_args)),
                                            'writable': False,
                                            'enumerable': False, 'configurable': False})
        if self.func_name:
            self.define_own_property('name', {'value': Js(self.func_name), 'writable': False,
                                              'enumerable': False, 'configurable': True})

        # set own prototype
        proto = Js({})
        # constructor points to this function
        proto.define_own_property('constructor', {'value': self, 'writable': True,
                                                  'enumerable': False, 'configurable': True})
        self.define_own_property('prototype', {'value': proto, 'writable': True,
                                               'enumerable': False,
                                               'configurable': False})

    def call(self, this, args=()):
        # Ignore the provided `this`; use the bound one and prepend bound args.
        return self.target.call(self.bound_this, self.bound_args+args)

    def has_instance(self, other):
        return self.target.has_instance(other)

PyJs.PyJsBoundFunction = PyJsBoundFunction

# Maps JS compound-assignment operators to the PyJs dunder implementing them.
OP_METHODS = {'*': '__mul__',
              '/': '__div__',
              '%': '__mod__',
              '+': '__add__',
              '-': '__sub__',
              '<<': '__lshift__',
              '>>': '__rshift__',
              '&': '__and__',
              '^': '__xor__',
              '|': '__or__',
              '>>>': 'pyjs_bshift'}

def Empty():
    # Placeholder function body: returns undefined.
    return Js(None)

#Number
class PyJsNumber(PyJs):  #Note i dont implement +0 and -0. Just 0.
    TYPE = 'Number'
    Class = 'Number'

NumberPrototype = PyJsObject({}, ObjectPrototype)
NumberPrototype.Class = 'Number'
NumberPrototype.value = 0

Infinity = PyJsNumber(float('inf'), NumberPrototype)
NaN = PyJsNumber(float('nan'), NumberPrototype)
PyJs.NaN = NaN
PyJs.Infinity = Infinity

# This dict aims to increase speed of string creation by storing character instances
CHAR_BANK = {}
NUM_BANK = {}
PyJs.CHAR_BANK = CHAR_BANK

#String
# Different than implementation design in order to improve performance
#for example I dont create separate property for each character in string, it would take ages.
class PyJsString(PyJs):
    TYPE = 'String'
    Class = 'String'
    extensible = False
    def __init__(self, value=None, prototype=None):
        '''Constructor for Number String and Boolean'''
        if not isinstance(value, basestring):
            raise TypeError # this will be internal error
        self.value = value
        self.prototype = prototype
        self.own = {}
        # this should be optimized because its much slower than python str creation (about 50 times!)
        # Dont create separate properties for every index. Just
        self.own['length'] = {'value': Js(len(value)), 'writable': False,
                              'enumerable': False, 'configurable': False}
        if len(value)==1:
            # cache single characters so repeated indexing reuses instances
            CHAR_BANK[value] = self #, 'writable': False,
                                    # 'enumerable': True, 'configurable': False}

    def get(self, prop):
        # Character indexing without per-index own properties.
        if not isinstance(prop, basestring):
            prop = prop.to_string().value
        try:
            index = int(prop)
            if index<0:
                return undefined
            char = self.value[index]
            if char not in CHAR_BANK:
                Js(char) # this will add char to CHAR BANK
            return CHAR_BANK[char]
        except Exception:
            # not a valid in-range index — fall back to normal lookup
            pass
        return PyJs.get(self, prop)

    def can_put(self, prop):
        # Strings are immutable.
        return False

    def __iter__(self):
        for i in xrange(len(self.value)):
            yield Js(i) # maybe create an int bank?

StringPrototype = PyJsObject({}, ObjectPrototype)
StringPrototype.Class = 'String'
StringPrototype.value = ''

CHAR_BANK[''] = Js('')

#Boolean
class PyJsBoolean(PyJs):
    TYPE = 'Boolean'
    Class = 'Boolean'

BooleanPrototype = PyJsObject({}, ObjectPrototype)
BooleanPrototype.Class = 'Boolean'
BooleanPrototype.value = False

true = PyJsBoolean(True, BooleanPrototype)
false = PyJsBoolean(False, BooleanPrototype)

#Undefined
class PyJsUndefined(PyJs):
    TYPE = 'Undefined'
    Class = 'Undefined'
    def __init__(self):
        pass

undefined = PyJsUndefined()

#Null
class PyJsNull(PyJs):
    TYPE = 'Null'
    Class = 'Null'
    def __init__(self):
        pass

null = PyJsNull()
PyJs.null = null

class PyJsArray(PyJs):
    Class = 'Array'
    def __init__(self, arr=[], prototype=None):
        self.extensible = True
        self.prototype = prototype
        self.own = {'length' : {'value': Js(0), 'writable': True,
                                'enumerable': False, 'configurable': False}}
        for i, e in enumerate(arr):
            self.define_own_property(str(i), {'value': Js(e), 'writable': True,
                                              'enumerable': True, 'configurable': True})

    def define_own_property(self, prop, desc):
        # Array override of [[DefineOwnProperty]] (ES5 15.4.5.1):
        # keeps `length` in sync with numeric indices.
        old_len_desc = self.get_own_property('length')
        old_len = old_len_desc['value'].value # value is js type so convert to py.
        if prop=='length':
            # Setting length: may truncate the array (delete trailing indices).
            if 'value' not in desc:
                return PyJs.define_own_property(self, prop, desc)
            new_len = desc['value'].to_uint32()
            if new_len!=desc['value'].to_number().value:
                raise MakeError('RangeError', 'Invalid range!')
            new_desc = {k:v for k,v in six.iteritems(desc)}
            new_desc['value'] = Js(new_len)
            if new_len>=old_len:
                return PyJs.define_own_property(self, prop, new_desc)
            if not old_len_desc['writable']:
                return False
            if 'writable' not in new_desc or new_desc['writable']==True:
                new_writable = True
            else:
                # defer making length read-only until truncation is done
                new_writable = False
                new_desc['writable'] = True
            if not PyJs.define_own_property(self, prop, new_desc):
                return False
            if new_len<old_len:
                # not very efficient for sparse arrays, so using different method for sparse:
                if old_len>30*len(self.own):
                    for ele in self.own.keys():
                        if ele.isdigit() and int(ele)>=new_len:
                            if not self.delete(ele):
                                # if failed to delete set len to current len and reject.
                                new_desc['value'] = Js(old_len+1)
                                if not new_writable:
                                    new_desc['writable'] = False
                                PyJs.define_own_property(self, prop, new_desc)
                                return False
                    old_len = new_len
                else: # standard method
                    while new_len<old_len:
                        old_len -= 1
                        if not self.delete(str(int(old_len))):
                            # if failed to delete set len to current len and reject.
                            new_desc['value'] = Js(old_len+1)
                            if not new_writable:
                                new_desc['writable'] = False
                            PyJs.define_own_property(self, prop, new_desc)
                            return False
            if not new_writable:
                self.own['length']['writable'] = False
            return True
        elif prop.isdigit():
            # Numeric index: defining past the end grows `length`.
            index = int(int(prop) % 2**32)
            if index>=old_len and not old_len_desc['writable']:
                return False
            if not PyJs.define_own_property(self, prop, desc):
                return False
            if index>=old_len:
                old_len_desc['value'] = Js(index + 1)
            return True
        else:
            # ordinary named property
            return PyJs.define_own_property(self, prop, desc)

    def to_list(self):
        return [self.get(str(e)) for e in xrange(self.get('length').to_uint32())]

    def __repr__(self):
        return repr(self.to_python().to_list())

ArrayPrototype = PyJsArray([], ObjectPrototype)

class PyJsArguments(PyJs):
    # The `arguments` object available inside translated functions.
    Class = 'Arguments'
    def __init__(self, args, callee):
        self.own = {}
        self.extensible = True
        self.prototype = ObjectPrototype
        self.define_own_property('length', {'value': Js(len(args)), 'writable': True,
                                            'enumerable': False, 'configurable': True})
        self.define_own_property('callee', {'value': callee, 'writable': True,
                                            'enumerable': False, 'configurable': True})
        for i, e in enumerate(args):
            self.put(str(i), Js(e))

    def to_list(self):
        return [self.get(str(e)) for e in xrange(self.get('length').to_uint32())]

#We can define function proto after number proto because func uses number in its init
FunctionPrototype = PyJsFunction(Empty, ObjectPrototype)
FunctionPrototype.own['name']['value'] = Js('')

# I will not rewrite RegExp engine from scratch. I will use re because its much faster.
# I have to only make sure that I am handling all the differences correctly.
REGEXP_DB = {} class PyJsRegExp(PyJs): Class = 'RegExp' extensible = True def __init__(self, regexp, prototype=None): self.prototype = prototype self.glob = False self.ignore_case = 0 self.multiline = 0 # self._cache = {'str':'NoStringEmpty23093', # 'iterator': None, # 'lastpos': -1, # 'matches': {}} flags = '' if not regexp[-1]=='/': #contains some flags (allowed are i, g, m spl = regexp.rfind('/') flags = set(regexp[spl+1:]) self.value = regexp[1:spl] if 'g' in flags: self.glob = True if 'i' in flags: self.ignore_case = re.IGNORECASE if 'm' in flags: self.multiline = re.MULTILINE else: self.value = regexp[1:-1] try: if self.value in REGEXP_DB: self.pat = REGEXP_DB[regexp] else: comp = 'None' # we have to check whether pattern is valid. # also this will speed up matching later # todo critical fix patter conversion etc. ..!!!!! # ugly hacks porting js reg exp to py reg exp works in 99% of cases ;) possible_fixes = [ (u'[]', u'[\0]'), (u'[^]', u'[^\0]'), (u'nofix1791', u'nofix1791') ] reg = self.value for fix, rep in possible_fixes: comp = REGEXP_CONVERTER._interpret_regexp(reg, flags) #print 'reg -> comp', reg, '->', comp try: self.pat = re.compile(comp, self.ignore_case | self.multiline) #print reg, '->', comp break except: reg = reg.replace(fix, rep) # print 'Fix', fix, '->', rep, '=', reg else: raise REGEXP_DB[regexp] = self.pat except: #print 'Invalid pattern but fuck it', self.value, comp raise MakeError('SyntaxError', 'Invalid RegExp pattern: %s -> %s'% (repr(self.value), repr(comp))) # now set own properties: self.own = {'source' : {'value': Js(self.value), 'enumerable': False, 'writable': False, 'configurable': False}, 'global' : {'value': Js(self.glob), 'enumerable': False, 'writable': False, 'configurable': False}, 'ignoreCase' : {'value': Js(bool(self.ignore_case)), 'enumerable': False, 'writable': False, 'configurable': False}, 'multiline' : {'value': Js(bool(self.multiline)), 'enumerable': False, 'writable': False, 'configurable': False}, 'lastIndex' : 
{'value': Js(0), 'enumerable': False, 'writable': True, 'configurable': False}} def match(self, string, pos): '''string is of course py string''' return self.pat.match(string, pos) # way easier :) # assert 0<=pos <= len(string) # if not pos: # return re.match(self.pat, string) # else: # if self._cache['str']==string: # if pos>self._cache['lastpos']: # for m in self._cache['iterator']: # start = m.start() # self._cache['lastpos'] = start # self._cache['matches'][start] = m # if start==pos: # return m # elif start>pos: # return None # self._cache['lastpos'] = len(string) # return None # else: # return self._cache['matches'].get(pos) # else: # self._cache['str'] = string # self._cache['matches'] = {} # self._cache['lastpos'] = -1 # self._cache['iterator'] = re.finditer(self.pat, string) # return self.match(string, pos) def JsRegExp(source): # Takes regexp literal! return PyJsRegExp(source, RegExpPrototype) RegExpPrototype = PyJsRegExp('/(?:)/', ObjectPrototype) ####Exceptions: default_attrs = {'writable':True, 'enumerable':False, 'configurable':True} def fill_in_props(obj, props, default_desc): for prop, value in props.items(): default_desc['value'] = Js(value) obj.define_own_property(prop, default_desc) class PyJsError(PyJs): Class = 'Error' extensible = True def __init__(self, message=None, prototype=None): self.prototype = prototype self.own = {} if message is not None: self.put('message', Js(message).to_string()) self.own['message']['enumerable'] = False ErrorPrototype = PyJsError(Js(''), ObjectPrototype) @Js def Error(message): return PyJsError(None if message.is_undefined() else message, ErrorPrototype) Error.create = Error err = {'name': 'Error', 'constructor': Error} fill_in_props(ErrorPrototype, err, default_attrs) Error.define_own_property('prototype', {'value': ErrorPrototype, 'enumerable': False, 'writable': False, 'configurable': False}) def define_error_type(name): TypeErrorPrototype = PyJsError(None, ErrorPrototype) @Js def TypeError(message): return 
PyJsError(None if message.is_undefined() else message, TypeErrorPrototype) err = {'name': name, 'constructor': TypeError} fill_in_props(TypeErrorPrototype, err, default_attrs) TypeError.define_own_property('prototype', {'value': TypeErrorPrototype, 'enumerable': False, 'writable': False, 'configurable': False}) ERRORS[name] = TypeError ERRORS = {'Error': Error} ERROR_NAMES = ['Eval', 'Type', 'Range', 'Reference', 'Syntax', 'URI'] for e in ERROR_NAMES: define_error_type(e+'Error') ############################################################################## # Import and fill prototypes here. #this works only for data properties def fill_prototype(prototype, Class, attrs, constructor=False): for i in dir(Class): e = getattr(Class, i) if six.PY2: if hasattr(e, '__func__'): temp = PyJsFunction(e.__func__, FunctionPrototype) attrs = {k:v for k,v in attrs.iteritems()} attrs['value'] = temp prototype.define_own_property(i, attrs) else: if hasattr(e, '__call__') and not i.startswith('__'): temp = PyJsFunction(e, FunctionPrototype) attrs = {k:v for k,v in attrs.items()} attrs['value'] = temp prototype.define_own_property(i, attrs) if constructor: attrs['value'] = constructor prototype.define_own_property('constructor', attrs) PyJs.undefined = undefined PyJs.Js = staticmethod(Js) from .prototypes import jsfunction, jsobject, jsnumber, jsstring, jsboolean, jsarray, jsregexp, jserror #Object proto fill_prototype(ObjectPrototype, jsobject.ObjectPrototype, default_attrs) #Define __proto__ accessor (this cant be done by fill_prototype since) @Js def __proto__(): return this.prototype if this.prototype is not None else null getter = __proto__ @Js def __proto__(val): if val.is_object(): this.prototype = val setter = __proto__ ObjectPrototype.define_own_property('__proto__', {'set': setter, 'get': getter, 'enumerable': False, 'configurable':True}) #Function proto fill_prototype(FunctionPrototype, jsfunction.FunctionPrototype, default_attrs) #Number proto 
fill_prototype(NumberPrototype, jsnumber.NumberPrototype, default_attrs) #String proto fill_prototype(StringPrototype, jsstring.StringPrototype, default_attrs) #Boolean proto fill_prototype(BooleanPrototype, jsboolean.BooleanPrototype, default_attrs) #Array proto fill_prototype(ArrayPrototype, jsarray.ArrayPrototype, default_attrs) #Error proto fill_prototype(ErrorPrototype, jserror.ErrorPrototype, default_attrs) #RegExp proto fill_prototype(RegExpPrototype, jsregexp.RegExpPrototype, default_attrs) # add exec to regexpfunction (cant add it automatically because of its name :( RegExpPrototype.own['exec'] = RegExpPrototype.own['exec2'] del RegExpPrototype.own['exec2'] ######################################################################### # Constructors # String @Js def String(st): if not len(arguments): return Js('') return arguments[0].to_string() @Js def string_constructor(): temp = PyJsObject(prototype=StringPrototype) temp.Class = 'String' #temp.TYPE = 'String' if not len(arguments): temp.value = '' else: temp.value = arguments[0].to_string().value for i, ch in enumerate(temp.value): # this will make things long... 
temp.own[str(i)] = {'value': Js(ch), 'writable': False, 'enumerable': True, 'configurable': True} temp.own['length'] = {'value': Js(len(temp.value)), 'writable': False, 'enumerable': False, 'configurable': False} return temp String.create = string_constructor # RegExp REG_EXP_FLAGS = {'g', 'i', 'm'} @Js def RegExp(pattern, flags): if pattern.Class=='RegExp': if not flags.is_undefined(): raise MakeError('TypeError', 'Cannot supply flags when constructing one RegExp from another') # return unchanged return pattern #pattern is not a regexp if pattern.is_undefined(): pattern = '' else: pattern = pattern.to_string().value # try: # pattern = REGEXP_CONVERTER._unescape_string(pattern.to_string().value) # except: # raise MakeError('SyntaxError', 'Invalid regexp') flags = flags.to_string().value if not flags.is_undefined() else '' for flag in flags: if flag not in REG_EXP_FLAGS: raise MakeError('SyntaxError', 'Invalid flags supplied to RegExp constructor "%s"' % flag) if len(set(flags))!=len(flags): raise MakeError('SyntaxError', 'Invalid flags supplied to RegExp constructor "%s"' % flags) pattern = '/%s/'%(pattern if pattern else '(?:)') + flags return JsRegExp(pattern) RegExp.create = RegExp PyJs.RegExp = RegExp # Number @Js def Number(): if len(arguments): return arguments[0].to_number() else: return Js(0) @Js def number_constructor(): temp = PyJsObject(prototype=NumberPrototype) temp.Class = 'Number' #temp.TYPE = 'Number' if len(arguments): temp.value = arguments[0].to_number().value else: temp.value = 0 return temp Number.create = number_constructor # Boolean @Js def Boolean(value): return value.to_boolean() @Js def boolean_constructor(value): temp = PyJsObject(prototype=BooleanPrototype) temp.Class = 'Boolean' #temp.TYPE = 'Boolean' temp.value = value.to_boolean().value return temp Boolean.create = boolean_constructor ############################################################################## def appengine(code): try: return 
translator.translate_js(code.decode('utf-8')) except: return traceback.format_exc() builtins = ('true','false','null','undefined','Infinity', 'NaN') scope = dict(zip(builtins, [eval(e) for e in builtins])) JS_BUILTINS = {k:v for k,v in scope.items()} # Fill in NUM_BANK for e in xrange(-2**10,2**14): NUM_BANK[e] = Js(e) if __name__=='__main__': print(ObjectPrototype.get('toString').callprop('call')) print(FunctionPrototype.own) a= null-Js(49404) x = a.put('ser', Js('der')) print(Js(0) or Js('p') and Js(4.0000000000050000001)) FunctionPrototype.put('Chuj', Js(409)) for e in FunctionPrototype: print('Obk', e.get('__proto__').get('__proto__').get('__proto__'), e) import code s = Js(4) b = Js(6) s2 = Js(4) o = ObjectPrototype o.put('x', Js(100)) var = Scope(scope) e = code.InteractiveConsole(globals()) #e.raw_input = interactor e.interact()
67,892
Python
.py
1,648
30.938107
140
0.566935
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,827
jsparser.py
evilhero_mylar/lib/js2py/legecy_translators/jsparser.py
""" The process of translating JS will go like that: # TOP = 'imports and scope set' 1. Remove all the comments 2. Replace number, string and regexp literals with markers 4. Remove global Functions and move their translation to the TOP. Also add register code there. 5. Replace inline functions with lvals 6. Remove List and Object literals and replace them with lvals 7. Find and remove var declarations, generate python register code that would go on TOP. Here we should be left with global code only where 1 line of js code = 1 line of python code. Routine translating this code should be called glob_translate: 1. Search for outer structures and translate them using glob and inside using exps_translate exps_translate routine: 1. Remove outer {} 2. Split lines at ; 3. Convert line by line using exp_translate 4. In case of error in 3 try to insert ; according to ECMA rules and repeat 3. exp_translate routine: It takes a single line of JS code and returns a SINGLE line of Python code. Note var is not present here because it was removed in previous stages. If case of parsing errors it must return a pos of error. 1. Convert all assignment operations to put operations, this may be hard :( 2. Convert all gets and calls to get and callprop. 3. Convert unary operators like typeof, new, !, delete. Delete can be handled by replacing last get method with delete. 4. Convert remaining operators that are not handled by python eg: === and , lval format PyJsLvalNR marker PyJs(TYPE_NAME)(NR) TODO 1. Number literal replacement 2. Array literal replacement 3. Object literal replacement 5. Function replacement 4. 
Literal replacement translators """ from utils import * OP_METHODS = {'*': '__mul__', '/': '__div__', '%': '__mod__', '+': '__add__', '-': '__sub__', '<<': '__lshift__', '>>': '__rshift__', '&': '__and__', '^': '__xor__', '|': '__or__'} def dbg(source): try: with open('C:\Users\Piotrek\Desktop\dbg.py','w') as f: f.write(source) except: pass def indent(lines, ind=4): return ind*' '+lines.replace('\n', '\n'+ind*' ').rstrip(' ') def inject_before_lval(source, lval, code): if source.count(lval)>1: dbg(source) print print lval raise RuntimeError('To many lvals (%s)' % lval) elif not source.count(lval): dbg(source) print print lval assert lval not in source raise RuntimeError('No lval found "%s"' % lval) end = source.index(lval) inj = source.rfind('\n', 0, end) ind = inj while source[ind+1]==' ': ind+=1 ind -= inj return source[:inj+1]+ indent(code, ind) + source[inj+1:] def bracket_split(source, brackets=('()','{}','[]'), strip=False): """DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)""" starts = [e[0] for e in brackets] in_bracket = 0 n = 0 last = 0 while n<len(source): e = source[n] if not in_bracket and e in starts: in_bracket = 1 start = n b_start, b_end = brackets[starts.index(e)] elif in_bracket: if e==b_start: in_bracket += 1 elif e==b_end: in_bracket -= 1 if not in_bracket: if source[last:start]: yield source[last:start] last = n+1 yield source[start+strip:n+1-strip] n+=1 if source[last:]: yield source[last:] def pass_bracket(source, start, bracket='()'): """Returns content of brackets with brackets and first pos after brackets if source[start] is followed by some optional white space and brackets. Otherwise None""" e = bracket_split(source[start:],[bracket], False) try: cand = e.next() except StopIteration: return None, None if not cand.strip(): #white space... 
try: res = e.next() return res, start + len(cand) + len(res) except StopIteration: return None, None elif cand[-1] == bracket[1]: return cand, start + len(cand) else: return None, None def startswith_keyword(start, keyword): start = start.lstrip() if start.startswith(keyword): if len(keyword)<len(start): if start[len(keyword)] in IDENTIFIER_PART: return False return True return False def endswith_keyword(ending, keyword): ending = ending.rstrip() if ending.endswith(keyword): if len(keyword)<len(ending): if ending[len(ending)-len(keyword)-1] in IDENTIFIER_PART: return False return True return False def pass_white(source, start): n = start while n<len(source): if source[n] in SPACE: n += 1 else: break return n def except_token(source, start, token, throw=True): """Token can be only a single char. Returns position after token if found. Otherwise raises syntax error if throw otherwise returns None""" start = pass_white(source, start) if start<len(source) and source[start]==token: return start+1 if throw: raise SyntaxError('Missing token. 
Expected %s'%token) return None def except_keyword(source, start, keyword): """ Returns position after keyword if found else None Note: skips white space""" start = pass_white(source, start) kl = len(keyword) #keyword len if kl+start > len(source): return None if source[start:start+kl] != keyword: return None if kl+start<len(source) and source[start+kl] in IDENTIFIER_PART: return None return start + kl def parse_identifier(source, start, throw=True): """passes white space from start and returns first identifier, if identifier invalid and throw raises SyntaxError otherwise returns None""" start = pass_white(source, start) end = start if not end<len(source): if throw: raise SyntaxError('Missing identifier!') return None if source[end] not in IDENTIFIER_START: if throw: raise SyntaxError('Invalid identifier start: "%s"'%source[end]) return None end += 1 while end < len(source) and source[end] in IDENTIFIER_PART: end += 1 if not is_valid_lval(source[start:end]): if throw: raise SyntaxError('Invalid identifier name: "%s"'%source[start:end]) return None return source[start:end], end def argsplit(args, sep=','): """used to split JS args (it is not that simple as it seems because sep can be inside brackets). pass args *without* brackets! Used also to parse array and object elements, and more""" parsed_len = 0 last = 0 splits = [] for e in bracket_split(args, brackets=['()', '[]', '{}']): if e[0] not in {'(', '[', '{'}: for i, char in enumerate(e): if char==sep: splits.append(args[last:parsed_len+i]) last = parsed_len + i + 1 parsed_len += len(e) splits.append(args[last:]) return splits def split_add_ops(text): """Specialized function splitting text at add/sub operators. Operands are *not* translated. 
Example result ['op1', '+', 'op2', '-', 'op3']""" n = 0 text = text.replace('++', '##').replace('--', '@@') #text does not normally contain any of these spotted = False # set to true if noticed anything other than +- or white space last = 0 while n<len(text): e = text[n] if e=='+' or e=='-': if spotted: yield text[last:n].replace('##', '++').replace('@@', '--') yield e last = n+1 spotted = False elif e=='/' or e=='*' or e=='%': spotted = False elif e!=' ': spotted = True n+=1 yield text[last:n].replace('##', '++').replace('@@', '--') def split_at_any(text, lis, translate=False, not_before=[], not_after=[], validitate=None): """ doc """ lis.sort(key=lambda x: len(x), reverse=True) last = 0 n = 0 text_len = len(text) while n<text_len: if any(text[:n].endswith(e) for e in not_before): #Cant end with end before n+=1 continue for e in lis: s = len(e) if s+n>text_len: continue if validitate and not validitate(e, text[:n], text[n+s:]): continue if any(text[n+s:].startswith(e) for e in not_after): #Cant end with end before n+=1 break if e==text[n:n+s]: yield text[last:n] if not translate else translate(text[last:n]) yield e n+=s last = n break else: n+=1 yield text[last:n] if not translate else translate(text[last:n]) def split_at_single(text, sep, not_before=[], not_after=[]): """Works like text.split(sep) but separated fragments cant end with not_before or start with not_after""" n = 0 lt, s= len(text), len(sep) last = 0 while n<lt: if not s+n>lt: if sep==text[n:n+s]: if any(text[last:n].endswith(e) for e in not_before): pass elif any(text[n+s:].startswith(e) for e in not_after): pass else: yield text[last:n] last = n+s n += s-1 n+=1 yield text[last:]
9,809
Python
.py
267
28.614232
117
0.587393
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,828
flow.py
evilhero_mylar/lib/js2py/legecy_translators/flow.py
"""This module translates JS flow into PY flow. Translates: IF ELSE DO WHILE WHILE FOR 123 FOR iter CONTINUE, BREAK, RETURN, LABEL, THROW, TRY, SWITCH """ from utils import * from jsparser import * from nodevisitor import exp_translator import random TO_REGISTER = [] CONTINUE_LABEL = 'JS_CONTINUE_LABEL_%s' BREAK_LABEL = 'JS_BREAK_LABEL_%s' PREPARE = '''HOLDER = var.own.get(NAME)\nvar.force_own_put(NAME, PyExceptionToJs(PyJsTempException))\n''' RESTORE = '''if HOLDER is not None:\n var.own[NAME] = HOLDER\nelse:\n del var.own[NAME]\ndel HOLDER\n''' TRY_CATCH = '''%stry:\nBLOCKfinally:\n%s''' % (PREPARE, indent(RESTORE)) def get_continue_label(label): return CONTINUE_LABEL%label.encode('hex') def get_break_label(label): return BREAK_LABEL%label.encode('hex') def pass_until(source, start, tokens=(';',)): while start < len(source) and source[start] not in tokens: start+=1 return start+1 def do_bracket_exp(source, start, throw=True): bra, cand = pass_bracket(source, start, '()') if throw and not bra: raise SyntaxError('Missing bracket expression') bra = exp_translator(bra[1:-1]) if throw and not bra: raise SyntaxError('Empty bracket condition') return bra, cand if bra else start def do_if(source, start): start += 2 # pass this if bra, start = do_bracket_exp(source, start, throw=True) statement, start = do_statement(source, start) if statement is None: raise SyntaxError('Invalid if statement') translated = 'if %s:\n'%bra+indent(statement) elseif = except_keyword(source, start, 'else') is_elseif = False if elseif: start = elseif if except_keyword(source, start, 'if'): is_elseif = True elseif, start = do_statement(source, start) if elseif is None: raise SyntaxError('Invalid if statement)') if is_elseif: translated += 'el' + elseif else: translated += 'else:\n'+ indent(elseif) return translated, start def do_statement(source, start): """returns none if not found other functions that begin with 'do_' raise also this do_ type function passes white space""" start = 
pass_white(source, start) # start is the fist position after initial start that is not a white space or \n if not start < len(source): #if finished parsing return None return None, start if any(startswith_keyword(source[start:], e) for e in {'case', 'default'}): return None, start rest = source[start:] for key, meth in KEYWORD_METHODS.iteritems(): # check for statements that are uniquely defined by their keywords if rest.startswith(key): # has to startwith this keyword and the next letter after keyword must be either EOF or not in IDENTIFIER_PART if len(key)==len(rest) or rest[len(key)] not in IDENTIFIER_PART: return meth(source, start) if rest[0] == '{': #Block return do_block(source, start) # Now only label and expression left cand = parse_identifier(source, start, False) if cand is not None: # it can mean that its a label label, cand_start = cand cand_start = pass_white(source, cand_start) if source[cand_start]==':': return do_label(source, start) return do_expression(source, start) def do_while(source, start): start += 5 # pass while bra, start = do_bracket_exp(source, start, throw=True) statement, start = do_statement(source, start) if statement is None: raise SyntaxError('Missing statement to execute in while loop!') return 'while %s:\n'%bra + indent(statement), start def do_dowhile(source, start): start += 2 # pass do statement, start = do_statement(source, start) if statement is None: raise SyntaxError('Missing statement to execute in do while loop!') start = except_keyword(source, start, 'while') if not start: raise SyntaxError('Missing while keyword in do-while loop') bra, start = do_bracket_exp(source, start, throw=True) statement += 'if not %s:\n' % bra + indent('break\n') return 'while 1:\n' + indent(statement), start def do_block(source, start): bra, start = pass_bracket(source, start, '{}') #print source[start:], bra #return bra +'\n', start if bra is None: raise SyntaxError('Missing block ( {code} )') code = '' bra = bra[1:-1]+';' bra_pos = 0 while 
bra_pos<len(bra): st, bra_pos = do_statement(bra, bra_pos) if st is None: break code += st bra_pos = pass_white(bra, bra_pos) if bra_pos<len(bra): raise SyntaxError('Block has more code that could not be parsed:\n'+bra[bra_pos:]) return code, start def do_empty(source, start): return 'pass\n', start + 1 def do_expression(source, start): start = pass_white(source, start) end = pass_until(source, start, tokens=(';',)) if end==start+1: #empty statement return 'pass\n', end # AUTOMATIC SEMICOLON INSERTION FOLLOWS # Without ASI this function would end with: return exp_translator(source[start:end].rstrip(';'))+'\n', end # ASI makes things a bit more complicated: # we will try to parse as much as possible, inserting ; in place of last new line in case of error rev = False rpos = 0 while True: try: code = source[start:end].rstrip(';') cand = exp_translator(code)+'\n', end just_to_test = compile(cand[0], '', 'exec') return cand except Exception as e: if not rev: rev = source[start:end][::-1] lpos = rpos while True: rpos = pass_until(rev, rpos, LINE_TERMINATOR) if rpos>=len(rev): raise if filter(lambda x: x not in SPACE, rev[lpos:rpos]): break end = start + len(rev) - rpos + 1 def do_var(source, start): #todo auto ; insertion start += 3 #pass var end = pass_until(source, start, tokens=(';',)) defs = argsplit(source[start:end-1]) # defs is the list of defined vars with optional initializer code = '' for de in defs: var, var_end = parse_identifier(de, 0, True) TO_REGISTER.append(var) var_end = pass_white(de, var_end) if var_end<len(de): # we have something more to parse... It has to start with = if de[var_end] != '=': raise SyntaxError('Unexpected initializer in var statement. 
Expected "=", got "%s"'%de[var_end]) code += exp_translator(de) + '\n' if not code.strip(): code = 'pass\n' return code, end def do_label(source, start): label, end = parse_identifier(source, start) end = pass_white(source, end) #now source[end] must be : assert source[end]==':' end += 1 inside, end = do_statement(source, end) if inside is None: raise SyntaxError('Missing statement after label') defs = '' if inside.startswith('while ') or inside.startswith('for ') or inside.startswith('#for'): # we have to add contine label as well... # 3 or 1 since #for loop type has more lines before real for. sep = 1 if not inside.startswith('#for') else 3 cont_label = get_continue_label(label) temp = inside.split('\n') injected = 'try:\n'+'\n'.join(temp[sep:]) injected += 'except %s:\n pass\n'%cont_label inside = '\n'.join(temp[:sep])+'\n'+indent(injected) defs += 'class %s(Exception): pass\n'%cont_label break_label = get_break_label(label) inside = 'try:\n%sexcept %s:\n pass\n'% (indent(inside), break_label) defs += 'class %s(Exception): pass\n'%break_label return defs + inside, end def do_for(source, start): start += 3 # pass for entered = start bra, start = pass_bracket(source, start , '()') inside, start = do_statement(source, start) if inside is None: raise SyntaxError('Missing statement after for') bra = bra[1:-1] if ';' in bra: init = argsplit(bra, ';') if len(init)!=3: raise SyntaxError('Invalid for statement') args = [] for i, item in enumerate(init): end = pass_white(item, 0) if end==len(item): args.append('' if i!=1 else '1') continue if not i and except_keyword(item, end, 'var') is not None: # var statement args.append(do_var(item, end)[0]) continue args.append(do_expression(item, end)[0]) return '#for JS loop\n%swhile %s:\n%s%s\n' %(args[0], args[1].strip(), indent(inside), indent(args[2])), start # iteration end = pass_white(bra, 0) register = False if bra[end:].startswith('var '): end+=3 end = pass_white(bra, end) register = True name, end = parse_identifier(bra, 
end) if register: TO_REGISTER.append(name) end = pass_white(bra, end) if bra[end:end+2]!='in' or bra[end+2] in IDENTIFIER_PART: #print source[entered-10:entered+50] raise SyntaxError('Invalid "for x in y" statement') end+=2 # pass in exp = exp_translator(bra[end:]) res = 'for temp in %s:\n' % exp res += indent('var.put(%s, temp)\n' % name.__repr__()) + indent(inside) return res, start # todo - IMPORTANT def do_continue(source, start, name='continue'): start += len(name) #pass continue start = pass_white(source, start) if start<len(source) and source[start] == ';': return '%s\n'%name, start+1 # labeled statement or error label, start = parse_identifier(source, start) start = pass_white(source, start) if start<len(source) and source[start] != ';': raise SyntaxError('Missing ; after label name in %s statement'%name) return 'raise %s("%s")\n' % (get_continue_label(label) if name=='continue' else get_break_label(label), name), start+1 def do_break(source, start): return do_continue(source, start, 'break') def do_return(source, start): start += 6 # pass return end = source.find(';', start)+1 if end==-1: end = len(source) trans = exp_translator(source[start:end].rstrip(';')) return 'return %s\n' % (trans if trans else "var.get('undefined')"), end # todo later?- Also important def do_throw(source, start): start += 5 # pass throw end = source.find(';', start)+1 if not end: end = len(source) trans = exp_translator(source[start:end].rstrip(';')) if not trans: raise SyntaxError('Invalid throw statement: nothing to throw') res = 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % trans return res, end def do_try(source, start): start += 3 # pass try block, start = do_block(source, start) result = 'try:\n%s' %indent(block) catch = except_keyword(source, start, 'catch') if catch: bra, catch = pass_bracket(source, catch, '()') bra = bra[1:-1] identifier, bra_end = parse_identifier(bra, 0) holder = 'PyJsHolder_%s_%d'%(identifier.encode('hex'), 
random.randrange(1e8)) identifier = identifier.__repr__() bra_end = pass_white(bra, bra_end) if bra_end<len(bra): raise SyntaxError('Invalid content of catch statement') result += 'except PyJsException as PyJsTempException:\n' block, catch = do_block(source, catch) # fill in except ( catch ) block and remember to recover holder variable to its previous state result += indent(TRY_CATCH.replace('HOLDER', holder).replace('NAME', identifier).replace('BLOCK', indent(block))) start = max(catch, start) final = except_keyword(source, start, 'finally') if not (final or catch): raise SyntaxError('Try statement has to be followed by catch or finally') if not final: return result, start # translate finally statement block, start = do_block(source, final) return result + 'finally:\n%s' % indent(block), start def do_debugger(source, start): start += 8 # pass debugger end = pass_white(source, start) if end<len(source) and source[end]==';': end += 1 return 'pass\n', end #ignore errors... # todo automatic ; insertion. fuck this crappy feature # Least important def do_switch(source, start): start += 6 # pass switch code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n') # parse value of check val, start = pass_bracket(source, start, '()') if val is None: raise SyntaxError('Missing () after switch statement') if not val.strip(): raise SyntaxError('Missing content inside () after switch statement') code = code % exp_translator(val) bra, start = pass_bracket(source, start, '{}') if bra is None: raise SyntaxError('Missing block {} after switch statement') bra_pos = 0 bra = bra[1:-1] + ';' while True: case = except_keyword(bra, bra_pos, 'case') default = except_keyword(bra, bra_pos, 'default') assert not (case and default) if case or default: # this ?: expression makes things much harder.... case_code = None if case: case_code = 'if SWITCHED or PyJsStrictEq(CONDITION, %s):\n' # we are looking for a first : with count 1. ? gives -1 and : gives +1. 
count = 0 for pos, e in enumerate(bra[case:], case): if e=='?': count -= 1 elif e==':': count += 1 if count==1: break else: raise SyntaxError('Missing : token after case in switch statement') case_condition = exp_translator(bra[case:pos]) # switch {case CONDITION: statements} case_code = case_code % case_condition case = pos + 1 if default: case = except_token(bra, default, ':') case_code = 'if True:\n' # now parse case statements (things after ':' ) cand, case = do_statement(bra, case) while cand: case_code += indent(cand) cand, case = do_statement(bra, case) case_code += indent('SWITCHED = True\n') code += indent(case_code) bra_pos = case else: break # prevent infinite loop :) code += indent('break\n') return code, start def do_pyimport(source, start): start += 8 lib, start = parse_identifier(source, start) jlib = 'PyImport_%s' % lib code = 'import %s as %s\n' % (lib, jlib) #check whether valid lib name... try: compile(code, '', 'exec') except: raise SyntaxError('Invalid Python module name (%s) in pyimport statement'%lib) # var.pyimport will handle module conversion to PyJs object code += 'var.pyimport(%s, %s)\n' % (repr(lib), jlib) return code, start def do_with(source, start): raise NotImplementedError('With statement is not implemented yet :(') KEYWORD_METHODS = {'do': do_dowhile, 'while': do_while, 'if': do_if, 'throw': do_throw, 'return': do_return, 'continue': do_continue, 'break': do_break, 'try': do_try, 'for': do_for, 'switch': do_switch, 'var': do_var, 'debugger': do_debugger, # this one does not do anything 'with': do_with, 'pyimport': do_pyimport } #Also not specific statements (harder to detect) # Block {} # Expression or Empty Statement # Label # # Its easy to recognize block but harder to distinguish between label and expression statement def translate_flow(source): """Source cant have arrays, object, constant or function literals. 
Returns PySource and variables to register""" global TO_REGISTER TO_REGISTER = [] return do_block('{%s}'%source, 0)[0], TO_REGISTER if __name__=='__main__': #print do_dowhile('do {} while(k+f)', 0)[0] #print 'e: "%s"'%do_expression('++(c?g:h); mj', 0)[0] print translate_flow('a; yimport test')[0]
16,471
Python
.py
397
33.836272
122
0.609491
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,829
nodevisitor.py
evilhero_mylar/lib/js2py/legecy_translators/nodevisitor.py
from jsparser import * from utils import * import re from utils import * #Note all white space sent to this module must be ' ' so no '\n' REPL = {} #PROBLEMS # <<=, >>=, >>>= # they are unusual so I will not fix that now. a++ +b works fine and a+++++b (a++ + ++b) does not work even in V8 ASSIGNMENT_MATCH = '(?<!=|!|<|>)=(?!=)' def unary_validitator(keyword, before, after): if keyword[-1] in IDENTIFIER_PART: if not after or after[0] in IDENTIFIER_PART: return False if before and before[-1] in IDENTIFIER_PART: # I am not sure here... return False return True def comb_validitator(keyword, before, after): if keyword=='instanceof' or keyword=='in': if before and before[-1] in IDENTIFIER_PART: return False elif after and after[0] in IDENTIFIER_PART: return False return True def bracket_replace(code): new = '' for e in bracket_split(code, ['()','[]'], False): if e[0]=='[': name = '#PYJSREPL'+str(len(REPL))+'{' new+= name REPL[name] = e elif e[0]=='(': # can be a function call name = '@PYJSREPL'+str(len(REPL))+'}' new+= name REPL[name] = e else: new+=e return new class NodeVisitor: def __init__(self, code): self.code = code def rl(self, lis, op): """performs this operation on a list from *right to left* op must take 2 args a,b,c => op(a, op(b, c))""" it = reversed(lis) res = trans(it.next()) for e in it: e = trans(e) res = op(e, res) return res def lr(self, lis, op): """performs this operation on a list from *left to right* op must take 2 args a,b,c => op(op(a, b), c)""" it = iter(lis) res = trans(it.next()) for e in it: e = trans(e) res = op(res, e) return res def translate(self): """Translates outer operation and calls translate on inner operation. 
Returns fully translated code.""" if not self.code: return '' new = bracket_replace(self.code) #Check comma operator: cand = new.split(',') #every comma in new must be an operator if len(cand)>1: #LR return self.lr(cand, js_comma) #Check = operator: # dont split at != or !== or == or === or <= or >= #note <<=, >>= or this >>> will NOT be supported # maybe I will change my mind later # Find this crappy ?: if '?' in new: cond_ind = new.find('?') tenary_start = 0 for ass in re.finditer(ASSIGNMENT_MATCH, new): cand = ass.span()[1] if cand < cond_ind: tenary_start = cand else: break actual_tenary = new[tenary_start:] spl = ''.join(split_at_any(new, [':', '?'], translate=trans)) tenary_translation = transform_crap(spl) assignment = new[:tenary_start] + ' PyJsConstantTENARY' return trans(assignment).replace('PyJsConstantTENARY', tenary_translation) cand = list(split_at_single(new, '=', ['!', '=','<','>'], ['='])) if len(cand)>1: # RL it = reversed(cand) res = trans(it.next()) for e in it: e = e.strip() if not e: raise SyntaxError('Missing left-hand in assignment!') op = '' if e[-2:] in OP_METHODS: op = ','+e[-2:].__repr__() e = e[:-2] elif e[-1:] in OP_METHODS: op = ','+e[-1].__repr__() e = e[:-1] e = trans(e) #Now replace last get method with put and change args c = list(bracket_split(e, ['()'])) beg, arglist = ''.join(c[:-1]).strip(), c[-1].strip() #strips just to make sure... I will remove it later if beg[-4:]!='.get': raise SyntaxError('Invalid left-hand side in assignment') beg = beg[0:-3]+'put' arglist = arglist[0:-1]+', '+res+op+')' res = beg+arglist return res #Now check remaining 2 arg operators that are not handled by python #They all have Left to Right (LR) associativity order = [OR, AND, BOR, BXOR, BAND, EQS, COMPS, BSHIFTS, ADDS, MULTS] # actually we dont need OR and AND because they can be handled easier. 
But just for fun dangerous = ['<', '>'] for typ in order: #we have to use special method for ADDS since they can be also unary operation +/++ or -/-- FUCK if '+' in typ: cand = list(split_add_ops(new)) else: #dont translate. cant start or end on dangerous op. cand = list(split_at_any(new, typ.keys(), False, dangerous, dangerous,validitate=comb_validitator)) if not len(cand)>1: continue n = 1 res = trans(cand[0]) if not res: raise SyntaxError("Missing operand!") while n<len(cand): e = cand[n] if not e: raise SyntaxError("Missing operand!") if n%2: op = typ[e] else: res = op(res, trans(e)) n+=1 return res #Now replace unary operators - only they are left cand = list(split_at_any(new, UNARY.keys(), False, validitate=unary_validitator)) if len(cand)>1: #contains unary operators if '++' in cand or '--' in cand: #it cant contain both ++ and -- if '--' in cand: op = '--' meths = js_post_dec, js_pre_dec else: op = '++' meths = js_post_inc, js_pre_inc pos = cand.index(op) if cand[pos-1].strip(): # post increment a = cand[pos-1] meth = meths[0] elif cand[pos+1].strip(): #pre increment a = cand[pos+1] meth = meths[1] else: raise SyntaxError('Invalid use of ++ operator') if cand[pos+2:]: raise SyntaxError('Too many operands') operand = meth(trans(a)) cand = cand[:pos-1] # now last cand should be operand and every other odd element should be empty else: operand = trans(cand[-1]) del cand[-1] for i, e in enumerate(reversed(cand)): if i%2: if e.strip(): raise SyntaxError('Too many operands') else: operand = UNARY[e](operand) return operand #Replace brackets if new[0]=='@' or new[0]=='#': if len(list(bracket_split(new, ('#{','@}')))) ==1: # we have only one bracket, otherwise pseudobracket like @@.... assert new in REPL if new[0]=='#': raise SyntaxError('[] cant be used as brackets! 
Use () instead.') return '('+trans(REPL[new][1:-1])+')' #Replace function calls and prop getters # 'now' must be a reference like: a or b.c.d but it can have also calls or getters ( for example a["b"](3)) #From here @@ means a function call and ## means get operation (note they dont have to present) it = bracket_split(new, ('#{','@}')) res = [] for e in it: if e[0]!='#' and e[0]!='@': res += [x.strip() for x in e.split('.')] else: res += [e.strip()] # res[0] can be inside @@ (name)... res = filter(lambda x: x, res) if is_internal(res[0]): out = res[0] elif res[0][0] in {'#', '@'}: out = '('+trans(REPL[res[0]][1:-1])+')' elif is_valid_lval(res[0]) or res[0] in {'this', 'false', 'true', 'null'}: out = 'var.get('+res[0].__repr__()+')' else: if is_reserved(res[0]): raise SyntaxError('Unexpected reserved word: "%s"'%res[0]) raise SyntaxError('Invalid identifier: "%s"'%res[0]) if len(res)==1: return out n = 1 while n<len(res): #now every func call is a prop call e = res[n] if e[0]=='@': # direct call out += trans_args(REPL[e]) n += 1 continue args = False #assume not prop call if n+1<len(res) and res[n+1][0]=='@': #prop call args = trans_args(REPL[res[n+1]])[1:] if args!=')': args = ','+args if e[0]=='#': prop = trans(REPL[e][1:-1]) else: if not is_lval(e): raise SyntaxError('Invalid identifier: "%s"'%e) prop = e.__repr__() if args: # prop call n+=1 out += '.callprop('+prop+args else: #prop get out += '.get('+prop+')' n+=1 return out def js_comma(a, b): return 'PyJsComma('+a+','+b+')' def js_or(a, b): return '('+a+' or '+b+')' def js_bor(a, b): return '('+a+'|'+b+')' def js_bxor(a, b): return '('+a+'^'+b+')' def js_band(a, b): return '('+a+'&'+b+')' def js_and(a, b): return '('+a+' and '+b+')' def js_strict_eq(a, b): return 'PyJsStrictEq('+a+','+b+')' def js_strict_neq(a, b): return 'PyJsStrictNeq('+a+','+b+')' #Not handled by python in the same way like JS. For example 2==2==True returns false. # In JS above would return true so we need brackets. 
def js_abstract_eq(a, b): return '('+a+'=='+b+')' #just like == def js_abstract_neq(a, b): return '('+a+'!='+b+')' def js_lt(a, b): return '('+a+'<'+b+')' def js_le(a, b): return '('+a+'<='+b+')' def js_ge(a, b): return '('+a+'>='+b+')' def js_gt(a, b): return '('+a+'>'+b+')' def js_in(a, b): return b+'.contains('+a+')' def js_instanceof(a, b): return a+'.instanceof('+b+')' def js_lshift(a, b): return '('+a+'<<'+b+')' def js_rshift(a, b): return '('+a+'>>'+b+')' def js_shit(a, b): return 'PyJsBshift('+a+','+b+')' def js_add(a, b): # To simplify later process of converting unary operators + and ++ return '(%s+%s)'%(a, b) def js_sub(a, b): # To simplify return '(%s-%s)'%(a, b) def js_mul(a, b): return '('+a+'*'+b+')' def js_div(a, b): return '('+a+'/'+b+')' def js_mod(a, b): return '('+a+'%'+b+')' def js_typeof(a): cand = list(bracket_split(a, ('()',))) if len(cand)==2 and cand[0]=='var.get': return cand[0]+cand[1][:-1]+',throw=False).typeof()' return a+'.typeof()' def js_void(a): return '('+a+')' def js_new(a): cands = list(bracket_split(a, ('()',))) lim = len(cands) if lim < 2: return a + '.create()' n = 0 while n < lim: c = cands[n] if c[0]=='(': if cands[n-1].endswith('.get') and n+1>=lim: # last get operation. return a + '.create()' elif cands[n-1][0]=='(': return ''.join(cands[:n])+'.create' + c + ''.join(cands[n+1:]) elif cands[n-1]=='.callprop': beg = ''.join(cands[:n-1]) args = argsplit(c[1:-1],',') prop = args[0] new_args = ','.join(args[1:]) create = '.get(%s).create(%s)' % (prop, new_args) return beg + create + ''.join(cands[n+1:]) n+=1 return a + '.create()' def js_delete(a): #replace last get with delete. c = list(bracket_split(a, ['()'])) beg, arglist = ''.join(c[:-1]).strip(), c[-1].strip() #strips just to make sure... 
I will remove it later if beg[-4:]!='.get': raise SyntaxError('Invalid delete operation') return beg[:-3]+'delete'+arglist def js_neg(a): return '(-'+a+')' def js_pos(a): return '(+'+a+')' def js_inv(a): return '(~'+a+')' def js_not(a): return a+'.neg()' def postfix(a, inc, post): bra = list(bracket_split(a, ('()',))) meth = bra[-2] if not meth.endswith('get'): raise SyntaxError('Invalid ++ or -- operation.') bra[-2] = bra[-2][:-3] + 'put' bra[-1] = '(%s,%s%sJs(1))' % (bra[-1][1:-1], a, '+' if inc else '-') res = ''.join(bra) return res if not post else '(%s%sJs(1))' % (res, '-' if inc else '+') def js_pre_inc(a): return postfix(a, True, False) def js_post_inc(a): return postfix(a, True, True) def js_pre_dec(a): return postfix(a, False, False) def js_post_dec(a): return postfix(a, False, True) OR = {'||': js_or} AND = {'&&': js_and} BOR = {'|': js_bor} BXOR = {'^': js_bxor} BAND = {'&': js_band} EQS = {'===': js_strict_eq, '!==': js_strict_neq, '==': js_abstract_eq, # we need == and != too. Read a note above method '!=': js_abstract_neq} #Since JS does not have chained comparisons we need to implement all cmp methods. 
COMPS = {'<': js_lt, '<=': js_le, '>=': js_ge, '>': js_gt, 'instanceof': js_instanceof, #todo change to validitate 'in': js_in} BSHIFTS = {'<<': js_lshift, '>>': js_rshift, '>>>': js_shit} ADDS = {'+': js_add, '-': js_sub} MULTS = {'*': js_mul, '/': js_div, '%': js_mod} #Note they dont contain ++ and -- methods because they both have 2 different methods # correct method will be found automatically in translate function UNARY = {'typeof': js_typeof, 'void': js_void, 'new': js_new, 'delete': js_delete, '!': js_not, '-': js_neg, '+': js_pos, '~': js_inv, '++': None, '--': None } def transform_crap(code): #needs some more tests """Transforms this ?: crap into if else python syntax""" ind = code.rfind('?') if ind==-1: return code sep = code.find(':', ind) if sep==-1: raise SyntaxError('Invalid ?: syntax (probably missing ":" )') beg = max(code.rfind(':', 0, ind), code.find('?', 0, ind))+1 end = code.find(':',sep+1) end = len(code) if end==-1 else end formula = '('+code[ind+1:sep]+' if '+code[beg:ind]+' else '+code[sep+1:end]+')' return transform_crap(code[:beg]+formula+code[end:]) from code import InteractiveConsole #e = InteractiveConsole(globals()).interact() import traceback def trans(code): return NodeVisitor(code.strip()).translate().strip() #todo finish this trans args def trans_args(code): new = bracket_replace(code.strip()[1:-1]) args = ','.join(trans(e) for e in new.split(',')) return '(%s)'%args EXP = 0 def exp_translator(code): global REPL, EXP EXP += 1 REPL = {} #print EXP, code code = code.replace('\n', ' ') assert '@' not in code assert ';' not in code assert '#' not in code #if not code.strip(): #? # return 'var.get("undefined")' try: return trans(code) except: #print '\n\ntrans failed on \n\n' + code #raw_input('\n\npress enter') raise if __name__=='__main__': #print 'Here', trans('(eee ) . ii [ PyJsMarker ] [ jkj ] ( j , j ) . 
# jiji (h , ji , i)(non )( )()()()') for e in xrange(3): print exp_translator('jk = kk.ik++') #First line translated with PyJs: PyJsStrictEq(PyJsAdd((Js(100)*Js(50)),Js(30)), Js("5030")), yay! print exp_translator('delete a.f')
15,965
Python
.py
430
27.693023
126
0.495183
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,830
constants.py
evilhero_mylar/lib/js2py/legecy_translators/constants.py
from string import ascii_lowercase, digits ################################## StringName = u'PyJsConstantString%d_' NumberName = u'PyJsConstantNumber%d_' RegExpName = u'PyJsConstantRegExp%d_' ################################## ALPHAS = set(ascii_lowercase+ ascii_lowercase.upper()) NUMS = set(digits) IDENTIFIER_START = ALPHAS.union(NUMS) ESCAPE_CHARS = {'n', '0', 'b', 'f', 'r', 't', 'v', '"', "'", '\\'} OCTAL = {'0', '1', '2', '3', '4', '5', '6', '7'} HEX = set('0123456789abcdefABCDEF') from utils import * IDENTIFIER_PART = IDENTIFIER_PART.union({'.'}) def _is_cancelled(source, n): cancelled = False k = 0 while True: k+=1 if source[n-k]!='\\': break cancelled = not cancelled return cancelled def _ensure_regexp(source, n): #<- this function has to be improved '''returns True if regexp starts at n else returns False checks whether it is not a division ''' markers = '(+~"\'=[%:?!*^|&-,;/\\' k = 0 while True: k+=1 if n-k<0: return True char = source[n-k] if char in markers: return True if char!=' ' and char!='\n': break return False def parse_num(source, start, charset): """Returns a first index>=start of chat not in charset""" while start<len(source) and source[start] in charset: start+=1 return start def parse_exponent(source, start): """returns end of exponential, raises SyntaxError if failed""" if not source[start] in {'e', 'E'}: if source[start] in IDENTIFIER_PART: raise SyntaxError('Invalid number literal!') return start start += 1 if source[start] in {'-', '+'}: start += 1 FOUND = False # we need at least one dig after exponent while source[start] in NUMS: FOUND = True start+=1 if not FOUND or source[start] in IDENTIFIER_PART: raise SyntaxError('Invalid number literal!') return start def remove_constants(source): '''Replaces Strings and Regexp literals in the source code with identifiers and *removes comments*. 
Identifier is of the format: PyJsStringConst(String const number)_ - for Strings PyJsRegExpConst(RegExp const number)_ - for RegExps Returns dict which relates identifier and replaced constant. Removes single line and multiline comments from JavaScript source code Pseudo comments (inside strings) will not be removed. For example this line: var x = "/*PSEUDO COMMENT*/ TEXT //ANOTHER PSEUDO COMMENT" will be unaltered''' source=' '+source+'\n' comments = [] inside_comment, single_comment = False, False inside_single, inside_double = False, False inside_regexp = False regexp_class_count = 0 n = 0 while n < len(source): char = source[n] if char=='"' and not (inside_comment or inside_single or inside_regexp): if not _is_cancelled(source, n): if inside_double: inside_double[1] = n+1 comments.append(inside_double) inside_double = False else: inside_double = [n, None, 0] elif char=="'" and not (inside_comment or inside_double or inside_regexp): if not _is_cancelled(source, n): if inside_single: inside_single[1] = n+1 comments.append(inside_single) inside_single = False else: inside_single = [n, None, 0] elif (inside_single or inside_double): if char in LINE_TERMINATOR: if _is_cancelled(source, n): if char==CR and source[n+1]==LF: n+=1 n+=1 continue else: raise SyntaxError('Invalid string literal. Line terminators must be escaped!') else: if inside_comment: if single_comment: if char in LINE_TERMINATOR: inside_comment[1] = n comments.append(inside_comment) inside_comment = False single_comment = False else: # Multiline if char=='/' and source[n-1]=='*': inside_comment[1] = n+1 comments.append(inside_comment) inside_comment = False elif inside_regexp: if not quiting_regexp: if char in LINE_TERMINATOR: raise SyntaxError('Invalid regexp literal. 
Line terminators cant appear!') if _is_cancelled(source, n): n+=1 continue if char=='[': regexp_class_count += 1 elif char==']': regexp_class_count = max(regexp_class_count-1, 0) elif char=='/' and not regexp_class_count: quiting_regexp = True else: if char not in IDENTIFIER_START: inside_regexp[1] = n comments.append(inside_regexp) inside_regexp = False elif char=='/' and source[n-1]=='/': single_comment = True inside_comment = [n-1, None, 1] elif char=='*' and source[n-1]=='/': inside_comment = [n-1, None, 1] elif char=='/' and source[n+1] not in ('/', '*'): if not _ensure_regexp(source, n): #<- improve this one n+=1 continue #Probably just a division quiting_regexp = False inside_regexp = [n, None, 2] elif not (inside_comment or inside_regexp): if (char in NUMS and source[n-1] not in IDENTIFIER_PART) or char=='.': if char=='.': k = parse_num(source,n+1, NUMS) if k==n+1: # just a stupid dot... n+=1 continue k = parse_exponent(source, k) elif char=='0' and source[n+1] in {'x', 'X'}: #Hex number probably k = parse_num(source, n+2, HEX) if k==n+2 or source[k] in IDENTIFIER_PART: raise SyntaxError('Invalid hex literal!') else: #int or exp or flot or exp flot k = parse_num(source, n+1, NUMS) if source[k]=='.': k = parse_num(source, k+1, NUMS) k = parse_exponent(source, k) comments.append((n, k, 3)) n = k continue n+=1 res = '' start = 0 count = 0 constants = {} for end, next_start, typ in comments: res += source[start:end] start = next_start if typ==0: # String name = StringName elif typ==1: # comment continue elif typ==2: # regexp name = RegExpName elif typ==3: # number name = NumberName else: raise RuntimeError() res += ' '+name % count+' ' constants[name % count] = source[end: next_start] count += 1 res+=source[start:] # remove this stupid white space for e in WHITE: res = res.replace(e, ' ') res = res.replace(CR+LF, '\n') for e in LINE_TERMINATOR: res = res.replace(e, '\n') return res.strip(), constants def recover_constants(py_source, replacements): #now has n^2 
complexity. improve to n '''Converts identifiers representing Js constants to the PyJs constants PyJsNumberConst_1_ which has the true value of 5 will be converted to PyJsNumber(5)''' for identifier, value in replacements.iteritems(): if identifier.startswith('PyJsConstantRegExp'): py_source = py_source.replace(identifier, 'JsRegExp(%s)'%repr(value)) elif identifier.startswith('PyJsConstantString'): py_source = py_source.replace(identifier, 'Js(u%s)' % unify_string_literals(value)) else: py_source = py_source.replace(identifier, 'Js(%s)'%value) return py_source def unify_string_literals(js_string): """this function parses the string just like javascript for example literal '\d' in JavaScript would be interpreted as 'd' - backslash would be ignored and in Pyhon this would be interpreted as '\\d' This function fixes this problem.""" n = 0 res = '' limit = len(js_string) while n < limit: char = js_string[n] if char=='\\': new, n = do_escape(js_string, n) res += new else: res += char n += 1 return res def unify_regexp_literals(js): pass def do_escape(source, n): """Its actually quite complicated to cover every case :) http://www.javascriptkit.com/jsref/escapesequence.shtml""" if not n+1 < len(source): return '' # not possible here but can be possible in general case. 
if source[n+1] in LINE_TERMINATOR: if source[n+1]==CR and n+2<len(source) and source[n+2]==LF: return source[n:n+3], n+3 return source[n:n+2], n+2 if source[n+1] in ESCAPE_CHARS: return source[n:n+2], n+2 if source[n+1]in {'x', 'u'}: char, length = ('u', 4) if source[n+1]=='u' else ('x', 2) n+=2 end = parse_num(source, n, HEX) if end-n < length: raise SyntaxError('Invalid escape sequence!') #if length==4: # return unichr(int(source[n:n+4], 16)), n+4 # <- this was a very bad way of solving this problem :) return source[n-2:n+length], n+length if source[n+1] in OCTAL: n += 1 end = parse_num(source, n, OCTAL) end = min(end, n+3) # cant be longer than 3 # now the max allowed is 377 ( in octal) and 255 in decimal max_num = 255 num = 0 len_parsed = 0 for e in source[n:end]: cand = 8*num + int(e) if cand > max_num: break num = cand len_parsed += 1 # we have to return in a different form because python may want to parse more... # for example '\777' will be parsed by python as a whole while js will use only \77 return '\\' + hex(num)[1:], n + len_parsed return source[n+1], n+2 #####TEST###### if __name__=='__main__': test = (''' ''') t, d = remove_constants(test) print t, d
10,888
Python
.py
270
28.940741
111
0.530345
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,831
exps.py
evilhero_mylar/lib/js2py/legecy_translators/exps.py
""" exp_translate routine: It takes a single line of JS code and returns a SINGLE line of Python code. Note var is not present here because it was removed in previous stages. Also remove this useless void keyword If case of parsing errors it must return a pos of error. 1. Convert all assignment operations to put operations, this may be hard :( DONE, wasn't that bad 2. Convert all gets and calls to get and callprop. 3. Convert unary operators like typeof, new, !, delete, ++, -- Delete can be handled by replacing last get method with delete. 4. Convert remaining operators that are not handled by python: &&, || <= these should be easy simply replace && by and and || by or === and !== comma operator , in, instanceof and finally :? NOTES: Strings and other literals are not present so each = means assignment """ from utils import * from jsparser import * def exps_translator(js): #Check () {} and [] nums ass = assignment_translator(js) # Step 1 def assignment_translator(js): sep = js.split(',') res = sep[:] for i, e in enumerate(sep): if '=' not in e: # no need to convert continue res[i] = bass_translator(e) return ','.join(res) def bass_translator(s): # I hope that I will not have to fix any bugs here because it will be terrible if '(' in s or '[' in s: converted = '' for e in bracket_split(s, ['()','[]'], strip=False): if e[0]=='(': converted += '(' + bass_translator(e[1:-1])+')' elif e[0]=='[': converted += '[' + bass_translator(e[1:-1])+']' else: converted += e s = converted if '=' not in s: return s ass = reversed(s.split('=')) last = ass.next() res = last for e in ass: op = '' if e[-1] in OP_METHODS: #increment assign like += op = ', "'+e[-1]+'"' e = e[:-1] cand = e.strip('() ') # (a) = 40 is valid so we need to transform '(a) ' to 'a' if not is_property_accessor(cand): # it is not a property assignment if not is_lval(cand) or is_internal(cand): raise SyntaxError('Invalid left-hand side in assignment') res = 'var.put(%s, %s%s)'%(cand.__repr__(), res, op) elif 
cand[-1]==']': # property assignment via [] c = list(bracket_split(cand, ['[]'], strip=False)) meth, prop = ''.join(c[:-1]).strip(), c[-1][1:-1].strip() #this does not have to be a string so dont remove #() because it can be a call res = '%s.put(%s, %s%s)'%(meth, prop, res, op) else: # Prop set via '.' c = cand.rfind('.') meth, prop = cand[:c].strip(), cand[c+1:].strip('() ') if not is_lval(prop): raise SyntaxError('Invalid left-hand side in assignment') res = '%s.put(%s, %s%s)'%(meth, prop.__repr__(), res, op) return res if __name__=='__main__': print bass_translator('3.ddsd = 40')
3,097
Python
.py
71
35.464789
119
0.562438
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,832
utils.py
evilhero_mylar/lib/js2py/legecy_translators/utils.py
import sys import unicodedata from collections import defaultdict def is_lval(t): """Does not chceck whether t is not resticted or internal""" if not t: return False i = iter(t) if i.next() not in IDENTIFIER_START: return False return all(e in IDENTIFIER_PART for e in i) def is_valid_lval(t): """Checks whether t is valid JS identifier name (no keyword like var, function, if etc) Also returns false on internal""" if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES: return True return False def is_plval(t): return t.startswith('PyJsLval') def is_marker(t): return t.startswith('PyJsMarker') or t.startswith('PyJsConstant') def is_internal(t): return is_plval(t) or is_marker(t) or t=='var' # var is a scope var def is_property_accessor(t): return '[' in t or '.' in t def is_reserved(t): return t in RESERVED_NAMES #http://stackoverflow.com/questions/14245893/efficiently-list-all-characters-in-a-given-unicode-category BOM = u'\uFEFF' ZWJ = u'\u200D' ZWNJ = u'\u200C' TAB = u'\u0009' VT = u'\u000B' FF = u'\u000C' SP = u'\u0020' NBSP = u'\u00A0' LF = u'\u000A' CR = u'\u000D' LS = u'\u2028' PS = u'\u2029' U_CATEGORIES = defaultdict(list) # Thank you Martijn Pieters! 
for c in map(unichr, range(sys.maxunicode + 1)): U_CATEGORIES[unicodedata.category(c)].append(c) UNICODE_LETTER = set(U_CATEGORIES['Lu']+U_CATEGORIES['Ll']+ U_CATEGORIES['Lt']+U_CATEGORIES['Lm']+ U_CATEGORIES['Lo']+U_CATEGORIES['Nl']) UNICODE_COMBINING_MARK = set(U_CATEGORIES['Mn']+U_CATEGORIES['Mc']) UNICODE_DIGIT = set(U_CATEGORIES['Nd']) UNICODE_CONNECTOR_PUNCTUATION = set(U_CATEGORIES['Pc']) IDENTIFIER_START = UNICODE_LETTER.union({'$','_'}) # and some fucking unicode escape sequence IDENTIFIER_PART = IDENTIFIER_START.union(UNICODE_COMBINING_MARK).union(UNICODE_DIGIT).union(UNICODE_CONNECTOR_PUNCTUATION).union({ZWJ, ZWNJ}) USP = U_CATEGORIES['Zs'] KEYWORD = {'break', 'do', 'instanceof', 'typeof', 'case', 'else', 'new', 'var', 'catch', 'finally', 'return', 'void', 'continue', 'for', 'switch', 'while', 'debugger', 'function', 'this', 'with', 'default', 'if', 'throw', 'delete', 'in', 'try'} FUTURE_RESERVED_WORD = {'class', 'enum', 'extends', 'super', 'const', 'export', 'import'} RESERVED_NAMES = KEYWORD.union(FUTURE_RESERVED_WORD).union({'null', 'false', 'true'}) WHITE = {TAB, VT, FF, SP, NBSP, BOM}.union(USP) LINE_TERMINATOR = {LF, CR, LS, PS} LLINE_TERMINATOR = list(LINE_TERMINATOR) x = ''.join(WHITE)+''.join(LINE_TERMINATOR) SPACE = WHITE.union(LINE_TERMINATOR) LINE_TERMINATOR_SEQUENCE = LINE_TERMINATOR.union({CR+LF})
2,705
Python
.py
64
38.546875
141
0.677075
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,833
nparser.py
evilhero_mylar/lib/js2py/legecy_translators/nparser.py
# JUST FOR NOW, later I will write my own - much faster and better. # Copyright (C) 2013 Ariya Hidayat <ariya.hidayat@gmail.com> # Copyright (C) 2013 Thaddee Tyl <thaddee.tyl@gmail.com> # Copyright (C) 2012 Ariya Hidayat <ariya.hidayat@gmail.com> # Copyright (C) 2012 Mathias Bynens <mathias@qiwi.be> # Copyright (C) 2012 Joost-Wim Boekesteijn <joost-wim@boekesteijn.nl> # Copyright (C) 2012 Kris Kowal <kris.kowal@cixar.com> # Copyright (C) 2012 Yusuke Suzuki <utatane.tea@gmail.com> # Copyright (C) 2012 Arpad Borsos <arpad.borsos@googlemail.com> # Copyright (C) 2011 Ariya Hidayat <ariya.hidayat@gmail.com> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# -*- coding: latin-1 -*-
"""Prelude of a Python port of the esprima JavaScript parser.

This section provides JavaScript runtime shims (``typeof``, ``jsdict``,
``RegExp``, ...), the AST-node factory functions wired into the
``SyntaxTreeDelegate`` table later in the file, and the parser's global
state and token constants.
"""
from __future__ import print_function

import re


def typeof(t):
    """Approximate the JavaScript ``typeof`` operator for a Python value."""
    if t is None:
        return 'undefined'
    elif isinstance(t, bool):
        # Must precede the number test: bool is a subclass of int.
        return 'boolean'
    elif isinstance(t, str):
        return 'string'
    elif isinstance(t, (int, float)):
        return 'number'
    elif hasattr(t, '__call__'):
        return 'function'
    else:
        return 'object'


def list_indexOf(l, v):
    """JS ``Array.prototype.indexOf``: index of *v* in *l*, or -1 when absent."""
    try:
        return l.index(v)
    except ValueError:
        # list.index raises ValueError for a missing element; mirror the JS
        # convention of returning -1.  (The original used a bare ``except:``,
        # which would also have swallowed KeyboardInterrupt/SystemExit.)
        return -1


# JS global conversion functions.  ``int`` accepts the same (string, base)
# call shape used below (e.g. parseInt("0x..." , 16)).
parseFloat = float
parseInt = int


class jsdict(object):
    """Dictionary with JavaScript object semantics.

    Attribute access and item access are interchangeable, and reading a
    missing member yields ``None`` instead of raising, matching JS
    ``undefined`` behaviour.
    """

    def __init__(self, d):
        self.__dict__.update(d)

    def __getitem__(self, name):
        if name in self.__dict__:
            return self.__dict__[name]
        else:
            return None

    def __setitem__(self, name, value):
        self.__dict__[name] = value
        return value

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails.  BUGFIX: the
        # original called getattr(self, name) here, which re-entered
        # __getattr__ and recursed to the interpreter limit before a bare
        # ``except:`` turned the RecursionError into None.  Look the name
        # up directly instead (still returning None for missing members).
        return self.__dict__.get(name)

    def __setattr__(self, name, value):
        self[name] = value
        return value

    def __contains__(self, name):
        return name in self.__dict__

    def __repr__(self):
        return str(self.__dict__)


class RegExp(object):
    """Minimal stand-in for the JavaScript ``RegExp`` object.

    Supports the ``i`` (ignore case) and ``m`` (multiline) flags; any other
    JS flags are accepted but ignored.
    """

    def __init__(self, pattern, flags=''):
        self.flags = flags
        # BUGFIX: the original built the flags with a chained conditional
        # expression, so a pattern carrying BOTH 'i' and 'm' only received
        # re.M and silently lost case-insensitivity.  Combine the flags
        # independently instead.
        pyflags = 0
        if 'i' in flags:
            pyflags |= re.I
        if 'm' in flags:
            pyflags |= re.M
        self.source = pattern
        self.pattern = re.compile(pattern, pyflags)

    def test(self, s):
        # JS RegExp.test: True when the pattern matches anywhere in s.
        return self.pattern.search(s) is not None


# JS ``console`` shim (only ``console.log`` is used).
console = jsdict({"log": print})


# ---------------------------------------------------------------------------
# AST node factories.  These are the bodies of the SyntaxTreeDelegate methods
# (wired up in the jsdict assembled later in the file); the __temp__N naming
# is an artifact of the JS-to-Python translation.
# ---------------------------------------------------------------------------

def __temp__42(object=None, body=None):
    # createWithStatement
    return jsdict({
        "type": Syntax.WithStatement,
        "object": object,
        "body": body,
    })

def __temp__41(test=None, body=None):
    # createWhileStatement
    return jsdict({
        "type": Syntax.WhileStatement,
        "test": test,
        "body": body,
    })

def __temp__40(id=None, init=None):
    # createVariableDeclarator
    return jsdict({
        "type": Syntax.VariableDeclarator,
        "id": id,
        "init": init,
    })

def __temp__39(declarations=None, kind=None):
    # createVariableDeclaration
    return jsdict({
        "type": Syntax.VariableDeclaration,
        "declarations": declarations,
        "kind": kind,
    })

def __temp__38(operator=None, argument=None):
    # createUnaryExpression: "++"/"--" become prefix update expressions.
    if (operator == "++") or (operator == "--"):
        return jsdict({
            "type": Syntax.UpdateExpression,
            "operator": operator,
            "argument": argument,
            "prefix": True,
        })
    return jsdict({
        "type": Syntax.UnaryExpression,
        "operator": operator,
        "argument": argument,
        "prefix": True,
    })

def __temp__37(block=None, guardedHandlers=None, handlers=None, finalizer=None):
    # createTryStatement
    return jsdict({
        "type": Syntax.TryStatement,
        "block": block,
        "guardedHandlers": guardedHandlers,
        "handlers": handlers,
        "finalizer": finalizer,
    })

def __temp__36(argument=None):
    # createThrowStatement
    return jsdict({
        "type": Syntax.ThrowStatement,
        "argument": argument,
    })

def __temp__35():
    # createThisExpression
    return jsdict({
        "type": Syntax.ThisExpression,
    })

def __temp__34(discriminant=None, cases=None):
    # createSwitchStatement
    return jsdict({
        "type": Syntax.SwitchStatement,
        "discriminant": discriminant,
        "cases": cases,
    })

def __temp__33(test=None, consequent=None):
    # createSwitchCase
    return jsdict({
        "type": Syntax.SwitchCase,
        "test": test,
        "consequent": consequent,
    })

def __temp__32(expressions=None):
    # createSequenceExpression
    return jsdict({
        "type": Syntax.SequenceExpression,
        "expressions": expressions,
    })

def __temp__31(argument=None):
    # createReturnStatement
    return jsdict({
        "type": Syntax.ReturnStatement,
        "argument": argument,
    })

def __temp__30(kind=None, key=None, value=None):
    # createProperty (kind is "init", "get" or "set")
    return jsdict({
        "type": Syntax.Property,
        "key": key,
        "value": value,
        "kind": kind,
    })

def __temp__29(body=None):
    # createProgram
    return jsdict({
        "type": Syntax.Program,
        "body": body,
    })

def __temp__28(operator=None, argument=None):
    # createPostfixExpression (update expression, prefix=False)
    return jsdict({
        "type": Syntax.UpdateExpression,
        "operator": operator,
        "argument": argument,
        "prefix": False,
    })

def __temp__27(properties=None):
    # createObjectExpression
    return jsdict({
        "type": Syntax.ObjectExpression,
        "properties": properties,
    })

def __temp__26(callee=None, args=None):
    # createNewExpression
    return jsdict({
        "type": Syntax.NewExpression,
        "callee": callee,
        "arguments": args,
    })

def __temp__25(accessor=None, object=None, property=None):
    # createMemberExpression: '[' accessor means a computed member (a[b]).
    return jsdict({
        "type": Syntax.MemberExpression,
        "computed": accessor == "[",
        "object": object,
        "property": property,
    })

def __temp__24(token=None):
    # createLiteral: raw text is sliced out of the global source buffer.
    return jsdict({
        "type": Syntax.Literal,
        "value": token.value,
        "raw": source[token.range[0]:token.range[1]],
    })

def __temp__23(label=None, body=None):
    # createLabeledStatement
    return jsdict({
        "type": Syntax.LabeledStatement,
        "label": label,
        "body": body,
    })

def __temp__22(test=None, consequent=None, alternate=None):
    # createIfStatement
    return jsdict({
        "type": Syntax.IfStatement,
        "test": test,
        "consequent": consequent,
        "alternate": alternate,
    })

def __temp__21(name=None):
    # createIdentifier
    return jsdict({
        "type": Syntax.Identifier,
        "name": name,
    })

def __temp__20(id=None, params=None, defaults=None, body=None):
    # createFunctionExpression
    return jsdict({
        "type": Syntax.FunctionExpression,
        "id": id,
        "params": params,
        "defaults": defaults,
        "body": body,
        "rest": None,
        "generator": False,
        "expression": False,
    })

def __temp__19(id=None, params=None, defaults=None, body=None):
    # createFunctionDeclaration
    return jsdict({
        "type": Syntax.FunctionDeclaration,
        "id": id,
        "params": params,
        "defaults": defaults,
        "body": body,
        "rest": None,
        "generator": False,
        "expression": False,
    })

def __temp__18(left=None, right=None, body=None):
    # createForInStatement
    return jsdict({
        "type": Syntax.ForInStatement,
        "left": left,
        "right": right,
        "body": body,
        "each": False,
    })

def __temp__17(init=None, test=None, update=None, body=None):
    # createForStatement
    return jsdict({
        "type": Syntax.ForStatement,
        "init": init,
        "test": test,
        "update": update,
        "body": body,
    })

def __temp__16(expression=None):
    # createExpressionStatement
    return jsdict({
        "type": Syntax.ExpressionStatement,
        "expression": expression,
    })

def __temp__15():
    # createEmptyStatement
    return jsdict({
        "type": Syntax.EmptyStatement,
    })

def __temp__14(body=None, test=None):
    # createDoWhileStatement
    return jsdict({
        "type": Syntax.DoWhileStatement,
        "body": body,
        "test": test,
    })

def __temp__13():
    # createDebuggerStatement
    return jsdict({
        "type": Syntax.DebuggerStatement,
    })

def __temp__12(label=None):
    # createContinueStatement
    return jsdict({
        "type": Syntax.ContinueStatement,
        "label": label,
    })

def __temp__11(test=None, consequent=None, alternate=None):
    # createConditionalExpression
    return jsdict({
        "type": Syntax.ConditionalExpression,
        "test": test,
        "consequent": consequent,
        "alternate": alternate,
    })

def __temp__10(param=None, body=None):
    # createCatchClause
    return jsdict({
        "type": Syntax.CatchClause,
        "param": param,
        "body": body,
    })

def __temp__9(callee=None, args=None):
    # createCallExpression
    return jsdict({
        "type": Syntax.CallExpression,
        "callee": callee,
        "arguments": args,
    })

def __temp__8(label=None):
    # createBreakStatement
    return jsdict({
        "type": Syntax.BreakStatement,
        "label": label,
    })

def __temp__7(body=None):
    # createBlockStatement
    return jsdict({
        "type": Syntax.BlockStatement,
        "body": body,
    })

def __temp__6(operator=None, left=None, right=None):
    # createBinaryExpression: "||"/"&&" yield a LogicalExpression node.
    type = (Syntax.LogicalExpression
            if (operator == "||") or (operator == "&&")
            else Syntax.BinaryExpression)
    return jsdict({
        "type": type,
        "operator": operator,
        "left": left,
        "right": right,
    })

def __temp__5(operator=None, left=None, right=None):
    # createAssignmentExpression
    return jsdict({
        "type": Syntax.AssignmentExpression,
        "operator": operator,
        "left": left,
        "right": right,
    })

def __temp__4(elements=None):
    # createArrayExpression
    return jsdict({
        "type": Syntax.ArrayExpression,
        "elements": elements,
    })

def __temp__3(node=None):
    # postProcess: attach the source name to the node location if requested.
    if extra.source:
        node.loc.source = extra.source
    return node

def __temp__2(node=None):
    # markEndIf: discard the saved start markers when the node is already
    # positioned, otherwise delegate to markEnd.
    if node.range or node.loc:
        if extra.loc:
            state.markerStack.pop()
            state.markerStack.pop()
        if extra.range:
            state.markerStack.pop()
    else:
        SyntaxTreeDelegate.markEnd(node)
    return node

def __temp__1(node=None):
    # markEnd: pop the start position saved by markStart and record the
    # node's range/loc from it and the current scanner position.
    if extra.range:
        node.range = [state.markerStack.pop(), index]
    if extra.loc:
        node.loc = jsdict({
            "start": jsdict({
                "line": state.markerStack.pop(),
                "column": state.markerStack.pop(),
            }),
            "end": jsdict({
                "line": lineNumber,
                "column": index - lineStart,
            }),
        })
    SyntaxTreeDelegate.postProcess(node)
    return node

def __temp__0():
    # markStart: push the current scanner position (column, line, index in
    # that order — markEnd pops them in reverse).
    if extra.loc:
        state.markerStack.append(index - lineStart)
        state.markerStack.append(lineNumber)
    if extra.range:
        state.markerStack.append(index)


# ---------------------------------------------------------------------------
# Parser global state.  All of these are (re)initialized later in the file;
# the scanner functions mutate index/lineNumber/lineStart via ``global``.
# ---------------------------------------------------------------------------
Token = None
TokenName = None
FnExprTokens = None
Syntax = None
PropertyKind = None
Messages = None
Regex = None
SyntaxTreeDelegate = None
source = None
strict = None
index = None
lineNumber = None
lineStart = None
length = None
delegate = None
lookahead = None
state = None
extra = None

# Token type codes and their printable names.
Token = jsdict({
    "BooleanLiteral": 1,
    "EOF": 2,
    "Identifier": 3,
    "Keyword": 4,
    "NullLiteral": 5,
    "NumericLiteral": 6,
    "Punctuator": 7,
    "StringLiteral": 8,
    "RegularExpression": 9,
})
TokenName = jsdict({})
TokenName[Token.BooleanLiteral] = "Boolean"
TokenName[Token.EOF] = "<end>" TokenName[Token.Identifier] = "Identifier" TokenName[Token.Keyword] = "Keyword" TokenName[Token.NullLiteral] = "Null" TokenName[Token.NumericLiteral] = "Numeric" TokenName[Token.Punctuator] = "Punctuator" TokenName[Token.StringLiteral] = "String" TokenName[Token.RegularExpression] = "RegularExpression" FnExprTokens = ["(", "{", "[", "in", "typeof", "instanceof", "new", "return", "case", "delete", "throw", "void", "=", "+=", "-=", "*=", "/=", "%=", "<<=", ">>=", ">>>=", "&=", "|=", "^=", ",", "+", "-", "*", "/", "%", "++", "--", "<<", ">>", ">>>", "&", "|", "^", "!", "~", "&&", "||", "?", ":", "===", "==", ">=", "<=", "<", ">", "!=", "!=="] Syntax = jsdict({ "AssignmentExpression": "AssignmentExpression", "ArrayExpression": "ArrayExpression", "BlockStatement": "BlockStatement", "BinaryExpression": "BinaryExpression", "BreakStatement": "BreakStatement", "CallExpression": "CallExpression", "CatchClause": "CatchClause", "ConditionalExpression": "ConditionalExpression", "ContinueStatement": "ContinueStatement", "DoWhileStatement": "DoWhileStatement", "DebuggerStatement": "DebuggerStatement", "EmptyStatement": "EmptyStatement", "ExpressionStatement": "ExpressionStatement", "ForStatement": "ForStatement", "ForInStatement": "ForInStatement", "FunctionDeclaration": "FunctionDeclaration", "FunctionExpression": "FunctionExpression", "Identifier": "Identifier", "IfStatement": "IfStatement", "Literal": "Literal", "LabeledStatement": "LabeledStatement", "LogicalExpression": "LogicalExpression", "MemberExpression": "MemberExpression", "NewExpression": "NewExpression", "ObjectExpression": "ObjectExpression", "Program": "Program", "Property": "Property", "ReturnStatement": "ReturnStatement", "SequenceExpression": "SequenceExpression", "SwitchStatement": "SwitchStatement", "SwitchCase": "SwitchCase", "ThisExpression": "ThisExpression", "ThrowStatement": "ThrowStatement", "TryStatement": "TryStatement", "UnaryExpression": "UnaryExpression", 
"UpdateExpression": "UpdateExpression", "VariableDeclaration": "VariableDeclaration", "VariableDeclarator": "VariableDeclarator", "WhileStatement": "WhileStatement", "WithStatement": "WithStatement", }) PropertyKind = jsdict({ "Data": 1, "Get": 2, "Set": 4, }) Messages = jsdict({ "UnexpectedToken": "Unexpected token %0", "UnexpectedNumber": "Unexpected number", "UnexpectedString": "Unexpected string", "UnexpectedIdentifier": "Unexpected identifier", "UnexpectedReserved": "Unexpected reserved word", "UnexpectedEOS": "Unexpected end of input", "NewlineAfterThrow": "Illegal newline after throw", "InvalidRegExp": "Invalid regular expression", "UnterminatedRegExp": "Invalid regular expression: missing /", "InvalidLHSInAssignment": "Invalid left-hand side in assignment", "InvalidLHSInForIn": "Invalid left-hand side in for-in", "MultipleDefaultsInSwitch": "More than one default clause in switch statement", "NoCatchOrFinally": "Missing catch or finally after try", "UnknownLabel": "Undefined label '%0'", "Redeclaration": "%0 '%1' has already been declared", "IllegalContinue": "Illegal continue statement", "IllegalBreak": "Illegal break statement", "IllegalReturn": "Illegal return statement", "StrictModeWith": "Strict mode code may not include a with statement", "StrictCatchVariable": "Catch variable may not be eval or arguments in strict mode", "StrictVarName": "Variable name may not be eval or arguments in strict mode", "StrictParamName": "Parameter name eval or arguments is not allowed in strict mode", "StrictParamDupe": "Strict mode function may not have duplicate parameter names", "StrictFunctionName": "Function name may not be eval or arguments in strict mode", "StrictOctalLiteral": "Octal literals are not allowed in strict mode.", "StrictDelete": "Delete of an unqualified identifier in strict mode.", "StrictDuplicateProperty": "Duplicate data property in object literal not allowed in strict mode", "AccessorDataProperty": "Object literal may not have data and accessor 
property with the same name", "AccessorGetSet": "Object literal may not have multiple get/set accessors with the same name", "StrictLHSAssignment": "Assignment to eval or arguments is not allowed in strict mode", "StrictLHSPostfix": "Postfix increment/decrement may not have eval or arguments operand in strict mode", "StrictLHSPrefix": "Prefix increment/decrement may not have eval or arguments operand in strict mode", "StrictReservedWord": "Use of future reserved word in strict mode", }) Regex = jsdict({ "NonAsciiIdentifierStart": RegExp(u"[\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e3
2\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a
0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc]"), "NonAsciiIdentifierPart": RegExp(u"[\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u05d0-\u05ea\u05f0-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u0800-\u082d\u0840-\u085b\u08a0\u08a2-\u08ac\u08e4-\u08fe\u0900-\u0963\u0966-\u096f\u0971-\u0977\u0979-\u097f\u0981-\u0983\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7\u09c8\u09cb-\u09ce\u09d7\u09dc\u09dd\u09df-\u09e3\u09e6-\u09f1\u0a01-\u0a03\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5c\u0b5d\u0b5
f-\u0b63\u0b66-\u0b6f\u0b71\u0b82\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c58\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1\u0cf2\u0d02\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772\u1773\u1780-\u17d3\u17d7\u17dc\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19d9\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1cd0-\u1cd2\u1cd4-\
u1cf6\u1d00-\u1de6\u1dfc-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u200c\u200d\u203f\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u2e2f\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua697\ua69f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua827\ua840-\ua873\ua880-\ua8c4\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua900-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a\uaa7b\uaa80-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabea\uabec\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\ufe70-\ufe74\ufe76-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc]"), }) def assert__py__(condition=None, message=None): if not condition: raise RuntimeError("ASSERT: " + message) def isDecimalDigit(ch=None): return (ch >= 48) and (ch <= 57) def 
isHexDigit(ch=None): return "0123456789abcdefABCDEF".find(ch) >= 0 def isOctalDigit(ch=None): return "01234567".find(ch) >= 0 def isWhiteSpace(ch=None): return (((((ch == 32) or (ch == 9)) or (ch == 11)) or (ch == 12)) or (ch == 160)) or ((ch >= 5760) and (u"\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000\ufeff".find(unichr(ch)) > 0)) def isLineTerminator(ch=None): return (((ch == 10) or (ch == 13)) or (ch == 8232)) or (ch == 8233) def isIdentifierStart(ch=None): return (((((ch == 36) or (ch == 95)) or ((ch >= 65) and (ch <= 90))) or ((ch >= 97) and (ch <= 122))) or (ch == 92)) or ((ch >= 128) and Regex.NonAsciiIdentifierStart.test(unichr(ch))) def isIdentifierPart(ch=None): return ((((((ch == 36) or (ch == 95)) or ((ch >= 65) and (ch <= 90))) or ((ch >= 97) and (ch <= 122))) or ((ch >= 48) and (ch <= 57))) or (ch == 92)) or ((ch >= 128) and Regex.NonAsciiIdentifierPart.test(unichr(ch))) def isFutureReservedWord(id=None): while 1: if (id == "super") or ((id == "import") or ((id == "extends") or ((id == "export") or ((id == "enum") or (id == "class"))))): return True else: return False break def isStrictModeReservedWord(id=None): while 1: if (id == "let") or ((id == "yield") or ((id == "static") or ((id == "public") or ((id == "protected") or ((id == "private") or ((id == "package") or ((id == "interface") or (id == "implements")))))))): return True else: return False break def isRestrictedWord(id=None): return (id == "eval") or (id == "arguments") def isKeyword(id=None): if strict and isStrictModeReservedWord(id): return True while 1: if len(id) == 2: return ((id == "if") or (id == "in")) or (id == "do") elif len(id) == 3: return ((((id == "var") or (id == "for")) or (id == "new")) or (id == "try")) or (id == "let") elif len(id) == 4: return (((((id == "this") or (id == "else")) or (id == "case")) or (id == "void")) or (id == "with")) or (id == "enum") elif len(id) == 5: return (((((((id == "while") or (id == 
"break")) or (id == "catch")) or (id == "throw")) or (id == "const")) or (id == "yield")) or (id == "class")) or (id == "super") elif len(id) == 6: return (((((id == "return") or (id == "typeof")) or (id == "delete")) or (id == "switch")) or (id == "export")) or (id == "import") elif len(id) == 7: return ((id == "default") or (id == "finally")) or (id == "extends") elif len(id) == 8: return ((id == "function") or (id == "continue")) or (id == "debugger") elif len(id) == 10: return id == "instanceof" else: return False break def addComment(type=None, value=None, start=None, end=None, loc=None): comment = None assert__py__(('undefined' if not 'start' in locals() else typeof(start)) == "number", "Comment must have valid position") if state.lastCommentStart >= start: return state.lastCommentStart = start comment = jsdict({ "type": type, "value": value, }) if extra.range: comment.range = [start, end] if extra.loc: comment.loc = loc extra.comments.append(comment) def skipSingleLineComment(): global index, lineNumber, lineStart start = None loc = None ch = None comment = None start = index - 2 loc = jsdict({ "start": jsdict({ "line": lineNumber, "column": (index - lineStart) - 2, }), }) while index < length: ch = (ord(source[index]) if index < len(source) else None) index += 1 index if isLineTerminator(ch): if extra.comments: comment = source[(start + 2):(index - 1)] loc.end = jsdict({ "line": lineNumber, "column": (index - lineStart) - 1, }) addComment("Line", comment, start, index - 1, loc) if (ch == 13) and ((ord(source[index]) if index < len(source) else None) == 10): index += 1 index lineNumber += 1 lineNumber lineStart = index return if extra.comments: comment = source[(start + 2):index] loc.end = jsdict({ "line": lineNumber, "column": index - lineStart, }) addComment("Line", comment, start, index, loc) def skipMultiLineComment(): global index, lineNumber, lineStart start = None loc = None ch = None comment = None if extra.comments: start = index - 2 loc = jsdict({ 
"start": jsdict({ "line": lineNumber, "column": (index - lineStart) - 2, }), }) while index < length: ch = (ord(source[index]) if index < len(source) else None) if isLineTerminator(ch): if (ch == 13) and ((ord(source[index + 1]) if (index + 1) < len(source) else None) == 10): index += 1 index lineNumber += 1 lineNumber index += 1 index lineStart = index if index >= length: throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") elif ch == 42: if (ord(source[index + 1]) if (index + 1) < len(source) else None) == 47: index += 1 index index += 1 index if extra.comments: comment = source[(start + 2):(index - 2)] loc.end = jsdict({ "line": lineNumber, "column": index - lineStart, }) addComment("Block", comment, start, index, loc) return index += 1 index else: index += 1 index throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") def skipComment(): global index, lineNumber, lineStart ch = None while index < length: ch = (ord(source[index]) if index < len(source) else None) if isWhiteSpace(ch): index += 1 index elif isLineTerminator(ch): index += 1 index if (ch == 13) and ((ord(source[index]) if index < len(source) else None) == 10): index += 1 index lineNumber += 1 lineNumber lineStart = index elif ch == 47: ch = (ord(source[index + 1]) if (index + 1) < len(source) else None) if ch == 47: index += 1 index index += 1 index skipSingleLineComment() elif ch == 42: index += 1 index index += 1 index skipMultiLineComment() else: break else: break def scanHexEscape(prefix=None): global len__py__, index i = None len__py__ = None ch = None code = 0 len__py__ = (4 if prefix == "u" else 2) i = 0 while 1: if not (i < len__py__): break if (index < length) and isHexDigit(source[index]): index += 1 ch = source[index - 1] code = (code * 16) + "0123456789abcdef".find(ch.lower()) else: return "" i += 1 return unichr(code) def getEscapedIdentifier(): global index ch = None id = None index += 1 index += 1 ch = (ord(source[index - 1]) if index - 1 < len(source) else None) id = 
unichr(ch) if ch == 92: if (ord(source[index]) if index < len(source) else None) != 117: throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") index += 1 index ch = scanHexEscape("u") if ((not ch) or (ch == "\\")) or (not isIdentifierStart((ord(ch[0]) if 0 < len(ch) else None))): throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") id = ch while index < length: ch = (ord(source[index]) if index < len(source) else None) if not isIdentifierPart(ch): break index += 1 index id += unichr(ch) if ch == 92: id = id[0:(0 + (len(id) - 1))] if (ord(source[index]) if index < len(source) else None) != 117: throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") index += 1 index ch = scanHexEscape("u") if ((not ch) or (ch == "\\")) or (not isIdentifierPart((ord(ch[0]) if 0 < len(ch) else None))): throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") id += ch return id def getIdentifier(): global index start = None ch = None index += 1 start = index - 1 while index < length: ch = (ord(source[index]) if index < len(source) else None) if ch == 92: index = start return getEscapedIdentifier() if isIdentifierPart(ch): index += 1 index else: break return source[start:index] def scanIdentifier(): start = None id = None type = None start = index id = (getEscapedIdentifier() if (ord(source[index]) if index < len(source) else None) == 92 else getIdentifier()) if len(id) == 1: type = Token.Identifier elif isKeyword(id): type = Token.Keyword elif id == "null": type = Token.NullLiteral elif (id == "true") or (id == "false"): type = Token.BooleanLiteral else: type = Token.Identifier return jsdict({ "type": type, "value": id, "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) def scanPunctuator(): global index start = index code = (ord(source[index]) if index < len(source) else None) code2 = None ch1 = source[index] ch2 = None ch3 = None ch4 = None while 1: if (code == 126) or ((code == 63) or ((code == 58) or ((code == 93) or ((code == 91) 
or ((code == 125) or ((code == 123) or ((code == 44) or ((code == 59) or ((code == 41) or ((code == 40) or (code == 46))))))))))): index += 1 index if extra.tokenize: if code == 40: extra.openParenToken = len(extra.tokens) elif code == 123: extra.openCurlyToken = len(extra.tokens) return jsdict({ "type": Token.Punctuator, "value": unichr(code), "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) else: code2 = (ord(source[index + 1]) if (index + 1) < len(source) else None) if code2 == 61: while 1: if (code == 124) or ((code == 94) or ((code == 62) or ((code == 60) or ((code == 47) or ((code == 45) or ((code == 43) or ((code == 42) or ((code == 38) or (code == 37))))))))): index += 2 return jsdict({ "type": Token.Punctuator, "value": unichr(code) + unichr(code2), "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) elif (code == 61) or (code == 33): index += 2 if (ord(source[index]) if index < len(source) else None) == 61: index += 1 index return jsdict({ "type": Token.Punctuator, "value": source[start:index], "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) else: break break break break ch2 = source[index + 1] if index + 1 < len(source) else None ch3 = source[index + 2] if index + 2 < len(source) else None ch4 = source[index + 3] if index + 3 < len(source) else None if ((ch1 == ">") and (ch2 == ">")) and (ch3 == ">"): if ch4 == "=": index += 4 return jsdict({ "type": Token.Punctuator, "value": ">>>=", "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) if ((ch1 == ">") and (ch2 == ">")) and (ch3 == ">"): index += 3 return jsdict({ "type": Token.Punctuator, "value": ">>>", "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) if ((ch1 == "<") and (ch2 == "<")) and (ch3 == "="): index += 3 return jsdict({ "type": Token.Punctuator, "value": "<<=", "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) if ((ch1 == ">") 
and (ch2 == ">")) and (ch3 == "="): index += 3 return jsdict({ "type": Token.Punctuator, "value": ">>=", "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) if (ch1 == ch2) and ("+-<>&|".find(ch1) >= 0): index += 2 return jsdict({ "type": Token.Punctuator, "value": ch1 + ch2, "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) if "<>=!+-*%&|^/".find(ch1) >= 0: index += 1 index return jsdict({ "type": Token.Punctuator, "value": ch1, "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") def scanHexLiteral(start=None): global index number = "" while index < length: if not isHexDigit(source[index]): break index += 1 number += source[index - 1] if len(number) == 0: throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") if isIdentifierStart((ord(source[index]) if index < len(source) else None)): throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") return jsdict({ "type": Token.NumericLiteral, "value": parseInt("0x" + number, 16), "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) def scanOctalLiteral(start=None): global index index += 1 number = "0" + source[index - 1] while index < length: if not isOctalDigit(source[index]): break index += 1 number += source[index - 1] if isIdentifierStart((ord(source[index]) if index < len(source) else None)) or isDecimalDigit((ord(source[index]) if index < len(source) else None)): throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") return jsdict({ "type": Token.NumericLiteral, "value": parseInt(number, 8), "octal": True, "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) def scanNumericLiteral(): global index number = None start = None ch = None ch = source[index] assert__py__(isDecimalDigit((ord(ch[0]) if 0 < len(ch) else None)) or (ch == "."), "Numeric literal must start with a decimal digit or a decimal point") start = 
index number = "" if ch != ".": index += 1 number = source[index - 1] ch = source[index] if index < len(source) else None if number == "0": if (ch == "x") or (ch == "X"): index += 1 index return scanHexLiteral(start) if isOctalDigit(ch): return scanOctalLiteral(start) if ch and isDecimalDigit((ord(ch[0]) if 0 < len(ch) else None)): throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") while isDecimalDigit((ord(source[index]) if index < len(source) else None)): index += 1 number += source[index - 1] ch = source[index] if index < len(source) else None if ch == ".": index += 1 number += source[index - 1] while isDecimalDigit((ord(source[index]) if index < len(source) else None)): index += 1 number += source[index - 1] ch = source[index] if (ch == "e") or (ch == "E"): index += 1 number += source[index - 1] ch = source[index] if (ch == "+") or (ch == "-"): index += 1 number += source[index - 1] if isDecimalDigit((ord(source[index]) if index < len(source) else None)): while isDecimalDigit((ord(source[index]) if index < len(source) else None)): index += 1 number += source[index - 1] else: throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") if isIdentifierStart((ord(source[index]) if index < len(source) else None)): throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") return jsdict({ "type": Token.NumericLiteral, "value": parseFloat(number), "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) def scanStringLiteral(): global index, lineNumber str = "" quote = None start = None ch = None code = None unescaped = None restore = None octal = False quote = source[index] assert__py__((quote == "'") or (quote == "\""), "String literal must starts with a quote") start = index index += 1 index while index < length: index += 1 ch = source[index - 1] if ch == quote: quote = "" break elif ch == "\\": index += 1 ch = source[index - 1] if (not ch) or (not isLineTerminator((ord(ch[0]) if 0 < len(ch) else None))): while 1: if ch == "n": str 
+= u"\x0a" break elif ch == "r": str += u"\x0d" break elif ch == "t": str += u"\x09" break elif (ch == "x") or (ch == "u"): restore = index unescaped = scanHexEscape(ch) if unescaped: str += unescaped else: index = restore str += ch break elif ch == "b": str += u"\x08" break elif ch == "f": str += u"\x0c" break elif ch == "v": str += u"\x0b" break else: if isOctalDigit(ch): code = "01234567".find(ch) if code != 0: octal = True if (index < length) and isOctalDigit(source[index]): octal = True index += 1 code = (code * 8) + "01234567".find(source[index - 1]) if (("0123".find(ch) >= 0) and (index < length)) and isOctalDigit(source[index]): index += 1 code = (code * 8) + "01234567".find(source[index - 1]) str += unichr(code) else: str += ch break break else: lineNumber += 1 lineNumber if (ch == u"\x0d") and (source[index] == u"\x0a"): index += 1 index elif isLineTerminator((ord(ch[0]) if 0 < len(ch) else None)): break else: str += ch if quote != "": throwError(jsdict({ }), Messages.UnexpectedToken, "ILLEGAL") return jsdict({ "type": Token.StringLiteral, "value": str, "octal": octal, "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) def scanRegExp(): global lookahead, index str = None ch = None start = None pattern = None flags = None value = None classMarker = False restore = None terminated = False lookahead = None skipComment() start = index ch = source[index] assert__py__(ch == "/", "Regular expression literal must start with a slash") index += 1 str = source[index - 1] while index < length: index += 1 ch = source[index - 1] str += ch if classMarker: if ch == "]": classMarker = False else: if ch == "\\": index += 1 ch = source[index - 1] if isLineTerminator((ord(ch[0]) if 0 < len(ch) else None)): throwError(jsdict({ }), Messages.UnterminatedRegExp) str += ch elif ch == "/": terminated = True break elif ch == "[": classMarker = True elif isLineTerminator((ord(ch[0]) if 0 < len(ch) else None)): throwError(jsdict({ }), 
Messages.UnterminatedRegExp) if not terminated: throwError(jsdict({ }), Messages.UnterminatedRegExp) pattern = str[1:(1 + (len(str) - 2))] flags = "" while index < length: ch = source[index] if not isIdentifierPart((ord(ch[0]) if 0 < len(ch) else None)): break index += 1 index if (ch == "\\") and (index < length): ch = source[index] if ch == "u": index += 1 index restore = index ch = scanHexEscape("u") if ch: flags += ch str += "\\u" while 1: if not (restore < index): break str += source[restore] restore += 1 else: index = restore flags += "u" str += "\\u" else: str += "\\" else: flags += ch str += ch try: value = RegExp(pattern, flags) except Exception as e: throwError(jsdict({ }), Messages.InvalidRegExp) peek() if extra.tokenize: return jsdict({ "type": Token.RegularExpression, "value": value, "lineNumber": lineNumber, "lineStart": lineStart, "range": [start, index], }) return jsdict({ "literal": str, "value": value, "range": [start, index], }) def isIdentifierName(token=None): return (((token.type == Token.Identifier) or (token.type == Token.Keyword)) or (token.type == Token.BooleanLiteral)) or (token.type == Token.NullLiteral) def advanceSlash(): prevToken = None checkToken = None prevToken = extra.tokens[len(extra.tokens) - 1] if not prevToken: return scanRegExp() if prevToken.type == "Punctuator": if prevToken.value == ")": checkToken = extra.tokens[extra.openParenToken - 1] if (checkToken and (checkToken.type == "Keyword")) and ((((checkToken.value == "if") or (checkToken.value == "while")) or (checkToken.value == "for")) or (checkToken.value == "with")): return scanRegExp() return scanPunctuator() if prevToken.value == "}": if extra.tokens[extra.openCurlyToken - 3] and (extra.tokens[extra.openCurlyToken - 3].type == "Keyword"): checkToken = extra.tokens[extra.openCurlyToken - 4] if not checkToken: return scanPunctuator() elif extra.tokens[extra.openCurlyToken - 4] and (extra.tokens[extra.openCurlyToken - 4].type == "Keyword"): checkToken = 
extra.tokens[extra.openCurlyToken - 5] if not checkToken: return scanRegExp() else: return scanPunctuator() if FnExprTokens.indexOf(checkToken.value) >= 0: return scanPunctuator() return scanRegExp() return scanRegExp() if prevToken.type == "Keyword": return scanRegExp() return scanPunctuator() def advance(): ch = None skipComment() if index >= length: return jsdict({ "type": Token.EOF, "lineNumber": lineNumber, "lineStart": lineStart, "range": [index, index], }) ch = (ord(source[index]) if index < len(source) else None) if ((ch == 40) or (ch == 41)) or (ch == 58): return scanPunctuator() if (ch == 39) or (ch == 34): return scanStringLiteral() if isIdentifierStart(ch): return scanIdentifier() if ch == 46: if isDecimalDigit((ord(source[index + 1]) if (index + 1) < len(source) else None)): return scanNumericLiteral() return scanPunctuator() if isDecimalDigit(ch): return scanNumericLiteral() if extra.tokenize and (ch == 47): return advanceSlash() return scanPunctuator() def lex(): global index, lineNumber, lineStart, lookahead token = None token = lookahead index = token.range[1] lineNumber = token.lineNumber lineStart = token.lineStart lookahead = advance() index = token.range[1] lineNumber = token.lineNumber lineStart = token.lineStart return token def peek(): global lookahead, index, lineNumber, lineStart pos = None line = None start = None pos = index line = lineNumber start = lineStart lookahead = advance() index = pos lineNumber = line lineStart = start SyntaxTreeDelegate = jsdict({ "name": "SyntaxTree", "markStart": __temp__0, "markEnd": __temp__1, "markEndIf": __temp__2, "postProcess": __temp__3, "createArrayExpression": __temp__4, "createAssignmentExpression": __temp__5, "createBinaryExpression": __temp__6, "createBlockStatement": __temp__7, "createBreakStatement": __temp__8, "createCallExpression": __temp__9, "createCatchClause": __temp__10, "createConditionalExpression": __temp__11, "createContinueStatement": __temp__12, "createDebuggerStatement": 
__temp__13, "createDoWhileStatement": __temp__14, "createEmptyStatement": __temp__15, "createExpressionStatement": __temp__16, "createForStatement": __temp__17, "createForInStatement": __temp__18, "createFunctionDeclaration": __temp__19, "createFunctionExpression": __temp__20, "createIdentifier": __temp__21, "createIfStatement": __temp__22, "createLabeledStatement": __temp__23, "createLiteral": __temp__24, "createMemberExpression": __temp__25, "createNewExpression": __temp__26, "createObjectExpression": __temp__27, "createPostfixExpression": __temp__28, "createProgram": __temp__29, "createProperty": __temp__30, "createReturnStatement": __temp__31, "createSequenceExpression": __temp__32, "createSwitchCase": __temp__33, "createSwitchStatement": __temp__34, "createThisExpression": __temp__35, "createThrowStatement": __temp__36, "createTryStatement": __temp__37, "createUnaryExpression": __temp__38, "createVariableDeclaration": __temp__39, "createVariableDeclarator": __temp__40, "createWhileStatement": __temp__41, "createWithStatement": __temp__42, }) def peekLineTerminator(): global index, lineNumber, lineStart pos = None line = None start = None found = None pos = index line = lineNumber start = lineStart skipComment() found = lineNumber != line index = pos lineNumber = line lineStart = start return found def throwError(token=None, messageFormat=None, a=None): def __temp__43(whole=None, index=None): assert__py__(index < len(args), "Message reference must be in range") return args[index] error = None args = Array.prototype.slice.call(arguments, 2) msg = messageFormat.replace(RegExp(r'%(\d)'), __temp__43) if ('undefined' if not ('lineNumber' in token) else typeof(token.lineNumber)) == "number": error = RuntimeError((("Line " + token.lineNumber) + ": ") + msg) error.index = token.range[0] error.lineNumber = token.lineNumber error.column = (token.range[0] - lineStart) + 1 else: error = RuntimeError((("Line " + lineNumber) + ": ") + msg) error.index = index 
error.lineNumber = lineNumber error.column = (index - lineStart) + 1 error.description = msg raise error def throwErrorTolerant(): try: throwError.apply(None, arguments) except Exception as e: if extra.errors: extra.errors.append(e) else: raise def throwUnexpected(token=None): if token.type == Token.EOF: throwError(token, Messages.UnexpectedEOS) if token.type == Token.NumericLiteral: throwError(token, Messages.UnexpectedNumber) if token.type == Token.StringLiteral: throwError(token, Messages.UnexpectedString) if token.type == Token.Identifier: throwError(token, Messages.UnexpectedIdentifier) if token.type == Token.Keyword: if isFutureReservedWord(token.value): throwError(token, Messages.UnexpectedReserved) elif strict and isStrictModeReservedWord(token.value): throwErrorTolerant(token, Messages.StrictReservedWord) return throwError(token, Messages.UnexpectedToken, token.value) throwError(token, Messages.UnexpectedToken, token.value) def expect(value=None): token = lex() if (token.type != Token.Punctuator) or (token.value != value): throwUnexpected(token) def expectKeyword(keyword=None): token = lex() if (token.type != Token.Keyword) or (token.value != keyword): throwUnexpected(token) def match(value=None): return (lookahead.type == Token.Punctuator) and (lookahead.value == value) def matchKeyword(keyword=None): return (lookahead.type == Token.Keyword) and (lookahead.value == keyword) def matchAssign(): op = None if lookahead.type != Token.Punctuator: return False op = lookahead.value return (((((((((((op == "=") or (op == "*=")) or (op == "/=")) or (op == "%=")) or (op == "+=")) or (op == "-=")) or (op == "<<=")) or (op == ">>=")) or (op == ">>>=")) or (op == "&=")) or (op == "^=")) or (op == "|=") def consumeSemicolon(): line = None if (ord(source[index]) if index < len(source) else None) == 59: lex() return line = lineNumber skipComment() if lineNumber != line: return if match(";"): lex() return if (lookahead.type != Token.EOF) and (not match("}")): 
throwUnexpected(lookahead) def isLeftHandSide(expr=None): return (expr.type == Syntax.Identifier) or (expr.type == Syntax.MemberExpression) def parseArrayInitialiser(): elements = [] expect("[") while not match("]"): if match(","): lex() elements.append(None) else: elements.append(parseAssignmentExpression()) if not match("]"): expect(",") expect("]") return delegate.createArrayExpression(elements) def parsePropertyFunction(param=None, first=None): global strict previousStrict = None body = None previousStrict = strict skipComment() delegate.markStart() body = parseFunctionSourceElements() if (first and strict) and isRestrictedWord(param[0].name): throwErrorTolerant(first, Messages.StrictParamName) strict = previousStrict return delegate.markEnd(delegate.createFunctionExpression(None, param, [], body)) def parseObjectPropertyKey(): token = None skipComment() delegate.markStart() token = lex() if (token.type == Token.StringLiteral) or (token.type == Token.NumericLiteral): if strict and token.octal: throwErrorTolerant(token, Messages.StrictOctalLiteral) return delegate.markEnd(delegate.createLiteral(token)) return delegate.markEnd(delegate.createIdentifier(token.value)) def parseObjectProperty(): token = None key = None id = None value = None param = None token = lookahead skipComment() delegate.markStart() if token.type == Token.Identifier: id = parseObjectPropertyKey() if (token.value == "get") and (not match(":")): key = parseObjectPropertyKey() expect("(") expect(")") value = parsePropertyFunction([]) return delegate.markEnd(delegate.createProperty("get", key, value)) if (token.value == "set") and (not match(":")): key = parseObjectPropertyKey() expect("(") token = lookahead if token.type != Token.Identifier: expect(")") throwErrorTolerant(token, Messages.UnexpectedToken, token.value) value = parsePropertyFunction([]) else: param = [parseVariableIdentifier()] expect(")") value = parsePropertyFunction(param, token) return 
delegate.markEnd(delegate.createProperty("set", key, value)) expect(":") value = parseAssignmentExpression() return delegate.markEnd(delegate.createProperty("init", id, value)) if (token.type == Token.EOF) or (token.type == Token.Punctuator): throwUnexpected(token) else: key = parseObjectPropertyKey() expect(":") value = parseAssignmentExpression() return delegate.markEnd(delegate.createProperty("init", key, value)) def parseObjectInitialiser(): properties = [] property = None name = None key = None kind = None map = jsdict({ }) toString = str expect("{") while not match("}"): property = parseObjectProperty() if property.key.type == Syntax.Identifier: name = property.key.name else: name = toString(property.key.value) kind = (PropertyKind.Data if property.kind == "init" else (PropertyKind.Get if property.kind == "get" else PropertyKind.Set)) key = "$" + name if key in map: if map[key] == PropertyKind.Data: if strict and (kind == PropertyKind.Data): throwErrorTolerant(jsdict({ }), Messages.StrictDuplicateProperty) elif kind != PropertyKind.Data: throwErrorTolerant(jsdict({ }), Messages.AccessorDataProperty) else: if kind == PropertyKind.Data: throwErrorTolerant(jsdict({ }), Messages.AccessorDataProperty) elif map[key] & kind: throwErrorTolerant(jsdict({ }), Messages.AccessorGetSet) map[key] |= kind else: map[key] = kind properties.append(property) if not match("}"): expect(",") expect("}") return delegate.createObjectExpression(properties) def parseGroupExpression(): expr = None expect("(") expr = parseExpression() expect(")") return expr def parsePrimaryExpression(): type = None token = None expr = None if match("("): return parseGroupExpression() type = lookahead.type delegate.markStart() if type == Token.Identifier: expr = delegate.createIdentifier(lex().value) elif (type == Token.StringLiteral) or (type == Token.NumericLiteral): if strict and lookahead.octal: throwErrorTolerant(lookahead, Messages.StrictOctalLiteral) expr = delegate.createLiteral(lex()) elif type 
== Token.Keyword: if matchKeyword("this"): lex() expr = delegate.createThisExpression() elif matchKeyword("function"): expr = parseFunctionExpression() elif type == Token.BooleanLiteral: token = lex() token.value = token.value == "true" expr = delegate.createLiteral(token) elif type == Token.NullLiteral: token = lex() token.value = None expr = delegate.createLiteral(token) elif match("["): expr = parseArrayInitialiser() elif match("{"): expr = parseObjectInitialiser() elif match("/") or match("/="): expr = delegate.createLiteral(scanRegExp()) if expr: return delegate.markEnd(expr) throwUnexpected(lex()) def parseArguments(): args = [] expect("(") if not match(")"): while index < length: args.append(parseAssignmentExpression()) if match(")"): break expect(",") expect(")") return args def parseNonComputedProperty(): token = None delegate.markStart() token = lex() if not isIdentifierName(token): throwUnexpected(token) return delegate.markEnd(delegate.createIdentifier(token.value)) def parseNonComputedMember(): expect(".") return parseNonComputedProperty() def parseComputedMember(): expr = None expect("[") expr = parseExpression() expect("]") return expr def parseNewExpression(): callee = None args = None delegate.markStart() expectKeyword("new") callee = parseLeftHandSideExpression() args = (parseArguments() if match("(") else []) return delegate.markEnd(delegate.createNewExpression(callee, args)) def parseLeftHandSideExpressionAllowCall(): marker = None expr = None args = None property = None marker = createLocationMarker() expr = (parseNewExpression() if matchKeyword("new") else parsePrimaryExpression()) while (match(".") or match("[")) or match("("): if match("("): args = parseArguments() expr = delegate.createCallExpression(expr, args) elif match("["): property = parseComputedMember() expr = delegate.createMemberExpression("[", expr, property) else: property = parseNonComputedMember() expr = delegate.createMemberExpression(".", expr, property) if marker: 
marker.end() marker.apply(expr) return expr def parseLeftHandSideExpression(): marker = None expr = None property = None marker = createLocationMarker() expr = (parseNewExpression() if matchKeyword("new") else parsePrimaryExpression()) while match(".") or match("["): if match("["): property = parseComputedMember() expr = delegate.createMemberExpression("[", expr, property) else: property = parseNonComputedMember() expr = delegate.createMemberExpression(".", expr, property) if marker: marker.end() marker.apply(expr) return expr def parsePostfixExpression(): expr = None token = None delegate.markStart() expr = parseLeftHandSideExpressionAllowCall() if lookahead.type == Token.Punctuator: if (match("++") or match("--")) and (not peekLineTerminator()): if (strict and (expr.type == Syntax.Identifier)) and isRestrictedWord(expr.name): throwErrorTolerant(jsdict({ }), Messages.StrictLHSPostfix) if not isLeftHandSide(expr): throwError(jsdict({ }), Messages.InvalidLHSInAssignment) token = lex() expr = delegate.createPostfixExpression(token.value, expr) return delegate.markEndIf(expr) def parseUnaryExpression(): token = None expr = None delegate.markStart() if (lookahead.type != Token.Punctuator) and (lookahead.type != Token.Keyword): expr = parsePostfixExpression() elif match("++") or match("--"): token = lex() expr = parseUnaryExpression() if (strict and (expr.type == Syntax.Identifier)) and isRestrictedWord(expr.name): throwErrorTolerant(jsdict({ }), Messages.StrictLHSPrefix) if not isLeftHandSide(expr): throwError(jsdict({ }), Messages.InvalidLHSInAssignment) expr = delegate.createUnaryExpression(token.value, expr) elif ((match("+") or match("-")) or match("~")) or match("!"): token = lex() expr = parseUnaryExpression() expr = delegate.createUnaryExpression(token.value, expr) elif (matchKeyword("delete") or matchKeyword("void")) or matchKeyword("typeof"): token = lex() expr = parseUnaryExpression() expr = delegate.createUnaryExpression(token.value, expr) if (strict and 
(expr.operator == "delete")) and (expr.argument.type == Syntax.Identifier): throwErrorTolerant(jsdict({ }), Messages.StrictDelete) else: expr = parsePostfixExpression() return delegate.markEndIf(expr) def binaryPrecedence(token=None, allowIn=None): prec = 0 if (token.type != Token.Punctuator) and (token.type != Token.Keyword): return 0 while 1: if token.value == "||": prec = 1 break elif token.value == "&&": prec = 2 break elif token.value == "|": prec = 3 break elif token.value == "^": prec = 4 break elif token.value == "&": prec = 5 break elif (token.value == "!==") or ((token.value == "===") or ((token.value == "!=") or (token.value == "=="))): prec = 6 break elif (token.value == "instanceof") or ((token.value == ">=") or ((token.value == "<=") or ((token.value == ">") or (token.value == "<")))): prec = 7 break elif token.value == "in": prec = (7 if allowIn else 0) break elif (token.value == ">>>") or ((token.value == ">>") or (token.value == "<<")): prec = 8 break elif (token.value == "-") or (token.value == "+"): prec = 9 break elif (token.value == "%") or ((token.value == "/") or (token.value == "*")): prec = 11 break else: break break return prec def parseBinaryExpression(): marker = None markers = None expr = None token = None prec = None previousAllowIn = None stack = None right = None operator = None left = None i = None previousAllowIn = state.allowIn state.allowIn = True marker = createLocationMarker() left = parseUnaryExpression() token = lookahead prec = binaryPrecedence(token, previousAllowIn) if prec == 0: return left token.prec = prec lex() markers = [marker, createLocationMarker()] right = parseUnaryExpression() stack = [left, token, right] prec = binaryPrecedence(lookahead, previousAllowIn) while prec > 0: while (len(stack) > 2) and (prec <= stack[len(stack) - 2].prec): right = stack.pop() operator = stack.pop().value left = stack.pop() expr = delegate.createBinaryExpression(operator, left, right) markers.pop() marker = markers.pop() if marker: 
marker.end() marker.apply(expr) stack.append(expr) markers.append(marker) token = lex() token.prec = prec stack.append(token) markers.append(createLocationMarker()) expr = parseUnaryExpression() stack.append(expr) prec = binaryPrecedence(lookahead, previousAllowIn) state.allowIn = previousAllowIn i = len(stack) - 1 expr = stack[i] markers.pop() while i > 1: expr = delegate.createBinaryExpression(stack[i - 1].value, stack[i - 2], expr) i -= 2 marker = markers.pop() if marker: marker.end() marker.apply(expr) return expr def parseConditionalExpression(): expr = None previousAllowIn = None consequent = None alternate = None delegate.markStart() expr = parseBinaryExpression() if match("?"): lex() previousAllowIn = state.allowIn state.allowIn = True consequent = parseAssignmentExpression() state.allowIn = previousAllowIn expect(":") alternate = parseAssignmentExpression() expr = delegate.markEnd(delegate.createConditionalExpression(expr, consequent, alternate)) else: delegate.markEnd(jsdict({ })) return expr def parseAssignmentExpression(): token = None left = None right = None node = None token = lookahead delegate.markStart() left = parseConditionalExpression() node = left if matchAssign(): if not isLeftHandSide(left): throwError(jsdict({ }), Messages.InvalidLHSInAssignment) if (strict and (left.type == Syntax.Identifier)) and isRestrictedWord(left.name): throwErrorTolerant(token, Messages.StrictLHSAssignment) token = lex() right = parseAssignmentExpression() node = delegate.createAssignmentExpression(token.value, left, right) return delegate.markEndIf(node) def parseExpression(): expr = None delegate.markStart() expr = parseAssignmentExpression() if match(","): expr = delegate.createSequenceExpression([expr]) while index < length: if not match(","): break lex() expr.expressions.append(parseAssignmentExpression()) return delegate.markEndIf(expr) def parseStatementList(): list__py__ = [] statement = None while index < length: if match("}"): break statement = 
parseSourceElement() if ('undefined' if not 'statement' in locals() else typeof(statement)) == "undefined": break list__py__.append(statement) return list__py__ def parseBlock(): block = None skipComment() delegate.markStart() expect("{") block = parseStatementList() expect("}") return delegate.markEnd(delegate.createBlockStatement(block)) def parseVariableIdentifier(): token = None skipComment() delegate.markStart() token = lex() if token.type != Token.Identifier: throwUnexpected(token) return delegate.markEnd(delegate.createIdentifier(token.value)) def parseVariableDeclaration(kind=None): init = None id = None skipComment() delegate.markStart() id = parseVariableIdentifier() if strict and isRestrictedWord(id.name): throwErrorTolerant(jsdict({ }), Messages.StrictVarName) if kind == "const": expect("=") init = parseAssignmentExpression() elif match("="): lex() init = parseAssignmentExpression() return delegate.markEnd(delegate.createVariableDeclarator(id, init)) def parseVariableDeclarationList(kind=None): list__py__ = [] while 1: list__py__.append(parseVariableDeclaration(kind)) if not match(","): break lex() if not (index < length): break return list__py__ def parseVariableStatement(): declarations = None expectKeyword("var") declarations = parseVariableDeclarationList() consumeSemicolon() return delegate.createVariableDeclaration(declarations, "var") def parseConstLetDeclaration(kind=None): declarations = None skipComment() delegate.markStart() expectKeyword(kind) declarations = parseVariableDeclarationList(kind) consumeSemicolon() return delegate.markEnd(delegate.createVariableDeclaration(declarations, kind)) def parseEmptyStatement(): expect(";") return delegate.createEmptyStatement() def parseExpressionStatement(): expr = parseExpression() consumeSemicolon() return delegate.createExpressionStatement(expr) def parseIfStatement(): test = None consequent = None alternate = None expectKeyword("if") expect("(") test = parseExpression() expect(")") consequent = 
parseStatement() if matchKeyword("else"): lex() alternate = parseStatement() else: alternate = None return delegate.createIfStatement(test, consequent, alternate) def parseDoWhileStatement(): body = None test = None oldInIteration = None expectKeyword("do") oldInIteration = state.inIteration state.inIteration = True body = parseStatement() state.inIteration = oldInIteration expectKeyword("while") expect("(") test = parseExpression() expect(")") if match(";"): lex() return delegate.createDoWhileStatement(body, test) def parseWhileStatement(): test = None body = None oldInIteration = None expectKeyword("while") expect("(") test = parseExpression() expect(")") oldInIteration = state.inIteration state.inIteration = True body = parseStatement() state.inIteration = oldInIteration return delegate.createWhileStatement(test, body) def parseForVariableDeclaration(): token = None declarations = None delegate.markStart() token = lex() declarations = parseVariableDeclarationList() return delegate.markEnd(delegate.createVariableDeclaration(declarations, token.value)) def parseForStatement(): init = None test = None update = None left = None right = None body = None oldInIteration = None update = None test = update init = test expectKeyword("for") expect("(") if match(";"): lex() else: if matchKeyword("var") or matchKeyword("let"): state.allowIn = False init = parseForVariableDeclaration() state.allowIn = True if (len(init.declarations) == 1) and matchKeyword("in"): lex() left = init right = parseExpression() init = None else: state.allowIn = False init = parseExpression() state.allowIn = True if matchKeyword("in"): if not isLeftHandSide(init): throwError(jsdict({ }), Messages.InvalidLHSInForIn) lex() left = init right = parseExpression() init = None if ('undefined' if not 'left' in locals() else typeof(left)) == "undefined": expect(";") if ('undefined' if not 'left' in locals() else typeof(left)) == "undefined": if not match(";"): test = parseExpression() expect(";") if not 
match(")"): update = parseExpression() expect(")") oldInIteration = state.inIteration state.inIteration = True body = parseStatement() state.inIteration = oldInIteration return (delegate.createForStatement(init, test, update, body) if ('undefined' if not 'left' in locals() else typeof(left)) == "undefined" else delegate.createForInStatement(left, right, body)) def parseContinueStatement(): label = None key = None expectKeyword("continue") if (ord(source[index]) if index < len(source) else None) == 59: lex() if not state.inIteration: throwError(jsdict({ }), Messages.IllegalContinue) return delegate.createContinueStatement(None) if peekLineTerminator(): if not state.inIteration: throwError(jsdict({ }), Messages.IllegalContinue) return delegate.createContinueStatement(None) if lookahead.type == Token.Identifier: label = parseVariableIdentifier() key = "$" + label.name if not (key in state.labelSet): throwError(jsdict({ }), Messages.UnknownLabel, label.name) consumeSemicolon() if (label == None) and (not state.inIteration): throwError(jsdict({ }), Messages.IllegalContinue) return delegate.createContinueStatement(label) def parseBreakStatement(): label = None key = None expectKeyword("break") if (ord(source[index]) if index < len(source) else None) == 59: lex() if not (state.inIteration or state.inSwitch): throwError(jsdict({ }), Messages.IllegalBreak) return delegate.createBreakStatement(None) if peekLineTerminator(): if not (state.inIteration or state.inSwitch): throwError(jsdict({ }), Messages.IllegalBreak) return delegate.createBreakStatement(None) if lookahead.type == Token.Identifier: label = parseVariableIdentifier() key = "$" + label.name if not (key in state.labelSet): throwError(jsdict({ }), Messages.UnknownLabel, label.name) consumeSemicolon() if (label == None) and (not (state.inIteration or state.inSwitch)): throwError(jsdict({ }), Messages.IllegalBreak) return delegate.createBreakStatement(label) def parseReturnStatement(): argument = None 
expectKeyword("return") if not state.inFunctionBody: throwErrorTolerant(jsdict({ }), Messages.IllegalReturn) if (ord(source[index]) if index < len(source) else None) == 32: if isIdentifierStart((ord(source[index + 1]) if (index + 1) < len(source) else None)): argument = parseExpression() consumeSemicolon() return delegate.createReturnStatement(argument) if peekLineTerminator(): return delegate.createReturnStatement(None) if not match(";"): if (not match("}")) and (lookahead.type != Token.EOF): argument = parseExpression() consumeSemicolon() return delegate.createReturnStatement(argument) def parseWithStatement(): object = None body = None if strict: throwErrorTolerant(jsdict({ }), Messages.StrictModeWith) expectKeyword("with") expect("(") object = parseExpression() expect(")") body = parseStatement() return delegate.createWithStatement(object, body) def parseSwitchCase(): test = None consequent = [] statement = None skipComment() delegate.markStart() if matchKeyword("default"): lex() test = None else: expectKeyword("case") test = parseExpression() expect(":") while index < length: if (match("}") or matchKeyword("default")) or matchKeyword("case"): break statement = parseStatement() consequent.append(statement) return delegate.markEnd(delegate.createSwitchCase(test, consequent)) def parseSwitchStatement(): discriminant = None cases = None clause = None oldInSwitch = None defaultFound = None expectKeyword("switch") expect("(") discriminant = parseExpression() expect(")") expect("{") if match("}"): lex() return delegate.createSwitchStatement(discriminant) cases = [] oldInSwitch = state.inSwitch state.inSwitch = True defaultFound = False while index < length: if match("}"): break clause = parseSwitchCase() if clause.test == None: if defaultFound: throwError(jsdict({ }), Messages.MultipleDefaultsInSwitch) defaultFound = True cases.append(clause) state.inSwitch = oldInSwitch expect("}") return delegate.createSwitchStatement(discriminant, cases) def parseThrowStatement(): 
argument = None expectKeyword("throw") if peekLineTerminator(): throwError(jsdict({ }), Messages.NewlineAfterThrow) argument = parseExpression() consumeSemicolon() return delegate.createThrowStatement(argument) def parseCatchClause(): param = None body = None skipComment() delegate.markStart() expectKeyword("catch") expect("(") if match(")"): throwUnexpected(lookahead) param = parseVariableIdentifier() if strict and isRestrictedWord(param.name): throwErrorTolerant(jsdict({ }), Messages.StrictCatchVariable) expect(")") body = parseBlock() return delegate.markEnd(delegate.createCatchClause(param, body)) def parseTryStatement(): block = None handlers = [] finalizer = None expectKeyword("try") block = parseBlock() if matchKeyword("catch"): handlers.append(parseCatchClause()) if matchKeyword("finally"): lex() finalizer = parseBlock() if (len(handlers) == 0) and (not finalizer): throwError(jsdict({ }), Messages.NoCatchOrFinally) return delegate.createTryStatement(block, [], handlers, finalizer) def parseDebuggerStatement(): expectKeyword("debugger") consumeSemicolon() return delegate.createDebuggerStatement() def parseStatement(): type = lookahead.type expr = None labeledBody = None key = None if type == Token.EOF: throwUnexpected(lookahead) skipComment() delegate.markStart() if type == Token.Punctuator: while 1: if lookahead.value == ";": return delegate.markEnd(parseEmptyStatement()) elif lookahead.value == "{": return delegate.markEnd(parseBlock()) elif lookahead.value == "(": return delegate.markEnd(parseExpressionStatement()) else: break break if type == Token.Keyword: while 1: if lookahead.value == "break": return delegate.markEnd(parseBreakStatement()) elif lookahead.value == "continue": return delegate.markEnd(parseContinueStatement()) elif lookahead.value == "debugger": return delegate.markEnd(parseDebuggerStatement()) elif lookahead.value == "do": return delegate.markEnd(parseDoWhileStatement()) elif lookahead.value == "for": return 
delegate.markEnd(parseForStatement()) elif lookahead.value == "function": return delegate.markEnd(parseFunctionDeclaration()) elif lookahead.value == "if": return delegate.markEnd(parseIfStatement()) elif lookahead.value == "return": return delegate.markEnd(parseReturnStatement()) elif lookahead.value == "switch": return delegate.markEnd(parseSwitchStatement()) elif lookahead.value == "throw": return delegate.markEnd(parseThrowStatement()) elif lookahead.value == "try": return delegate.markEnd(parseTryStatement()) elif lookahead.value == "var": return delegate.markEnd(parseVariableStatement()) elif lookahead.value == "while": return delegate.markEnd(parseWhileStatement()) elif lookahead.value == "with": return delegate.markEnd(parseWithStatement()) else: break break expr = parseExpression() if (expr.type == Syntax.Identifier) and match(":"): lex() key = "$" + expr.name if key in state.labelSet: throwError(jsdict({ }), Messages.Redeclaration, "Label", expr.name) state.labelSet[key] = True labeledBody = parseStatement() del state.labelSet[key] return delegate.markEnd(delegate.createLabeledStatement(expr, labeledBody)) consumeSemicolon() return delegate.markEnd(delegate.createExpressionStatement(expr)) def parseFunctionSourceElements(): global strict sourceElement = None sourceElements = [] token = None directive = None firstRestricted = None oldLabelSet = None oldInIteration = None oldInSwitch = None oldInFunctionBody = None skipComment() delegate.markStart() expect("{") while index < length: if lookahead.type != Token.StringLiteral: break token = lookahead sourceElement = parseSourceElement() sourceElements.append(sourceElement) if sourceElement.expression.type != Syntax.Literal: break directive = source[(token.range[0] + 1):(token.range[1] - 1)] if directive == "use strict": strict = True if firstRestricted: throwErrorTolerant(firstRestricted, Messages.StrictOctalLiteral) else: if (not firstRestricted) and token.octal: firstRestricted = token oldLabelSet = 
state.labelSet oldInIteration = state.inIteration oldInSwitch = state.inSwitch oldInFunctionBody = state.inFunctionBody state.labelSet = jsdict({ }) state.inIteration = False state.inSwitch = False state.inFunctionBody = True while index < length: if match("}"): break sourceElement = parseSourceElement() if ('undefined' if not 'sourceElement' in locals() else typeof(sourceElement)) == "undefined": break sourceElements.append(sourceElement) expect("}") state.labelSet = oldLabelSet state.inIteration = oldInIteration state.inSwitch = oldInSwitch state.inFunctionBody = oldInFunctionBody return delegate.markEnd(delegate.createBlockStatement(sourceElements)) def parseParams(firstRestricted=None): param = None params = [] token = None stricted = None paramSet = None key = None message = None expect("(") if not match(")"): paramSet = jsdict({ }) while index < length: token = lookahead param = parseVariableIdentifier() key = "$" + token.value if strict: if isRestrictedWord(token.value): stricted = token message = Messages.StrictParamName if key in paramSet: stricted = token message = Messages.StrictParamDupe elif not firstRestricted: if isRestrictedWord(token.value): firstRestricted = token message = Messages.StrictParamName elif isStrictModeReservedWord(token.value): firstRestricted = token message = Messages.StrictReservedWord elif key in paramSet: firstRestricted = token message = Messages.StrictParamDupe params.append(param) paramSet[key] = True if match(")"): break expect(",") expect(")") return jsdict({ "params": params, "stricted": stricted, "firstRestricted": firstRestricted, "message": message, }) def parseFunctionDeclaration(): global strict id = None params = [] body = None token = None stricted = None tmp = None firstRestricted = None message = None previousStrict = None skipComment() delegate.markStart() expectKeyword("function") token = lookahead id = parseVariableIdentifier() if strict: if isRestrictedWord(token.value): throwErrorTolerant(token, 
Messages.StrictFunctionName) else: if isRestrictedWord(token.value): firstRestricted = token message = Messages.StrictFunctionName elif isStrictModeReservedWord(token.value): firstRestricted = token message = Messages.StrictReservedWord tmp = parseParams(firstRestricted) params = tmp.params stricted = tmp.stricted firstRestricted = tmp.firstRestricted if tmp.message: message = tmp.message previousStrict = strict body = parseFunctionSourceElements() if strict and firstRestricted: throwError(firstRestricted, message) if strict and stricted: throwErrorTolerant(stricted, message) strict = previousStrict return delegate.markEnd(delegate.createFunctionDeclaration(id, params, [], body)) def parseFunctionExpression(): global strict token = None id = None stricted = None firstRestricted = None message = None tmp = None params = [] body = None previousStrict = None delegate.markStart() expectKeyword("function") if not match("("): token = lookahead id = parseVariableIdentifier() if strict: if isRestrictedWord(token.value): throwErrorTolerant(token, Messages.StrictFunctionName) else: if isRestrictedWord(token.value): firstRestricted = token message = Messages.StrictFunctionName elif isStrictModeReservedWord(token.value): firstRestricted = token message = Messages.StrictReservedWord tmp = parseParams(firstRestricted) params = tmp.params stricted = tmp.stricted firstRestricted = tmp.firstRestricted if tmp.message: message = tmp.message previousStrict = strict body = parseFunctionSourceElements() if strict and firstRestricted: throwError(firstRestricted, message) if strict and stricted: throwErrorTolerant(stricted, message) strict = previousStrict return delegate.markEnd(delegate.createFunctionExpression(id, params, [], body)) def parseSourceElement(): if lookahead.type == Token.Keyword: while 1: if (lookahead.value == "let") or (lookahead.value == "const"): return parseConstLetDeclaration(lookahead.value) elif lookahead.value == "function": return parseFunctionDeclaration() 
else: return parseStatement() break if lookahead.type != Token.EOF: return parseStatement() def parseSourceElements(): global strict sourceElement = None sourceElements = [] token = None directive = None firstRestricted = None while index < length: token = lookahead if token.type != Token.StringLiteral: break sourceElement = parseSourceElement() sourceElements.append(sourceElement) if sourceElement.expression.type != Syntax.Literal: break directive = source[(token.range[0] + 1):(token.range[1] - 1)] if directive == "use strict": strict = True if firstRestricted: throwErrorTolerant(firstRestricted, Messages.StrictOctalLiteral) else: if (not firstRestricted) and token.octal: firstRestricted = token while index < length: sourceElement = parseSourceElement() if ('undefined' if not 'sourceElement' in locals() else typeof(sourceElement)) == "undefined": break sourceElements.append(sourceElement) return sourceElements def parseProgram(): global strict body = None skipComment() delegate.markStart() strict = False peek() body = parseSourceElements() return delegate.markEnd(delegate.createProgram(body)) def collectToken(): start = None loc = None token = None range = None value = None skipComment() start = index loc = jsdict({ "start": jsdict({ "line": lineNumber, "column": index - lineStart, }), }) token = extra.advance() loc.end = jsdict({ "line": lineNumber, "column": index - lineStart, }) if token.type != Token.EOF: range = [token.range[0], token.range[1]] value = source[token.range[0]:token.range[1]] extra.tokens.append(jsdict({ "type": TokenName[token.type], "value": value, "range": range, "loc": loc, })) return token def collectRegex(): pos = None loc = None regex = None token = None skipComment() pos = index loc = jsdict({ "start": jsdict({ "line": lineNumber, "column": index - lineStart, }), }) regex = extra.scanRegExp() loc.end = jsdict({ "line": lineNumber, "column": index - lineStart, }) if not extra.tokenize: if len(extra.tokens) > 0: token = 
extra.tokens[len(extra.tokens) - 1] if (token.range[0] == pos) and (token.type == "Punctuator"): if (token.value == "/") or (token.value == "/="): extra.tokens.pop() extra.tokens.append(jsdict({ "type": "RegularExpression", "value": regex.literal, "range": [pos, index], "loc": loc, })) return regex def filterTokenLocation(): i = None entry = None token = None tokens = [] i = 0 while 1: if not (i < len(extra.tokens)): break entry = extra.tokens[i] token = jsdict({ "type": entry.type, "value": entry.value, }) if extra.range: token.range = entry.range if extra.loc: token.loc = entry.loc tokens.append(token) i += 1 extra.tokens = tokens class LocationMarker(object): def __init__(self=None): self.marker = [index, lineNumber, index - lineStart, 0, 0, 0] def end(self=None): self.marker[3] = index self.marker[4] = lineNumber self.marker[5] = index - lineStart def apply(self=None, node=None): if extra.range: node.range = [self.marker[0], self.marker[3]] if extra.loc: node.loc = jsdict({ "start": jsdict({ "line": self.marker[1], "column": self.marker[2], }), "end": jsdict({ "line": self.marker[4], "column": self.marker[5], }), }) node = delegate.postProcess(node) def createLocationMarker(): if (not extra.loc) and (not extra.range): return None skipComment() return LocationMarker() def patch(): global advance, scanRegExp if ('undefined' if not ('tokens' in extra) else typeof(extra.tokens)) != "undefined": extra.advance = advance extra.scanRegExp = scanRegExp advance = collectToken scanRegExp = collectRegex def unpatch(): global advance, scanRegExp if ('undefined' if not ('scanRegExp' in extra) else typeof(extra.scanRegExp)) == "function": advance = extra.advance scanRegExp = extra.scanRegExp def tokenize(code, **options): global delegate, source, index, lineNumber, lineStart, length, lookahead, state, extra options = jsdict(options) toString = None token = None tokens = None toString = str if (('undefined' if not 'code' in locals() else typeof(code)) != "string") and (not 
isinstance(code, str)): code = toString(code) delegate = SyntaxTreeDelegate source = code index = 0 lineNumber = (1 if len(source) > 0 else 0) lineStart = 0 length = len(source) lookahead = None state = jsdict({ "allowIn": True, "labelSet": jsdict({ }), "inFunctionBody": False, "inIteration": False, "inSwitch": False, "lastCommentStart": -1, }) extra = jsdict({ }) options = options or jsdict({ }) options.tokens = True extra.tokens = [] extra.tokenize = True extra.openParenToken = -1 extra.openCurlyToken = -1 extra.range = (('undefined' if not ('range' in options) else typeof(options.range)) == "boolean") and options.range extra.loc = (('undefined' if not ('loc' in options) else typeof(options.loc)) == "boolean") and options.loc if (('undefined' if not ('comment' in options) else typeof(options.comment)) == "boolean") and options.comment: extra.comments = [] if (('undefined' if not ('tolerant' in options) else typeof(options.tolerant)) == "boolean") and options.tolerant: extra.errors = [] if length > 0: if (typeof(source[0])) == "undefined": if isinstance(code, str): source = code.valueOf() patch() try: peek() if lookahead.type == Token.EOF: return extra.tokens token = lex() while lookahead.type != Token.EOF: try: token = lex() except Exception as lexError: token = lookahead if extra.errors: extra.errors.append(lexError) break else: raise filterTokenLocation() tokens = extra.tokens if ('undefined' if not ('comments' in extra) else typeof(extra.comments)) != "undefined": tokens.comments = extra.comments if ('undefined' if not ('errors' in extra) else typeof(extra.errors)) != "undefined": tokens.errors = extra.errors except Exception as e: raise finally: unpatch() extra = jsdict({ }) return tokens def parse(code, **options): global delegate, source, index, lineNumber, lineStart, length, lookahead, state, extra options = jsdict(options) program = None toString = None toString = str if (('undefined' if not 'code' in locals() else typeof(code)) != "string") and (not 
isinstance(code, str)): code = toString(code) delegate = SyntaxTreeDelegate source = code index = 0 lineNumber = (1 if len(source) > 0 else 0) lineStart = 0 length = len(source) lookahead = None state = jsdict({ "allowIn": True, "labelSet": jsdict({ }), "inFunctionBody": False, "inIteration": False, "inSwitch": False, "lastCommentStart": -1, "markerStack": [], }) extra = jsdict({ }) if ('undefined' if not 'options' in locals() else typeof(options)) != "undefined": extra.range = (('undefined' if not ('range' in options) else typeof(options.range)) == "boolean") and options.range extra.loc = (('undefined' if not ('loc' in options) else typeof(options.loc)) == "boolean") and options.loc if (extra.loc and (options.source != None)) and (options.source != undefined): extra.source = toString(options.source) if (('undefined' if not ('tokens' in options) else typeof(options.tokens)) == "boolean") and options.tokens: extra.tokens = [] if (('undefined' if not ('comment' in options) else typeof(options.comment)) == "boolean") and options.comment: extra.comments = [] if (('undefined' if not ('tolerant' in options) else typeof(options.tolerant)) == "boolean") and options.tolerant: extra.errors = [] if length > 0: if (typeof(source[0])) == "undefined": if isinstance(code, str): source = code.valueOf() patch() try: program = parseProgram() if ('undefined' if not ('comments' in extra) else typeof(extra.comments)) != "undefined": program.comments = extra.comments if ('undefined' if not ('tokens' in extra) else typeof(extra.tokens)) != "undefined": filterTokenLocation() program.tokens = extra.tokens if ('undefined' if not ('errors' in extra) else typeof(extra.errors)) != "undefined": program.errors = extra.errors except Exception as e: raise finally: unpatch() extra = jsdict({ }) return program parse('var = 490 \n a=4;')
99,391
Python
.py
2,732
29.533675
5,076
0.62646
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,834
objects.py
evilhero_mylar/lib/js2py/legecy_translators/objects.py
""" This module removes all objects/arrays from JS source code and replace them with LVALS. Also it has s function translating removed object/array to python code. Use this module just after removing constants. Later move on to removing functions""" OBJECT_LVAL = 'PyJsLvalObject%d_' ARRAY_LVAL = 'PyJsLvalArray%d_' from utils import * from jsparser import * from nodevisitor import exp_translator import functions from flow import KEYWORD_METHODS def FUNC_TRANSLATOR(*a):# stupid import system in python raise RuntimeError('Remember to set func translator. Thank you.') def set_func_translator(ftrans): # stupid stupid Python or Peter global FUNC_TRANSLATOR FUNC_TRANSLATOR = ftrans def is_empty_object(n, last): """n may be the inside of block or object""" if n.strip(): return False # seems to be but can be empty code last = last.strip() markers = {')', ';',} if not last or last[-1] in markers: return False return True # todo refine this function def is_object(n, last): """n may be the inside of block or object. last is the code before object""" if is_empty_object(n, last): return True if not n.strip(): return False #Object contains lines of code so it cant be an object if len(argsplit(n, ';'))>1: return False cands = argsplit(n, ',') if not cands[-1].strip(): return True # {xxxx,} empty after last , it must be an object for cand in cands: cand = cand.strip() # separate each candidate element at : in dict and check whether they are correct... kv = argsplit(cand, ':') if len(kv) > 2: # set the len of kv to 2 because of this stupid : expression kv = kv[0],':'.join(kv[1:]) if len(kv)==2: # key value pair, check whether not label or ?: k, v = kv if not is_lval(k.strip()): return False v = v.strip() if v.startswith('function'): continue #will fail on label... 
{xxx: while {}} if v[0]=='{': # value cant be a code block return False for e in KEYWORD_METHODS: # if v starts with any statement then return false if v.startswith(e) and len(e)<len(v) and v[len(e)] not in IDENTIFIER_PART: return False elif not (cand.startswith('set ') or cand.startswith('get ')): return False return True def is_array(last): #it can be prop getter last = last.strip() if any(endswith_keyword(last, e) for e in {'return', 'new', 'void', 'throw', 'typeof', 'in', 'instanceof'}): return True markers = {')', ']'} return not last or not (last[-1] in markers or last[-1] in IDENTIFIER_PART) def remove_objects(code, count=1): """ This function replaces objects with OBJECTS_LVALS, returns new code, replacement dict and count. count arg is the number that should be added to the LVAL of the first replaced object """ replacements = {} #replacement dict br = bracket_split(code, ['{}', '[]']) res = '' last = '' for e in br: #test whether e is an object if e[0]=='{': n, temp_rep, cand_count = remove_objects(e[1:-1], count) # if e was not an object then n should not contain any : if is_object(n, last): #e was an object res += ' '+OBJECT_LVAL % count replacements[OBJECT_LVAL % count] = e count += 1 else: # e was just a code block but could contain objects inside res += '{%s}' % n count = cand_count replacements.update(temp_rep) elif e[0]=='[': if is_array(last): res += e # will be translated later else: # prop get n, rep, count = remove_objects(e[1:-1], count) res += '[%s]' % n replacements.update(rep) else: # e does not contain any objects res += e last = e #needed to test for this stipid empty object return res, replacements, count def remove_arrays(code, count=1): """removes arrays and replaces them with ARRAY_LVALS returns new code and replacement dict *NOTE* has to be called AFTER remove objects""" res = '' last = '' replacements = {} for e in bracket_split(code, ['[]']): if e[0]=='[': if is_array(last): name = ARRAY_LVAL % count res += ' ' + name 
replacements[name] = e count += 1 else: # pseudo array. But pseudo array can contain true array. for example a[['d'][3]] has 2 pseudo and 1 true array cand, new_replacements, count = remove_arrays(e[1:-1], count) res += '[%s]' % cand replacements.update(new_replacements) else: res += e last = e return res, replacements, count def translate_object(obj, lval, obj_count=1, arr_count=1): obj = obj[1:-1] # remove {} from both ends obj, obj_rep, obj_count = remove_objects(obj, obj_count) obj, arr_rep, arr_count = remove_arrays(obj, arr_count) # functions can be defined inside objects. exp translator cant translate them. # we have to remove them and translate with func translator # its better explained in translate_array function obj, hoisted, inline = functions.remove_functions(obj, all_inline=True) assert not hoisted gsetters_after = '' keys = argsplit(obj) res = [] for i, e in enumerate(keys, 1): e = e.strip() if e.startswith('set '): gsetters_after += translate_setter(lval, e) elif e.startswith('get '): gsetters_after += translate_getter(lval, e) elif ':' not in e: if i<len(keys): # can happen legally only in the last element {3:2,} raise SyntaxError('Unexpected "," in Object literal') break else: #Not getter, setter or elision spl = argsplit(e, ':') if len(spl)<2: raise SyntaxError('Invalid Object literal: '+e) try: key, value = spl except: #len(spl)> 2 print 'Unusual case ' + repr(e) key = spl[0] value = ':'.join(spl[1:]) key = key.strip() if is_internal(key): key = '%s.to_string().value' % key else: key = repr(key) value = exp_translator(value) if not value: raise SyntaxError('Missing value in Object literal') res.append('%s:%s' % (key, value)) res = '%s = Js({%s})\n' % (lval, ','.join(res)) + gsetters_after # translate all the nested objects (including removed earlier functions) for nested_name, nested_info in inline.iteritems(): # functions nested_block, nested_args = nested_info new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args) res = new_def + 
res for lval, obj in obj_rep.iteritems(): #objects new_def, obj_count, arr_count = translate_object(obj, lval, obj_count, arr_count) # add object definition BEFORE array definition res = new_def + res for lval, obj in arr_rep.iteritems(): # arrays new_def, obj_count, arr_count = translate_array(obj, lval, obj_count, arr_count) # add object definition BEFORE array definition res = new_def + res return res, obj_count, arr_count def translate_setter(lval, setter): func = 'function' + setter[3:] try: _, data, _ = functions.remove_functions(func) if not data or len(data)>1: raise Exception() except: raise SyntaxError('Could not parse setter: '+setter) prop = data.keys()[0] body, args = data[prop] if len(args)!=1: #setter must have exactly 1 argument raise SyntaxError('Invalid setter. It must take exactly 1 argument.') # now messy part res = FUNC_TRANSLATOR('setter', body, args) res += "%s.define_own_property(%s, {'set': setter})\n"%(lval, repr(prop)) return res def translate_getter(lval, getter): func = 'function' + getter[3:] try: _, data, _ = functions.remove_functions(func) if not data or len(data)>1: raise Exception() except: raise SyntaxError('Could not parse getter: '+getter) prop = data.keys()[0] body, args = data[prop] if len(args)!=0: #setter must have exactly 0 argument raise SyntaxError('Invalid getter. It must take exactly 0 argument.') # now messy part res = FUNC_TRANSLATOR('getter', body, args) res += "%s.define_own_property(%s, {'get': setter})\n"%(lval, repr(prop)) return res def translate_array(array, lval, obj_count=1, arr_count=1): """array has to be any js array for example [1,2,3] lval has to be name of this array. 
Returns python code that adds lval to the PY scope it should be put before lval""" array = array[1:-1] array, obj_rep, obj_count = remove_objects(array, obj_count) array, arr_rep, arr_count = remove_arrays(array, arr_count) #functions can be also defined in arrays, this caused many problems since in Python # functions cant be defined inside literal # remove functions (they dont contain arrays or objects so can be translated easily) # hoisted functions are treated like inline array, hoisted, inline = functions.remove_functions(array, all_inline=True) assert not hoisted arr = [] # separate elements in array for e in argsplit(array, ','): # translate expressions in array PyJsLvalInline will not be translated! e = exp_translator(e.replace('\n', '')) arr.append(e if e else 'None') arr = '%s = Js([%s])\n' % (lval, ','.join(arr)) #But we can have more code to add to define arrays/objects/functions defined inside this array # translate nested objects: # functions: for nested_name, nested_info in inline.iteritems(): nested_block, nested_args = nested_info new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args) arr = new_def + arr for lval, obj in obj_rep.iteritems(): new_def, obj_count, arr_count = translate_object(obj, lval, obj_count, arr_count) # add object definition BEFORE array definition arr = new_def + arr for lval, obj in arr_rep.iteritems(): new_def, obj_count, arr_count = translate_array(obj, lval, obj_count, arr_count) # add object definition BEFORE array definition arr = new_def + arr return arr, obj_count, arr_count if __name__=='__main__': test = 'a = {404:{494:19}}; b = 303; if () {f={:}; { }}' #print remove_objects(test) #print list(bracket_split(' {}')) print print remove_arrays('typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""], [][[5][5]])[1].toLowerCase()])') print is_object('', ')')
11,176
Python
.py
258
34.922481
128
0.601433
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,835
functions.py
evilhero_mylar/lib/js2py/legecy_translators/functions.py
"""This module removes JS functions from source code""" from jsparser import * from utils import * INLINE_NAME = 'PyJsLvalInline%d_' INLINE_COUNT = 0 PRE_EXP_STARTS = {'return', 'new', 'void', 'throw', 'typeof', 'in', 'instanceof'} PRE_ALLOWED = IDENTIFIER_PART.union({';', '{', '}', ']', ')', ':'}) INCREMENTS = {'++', '--'} def reset_inline_count(): global INLINE_COUNT INLINE_COUNT = 0 def remove_functions(source, all_inline=False): """removes functions and returns new source, and 2 dicts. first dict with removed hoisted(global) functions and second with replaced inline functions""" global INLINE_COUNT inline = {} hoisted = {} n = 0 limit = len(source) - 9 # 8 is length of 'function' res = '' last = 0 while n < limit: if n and source[n-1] in IDENTIFIER_PART: n+=1 continue if source[n:n+8] == 'function' and source[n+8] not in IDENTIFIER_PART: if source[:n].rstrip().endswith('.'): # allow function as a property name :) n+=1 continue if source[n+8:].lstrip().startswith(':'): # allow functions inside objects... n+=1 continue entered = n res += source[last:n] name = '' n = pass_white(source, n+8) if source[n] in IDENTIFIER_START: # hoisted function name, n= parse_identifier(source, n) args, n = pass_bracket(source, n, '()') if not args: raise SyntaxError('Function misses bracket with argnames ()') args = args.strip('() \n') args = tuple(parse_identifier(e, 0)[0] for e in argsplit(args)) if args else () if len(args) - len(set(args)): # I know its legal in JS but python does not allow duplicate argnames # I will not work around it raise SyntaxError('Function has duplicate argument names. Its not legal in this implementation. 
Sorry.') block, n = pass_bracket(source, n, '{}') if not block: raise SyntaxError('Function does not have any code block to execute') mixed = False # named function expression flag if name and not all_inline: # Here I will distinguish between named function expression (mixed) and a function statement before = source[:entered].rstrip() if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS): #print 'Ended ith keyword' mixed = True elif before and before[-1] not in PRE_ALLOWED and not before[-2:] in INCREMENTS: #print 'Ended with'+repr(before[-1]), before[-1]=='}' mixed = True else: #print 'FUNCTION STATEMENT' #its a function statement. # todo remove fucking label if present! hoisted[name] = block, args if not name or mixed or all_inline: # its a function expression (can be both named and not named) #print 'FUNCTION EXPRESSION' INLINE_COUNT += 1 iname = INLINE_NAME%INLINE_COUNT # inline name res += ' '+ iname inline['%s@%s' %(iname, name)] = block, args #here added real name at the end because it has to be added to the func scope last = n else: n+=1 res += source[last:] return res, hoisted, inline if __name__=='__main__': print remove_functions('5+5 function n (functiona ,functionaj) {dsd s, dsdd}')
3,698
Python
.py
78
35.833333
138
0.559059
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,836
translator.py
evilhero_mylar/lib/js2py/legecy_translators/translator.py
from flow import translate_flow from constants import remove_constants, recover_constants from objects import remove_objects, remove_arrays, translate_object, translate_array, set_func_translator from functions import remove_functions, reset_inline_count from jsparser import inject_before_lval, indent, dbg TOP_GLOBAL = '''from js2py.pyjs import *\nvar = Scope( JS_BUILTINS )\nset_global_object(var)\n''' def translate_js(js, top=TOP_GLOBAL): """js has to be a javascript source code. returns equivalent python code.""" # Remove constant literals no_const, constants = remove_constants(js) #print 'const count', len(constants) # Remove object literals no_obj, objects, obj_count = remove_objects(no_const) #print 'obj count', len(objects) # Remove arrays no_arr, arrays, arr_count = remove_arrays(no_obj) #print 'arr count', len(arrays) # Here remove and replace functions reset_inline_count() no_func, hoisted, inline = remove_functions(no_arr) #translate flow and expressions py_seed, to_register = translate_flow(no_func) # register variables and hoisted functions #top += '# register variables\n' top += 'var.registers(%s)\n' % str(to_register + hoisted.keys()) #Recover functions # hoisted functions recovery defs = '' #defs += '# define hoisted functions\n' #print len(hoisted) , 'HH'*40 for nested_name, nested_info in hoisted.iteritems(): nested_block, nested_args = nested_info new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args) new_code += 'PyJsLvalTempHoisted.func_name = %s\n' %repr(nested_name) defs += new_code +'\nvar.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name) #defs += '# Everting ready!\n' # inline functions recovery for nested_name, nested_info in inline.iteritems(): nested_block, nested_args = nested_info new_code = translate_func(nested_name, nested_block, nested_args) py_seed = inject_before_lval(py_seed, nested_name.split('@')[0], new_code) # add hoisted definitiond - they have literals that have to be recovered py_seed = defs + 
py_seed #Recover arrays for arr_lval, arr_code in arrays.iteritems(): translation, obj_count, arr_count = translate_array(arr_code, arr_lval, obj_count, arr_count) py_seed = inject_before_lval(py_seed, arr_lval, translation) #Recover objects for obj_lval, obj_code in objects.iteritems(): translation, obj_count, arr_count = translate_object(obj_code, obj_lval, obj_count, arr_count) py_seed = inject_before_lval(py_seed, obj_lval, translation) #Recover constants py_code = recover_constants(py_seed, constants) return top + py_code def translate_func(name, block, args): """Translates functions and all nested functions to Python code. name - name of that function (global functions will be available under var while inline will be available directly under this name ) block - code of the function (*with* brackets {} ) args - arguments that this function takes""" inline = name.startswith('PyJsLvalInline') real_name = '' if inline: name, real_name = name.split('@') arglist = ', '.join(args) +', ' if args else '' code = '@Js\ndef %s(%sthis, arguments, var=var):\n' % (name, arglist) # register local variables scope = "'this':this, 'arguments':arguments" #it will be a simple dictionary for arg in args: scope += ', %s:%s' %(repr(arg), arg) if real_name: scope += ', %s:%s' % (repr(real_name), name) code += indent('var = Scope({%s}, var)\n' % scope) block, nested_hoisted, nested_inline = remove_functions(block) py_code, to_register = translate_flow(block) #register variables declared with var and names of hoisted functions. 
to_register += nested_hoisted.keys() if to_register: code += indent('var.registers(%s)\n'% str(to_register)) for nested_name, info in nested_hoisted.iteritems(): nested_block, nested_args = info new_code = translate_func('PyJsLvalTempHoisted', nested_block, nested_args) # Now put definition of hoisted function on the top code += indent(new_code) code += indent('PyJsLvalTempHoisted.func_name = %s\n' %repr(nested_name)) code += indent('var.put(%s, PyJsLvalTempHoisted)\n' % repr(nested_name)) for nested_name, info in nested_inline.iteritems(): nested_block, nested_args = info new_code = translate_func(nested_name, nested_block, nested_args) # Inject definitions of inline functions just before usage # nested inline names have this format : LVAL_NAME@REAL_NAME py_code = inject_before_lval(py_code, nested_name.split('@')[0], new_code) if py_code.strip(): code += indent(py_code) return code set_func_translator(translate_func) #print inject_before_lval(' chuj\n moj\n lval\nelse\n', 'lval', 'siema\njestem piter\n') import time #print time.time() #print translate_js('if (1) console.log("Hello, World!"); else if (5) console.log("Hello world?");') #print time.time() t = """ var x = [1,2,3,4,5,6]; for (var e in x) {console.log(e); delete x[3];} console.log(5 in [1,2,3,4,5]); """ SANDBOX =''' import traceback try: %s except: print traceback.format_exc() print raw_input('Press Enter to quit') ''' if __name__=='__main__': # test with jq if works then it really works :) #with open('jq.js', 'r') as f: #jq = f.read() #res = translate_js(jq) res = translate_js(t) dbg(SANDBOX% indent(res)) print 'Done'
5,676
Python
.py
124
40.467742
105
0.674738
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,837
tokenize.py
evilhero_mylar/lib/js2py/legecy_translators/tokenize.py
from jsparser import * from utils import * # maybe I will try rewriting my parser in the future... Tokenizer makes things much easier and faster, unfortunately I # did not know anything about parsers when I was starting this project so I invented my own.
255
Python
.py
4
62.75
118
0.796813
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,838
injector.py
evilhero_mylar/lib/js2py/utils/injector.py
__all__ = ['fix_js_args'] import types import opcode import six if six.PY3: xrange = range chr = lambda x: x # Opcode constants used for comparison and replacecment LOAD_FAST = opcode.opmap['LOAD_FAST'] LOAD_GLOBAL = opcode.opmap['LOAD_GLOBAL'] STORE_FAST = opcode.opmap['STORE_FAST'] def fix_js_args(func): '''Use this function when unsure whether func takes this and arguments as its last 2 args. It will append 2 args if it does not.''' fcode = six.get_function_code(func) fargs = fcode.co_varnames[fcode.co_argcount-2:fcode.co_argcount] if fargs==('this', 'arguments') or fargs==('arguments', 'var'): return func code = append_arguments(six.get_function_code(func), ('this','arguments')) return types.FunctionType(code, six.get_function_globals(func), func.__name__, closure=six.get_function_closure(func)) def append_arguments(code_obj, new_locals): co_varnames = code_obj.co_varnames # Old locals co_names = code_obj.co_names # Old globals co_names+=tuple(e for e in new_locals if e not in co_names) co_argcount = code_obj.co_argcount # Argument count co_code = code_obj.co_code # The actual bytecode as a string # Make one pass over the bytecode to identify names that should be # left in code_obj.co_names. not_removed = set(opcode.hasname) - set([LOAD_GLOBAL]) saved_names = set() for inst in instructions(co_code): if inst[0] in not_removed: saved_names.add(co_names[inst[1]]) # Build co_names for the new code object. This should consist of # globals that were only accessed via LOAD_GLOBAL names = tuple(name for name in co_names if name not in set(new_locals) - saved_names) # Build a dictionary that maps the indices of the entries in co_names # to their entry in the new co_names name_translations = dict((co_names.index(name), i) for i, name in enumerate(names)) # Build co_varnames for the new code object. 
This should consist of # the entirety of co_varnames with new_locals spliced in after the # arguments new_locals_len = len(new_locals) varnames = (co_varnames[:co_argcount] + new_locals + co_varnames[co_argcount:]) # Build the dictionary that maps indices of entries in the old co_varnames # to their indices in the new co_varnames range1, range2 = xrange(co_argcount), xrange(co_argcount, len(co_varnames)) varname_translations = dict((i, i) for i in range1) varname_translations.update((i, i + new_locals_len) for i in range2) # Build the dictionary that maps indices of deleted entries of co_names # to their indices in the new co_varnames names_to_varnames = dict((co_names.index(name), varnames.index(name)) for name in new_locals) # Now we modify the actual bytecode modified = [] for inst in instructions(code_obj.co_code): # If the instruction is a LOAD_GLOBAL, we have to check to see if # it's one of the globals that we are replacing. Either way, # update its arg using the appropriate dict. if inst[0] == LOAD_GLOBAL: if inst[1] in names_to_varnames: inst[0] = LOAD_FAST inst[1] = names_to_varnames[inst[1]] elif inst[1] in name_translations: inst[1] = name_translations[inst[1]] else: raise ValueError("a name was lost in translation") # If it accesses co_varnames or co_names then update its argument. 
elif inst[0] in opcode.haslocal: inst[1] = varname_translations[inst[1]] elif inst[0] in opcode.hasname: inst[1] = name_translations[inst[1]] modified.extend(write_instruction(inst)) if six.PY2: code = ''.join(modified) args = (co_argcount + new_locals_len, code_obj.co_nlocals + new_locals_len, code_obj.co_stacksize, code_obj.co_flags, code, code_obj.co_consts, names, varnames, code_obj.co_filename, code_obj.co_name, code_obj.co_firstlineno, code_obj.co_lnotab, code_obj.co_freevars, code_obj.co_cellvars) else: #print(modified) code = bytes(modified) #print(code) args = (co_argcount + new_locals_len, 0, code_obj.co_nlocals + new_locals_len, code_obj.co_stacksize, code_obj.co_flags, code, code_obj.co_consts, names, varnames, code_obj.co_filename, code_obj.co_name, code_obj.co_firstlineno, code_obj.co_lnotab, code_obj.co_freevars, code_obj.co_cellvars) # Done modifying codestring - make the code object return types.CodeType(*args) def instructions(code): if six.PY2: code = map(ord, code) i, L = 0, len(code) extended_arg = 0 while i < L: op = code[i] i+= 1 if op < opcode.HAVE_ARGUMENT: yield [op, None] continue oparg = code[i] + (code[i+1] << 8) + extended_arg extended_arg = 0 i += 2 if op == opcode.EXTENDED_ARG: extended_arg = oparg << 16 continue yield [op, oparg] def write_instruction(inst): op, oparg = inst if oparg is None: return [chr(op)] elif oparg <= 65536: return [chr(op), chr(oparg & 255), chr((oparg >> 8) & 255)] elif oparg <= 4294967296: return [chr(opcode.EXTENDED_ARG), chr((oparg >> 16) & 255), chr((oparg >> 24) & 255), chr(op), chr(oparg & 255), chr((oparg >> 8) & 255)] else: raise ValueError("Invalid oparg: {0} is too large".format(oparg)) if __name__=='__main__': x = 'Wrong' dick = 3000 def func(a): print(x,y,z, a) print(dick) d = (x,) for e in (e for e in x): print(e) return x, y, z func2 =types.FunctionType(append_arguments(six.get_function_code(func), ('x', 'y', 'z')), six.get_function_globals(func), func.__name__, 
closure=six.get_function_closure(func)) args = (2,2,3,4),3,4 assert func2(1, *args) == args
6,745
Python
.py
159
31.798742
180
0.572975
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,839
definitions.py
evilhero_mylar/lib/js2py/utils/definitions.py
from pyparsing import * IdentifierStart = oneOf(['$', '_']+list(alphas)) Identifier = Combine(IdentifierStart + Optional(Word(alphas+nums+'$_'))) _keywords = ['break', 'do', 'instanceof', 'typeof', 'case', 'else', 'new', 'var', 'catch', 'finally', 'return', 'void', 'continue', 'for', 'switch', 'while', 'debugger', 'function', 'this', 'with', 'default', 'if', 'throw', 'delete', 'in', 'try'] Keyword = oneOf(_keywords) #Literals #Bool BooleanLiteral = oneOf(('true', 'false')) #Null NullLiteral = Literal('null') #Undefined UndefinedLiteral = Literal('undefined') #NaN NaNLiteral = Literal('NaN') #Number NonZeroDigit = oneOf(['1','2','3','4','5','6','7','8','9']) DecimalDigit = oneOf(['0', '1','2','3','4','5','6','7','8','9']) HexDigit = oneOf(list('0123456789abcdefABCDEF')) DecimalDigits = Word(nums) DecimalIntegerLiteral = Combine(NonZeroDigit+Optional(DecimalDigits)) | '0' SignedInteger = Combine('-'+DecimalDigits) | Combine('+'+DecimalDigits) | DecimalDigits ExponentPart = Combine(oneOf('e', 'E')+SignedInteger) _DecimalLiteral = (Combine(DecimalIntegerLiteral('int')+'.'+Optional(DecimalDigits('float'))+Optional(ExponentPart('exp'))) | Combine('.'+DecimalDigits('float')+Optional(ExponentPart('exp'))) | DecimalIntegerLiteral('int')+Optional(ExponentPart('exp'))) DecimalLiteral = Combine(_DecimalLiteral+NotAny(IdentifierStart)) HexIntegerLiteral = Combine(oneOf(('0x','0X'))+Word('0123456789abcdefABCDEF')('hex')) NumericLiteral = Group(DecimalLiteral)('decimal') ^ Group(HexIntegerLiteral)('hex') def js_num(num): res = NumericLiteral.parseString(num) if res.decimal: res = res.decimal cand = int(res.int if res.int else 0)+ float('0.'+res.float if res.float else 0) if res.exp: cand*= 10**int(res.exp) return cand elif res.hex: return int(res.hex.hex, 16) #String LineTerminator = White('\n', 1,1,1) | White('\r', 1,1,1) LineTerminatorSequence = Combine(White('\r', 1,1,1)+White('\n', 1,1,1)) | White('\n', 1,1,1) | White('\r', 1,1,1) LineContinuation = 
Combine('\\'+LineTerminatorSequence) UnicodeEscapeSequence = Combine('u'+HexDigit+HexDigit+HexDigit+HexDigit) HexEscapeSequence = Combine('x'+HexDigit+HexDigit) SingleEscapeCharacter = oneOf(["'", '"', '\\', 'b', 'f', 'n', 'r', 't', 'v']) EscapeCharacter = SingleEscapeCharacter | '0' | 'x' | 'u' # Changed DecimalDigit to 0 since it would match for example "\3" To verify.. NonEscapeCharacter = CharsNotIn([EscapeCharacter |LineTerminator]) CharacterEscapeSequence = SingleEscapeCharacter | NonEscapeCharacter EscapeSequence = CharacterEscapeSequence | Combine('0'+NotAny(DecimalDigit)) | HexEscapeSequence | UnicodeEscapeSequence SingleStringCharacter = CharsNotIn([LineTerminator | '\\' | "'"]) | Combine('\\'+EscapeSequence) | LineContinuation DoubleStringCharacter = CharsNotIn([LineTerminator | '\\' | '"']) | Combine('\\'+EscapeSequence) | LineContinuation StringLiteral = Combine('"'+ZeroOrMore(DoubleStringCharacter)+'"') ^ Combine("'"+ZeroOrMore(SingleStringCharacter)+"'") #Array #Dict
3,141
Python
.py
56
52.107143
137
0.688013
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,840
friendly_nodes.py
evilhero_mylar/lib/js2py/translators/friendly_nodes.py
import binascii from .pyjsparser import PyJsParser import six if six.PY3: basestring = str long = int xrange = range unicode = str REGEXP_CONVERTER = PyJsParser() def to_hex(s): return binascii.hexlify(s.encode('utf8')).decode('utf8') # fucking python 3, I hate it so much # wtf was wrong with s.encode('hex') ??? def indent(lines, ind=4): return ind*' '+lines.replace('\n', '\n'+ind*' ').rstrip(' ') def inject_before_lval(source, lval, code): if source.count(lval)>1: print() print(lval) raise RuntimeError('To many lvals (%s)' % lval) elif not source.count(lval): print() print(lval) assert lval not in source raise RuntimeError('No lval found "%s"' % lval) end = source.index(lval) inj = source.rfind('\n', 0, end) ind = inj while source[ind+1]==' ': ind+=1 ind -= inj return source[:inj+1]+ indent(code, ind) + source[inj+1:] def get_continue_label(label): return CONTINUE_LABEL%to_hex(label) def get_break_label(label): return BREAK_LABEL%to_hex(label) def is_valid_py_name(name): try: compile(name+' = 11', 'a','exec') except: return False return True def indent(lines, ind=4): return ind*' '+lines.replace('\n', '\n'+ind*' ').rstrip(' ') def compose_regex(val): reg, flags = val #reg = REGEXP_CONVERTER._unescape_string(reg) return u'/%s/%s' % (reg, flags) def float_repr(f): if int(f)==f: return repr(int(f)) return repr(f) def argsplit(args, sep=','): """used to split JS args (it is not that simple as it seems because sep can be inside brackets). pass args *without* brackets! 
Used also to parse array and object elements, and more""" parsed_len = 0 last = 0 splits = [] for e in bracket_split(args, brackets=['()', '[]', '{}']): if e[0] not in {'(', '[', '{'}: for i, char in enumerate(e): if char==sep: splits.append(args[last:parsed_len+i]) last = parsed_len + i + 1 parsed_len += len(e) splits.append(args[last:]) return splits def bracket_split(source, brackets=('()','{}','[]'), strip=False): """DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)""" starts = [e[0] for e in brackets] in_bracket = 0 n = 0 last = 0 while n<len(source): e = source[n] if not in_bracket and e in starts: in_bracket = 1 start = n b_start, b_end = brackets[starts.index(e)] elif in_bracket: if e==b_start: in_bracket += 1 elif e==b_end: in_bracket -= 1 if not in_bracket: if source[last:start]: yield source[last:start] last = n+1 yield source[start+strip:n+1-strip] n+=1 if source[last:]: yield source[last:] def js_comma(a, b): return 'PyJsComma('+a+','+b+')' def js_or(a, b): return '('+a+' or '+b+')' def js_bor(a, b): return '('+a+'|'+b+')' def js_bxor(a, b): return '('+a+'^'+b+')' def js_band(a, b): return '('+a+'&'+b+')' def js_and(a, b): return '('+a+' and '+b+')' def js_strict_eq(a, b): return 'PyJsStrictEq('+a+','+b+')' def js_strict_neq(a, b): return 'PyJsStrictNeq('+a+','+b+')' #Not handled by python in the same way like JS. For example 2==2==True returns false. # In JS above would return true so we need brackets. 
def js_abstract_eq(a, b): return '('+a+'=='+b+')' #just like == def js_abstract_neq(a, b): return '('+a+'!='+b+')' def js_lt(a, b): return '('+a+'<'+b+')' def js_le(a, b): return '('+a+'<='+b+')' def js_ge(a, b): return '('+a+'>='+b+')' def js_gt(a, b): return '('+a+'>'+b+')' def js_in(a, b): return b+'.contains('+a+')' def js_instanceof(a, b): return a+'.instanceof('+b+')' def js_lshift(a, b): return '('+a+'<<'+b+')' def js_rshift(a, b): return '('+a+'>>'+b+')' def js_shit(a, b): return 'PyJsBshift('+a+','+b+')' def js_add(a, b): # To simplify later process of converting unary operators + and ++ return '(%s+%s)'%(a, b) def js_sub(a, b): # To simplify return '(%s-%s)'%(a, b) def js_mul(a, b): return '('+a+'*'+b+')' def js_div(a, b): return '('+a+'/'+b+')' def js_mod(a, b): return '('+a+'%'+b+')' def js_typeof(a): cand = list(bracket_split(a, ('()',))) if len(cand)==2 and cand[0]=='var.get': return cand[0]+cand[1][:-1]+',throw=False).typeof()' return a+'.typeof()' def js_void(a): # eval and return undefined return 'PyJsComma(%s, Js(None))' % a def js_new(a): cands = list(bracket_split(a, ('()',))) lim = len(cands) if lim < 2: return a + '.create()' n = 0 while n < lim: c = cands[n] if c[0]=='(': if cands[n-1].endswith('.get') and n+1>=lim: # last get operation. return a + '.create()' elif cands[n-1][0]=='(': return ''.join(cands[:n])+'.create' + c + ''.join(cands[n+1:]) elif cands[n-1]=='.callprop': beg = ''.join(cands[:n-1]) args = argsplit(c[1:-1],',') prop = args[0] new_args = ','.join(args[1:]) create = '.get(%s).create(%s)' % (prop, new_args) return beg + create + ''.join(cands[n+1:]) n+=1 return a + '.create()' def js_delete(a): #replace last get with delete. c = list(bracket_split(a, ['()'])) beg, arglist = ''.join(c[:-1]).strip(), c[-1].strip() #strips just to make sure... 
I will remove it later if beg[-4:]!='.get': print(a) raise SyntaxError('Invalid delete operation') return beg[:-3]+'delete'+arglist def js_neg(a): return '(-'+a+')' def js_pos(a): return '(+'+a+')' def js_inv(a): return '(~'+a+')' def js_not(a): return a+'.neg()' def js_postfix(a, inc, post): bra = list(bracket_split(a, ('()',))) meth = bra[-2] if not meth.endswith('get'): raise SyntaxError('Invalid ++ or -- operation.') bra[-2] = bra[-2][:-3] + 'put' bra[-1] = '(%s,Js(%s.to_number())%sJs(1))' % (bra[-1][1:-1], a, '+' if inc else '-') res = ''.join(bra) return res if not post else '(%s%sJs(1))' % (res, '-' if inc else '+') def js_pre_inc(a): return js_postfix(a, True, False) def js_post_inc(a): return js_postfix(a, True, True) def js_pre_dec(a): return js_postfix(a, False, False) def js_post_dec(a): return js_postfix(a, False, True) CONTINUE_LABEL = 'JS_CONTINUE_LABEL_%s' BREAK_LABEL = 'JS_BREAK_LABEL_%s' PREPARE = '''HOLDER = var.own.get(NAME)\nvar.force_own_put(NAME, PyExceptionToJs(PyJsTempException))\n''' RESTORE = '''if HOLDER is not None:\n var.own[NAME] = HOLDER\nelse:\n del var.own[NAME]\ndel HOLDER\n''' TRY_CATCH = '''%stry:\nBLOCKfinally:\n%s''' % (PREPARE, indent(RESTORE)) OR = {'||': js_or} AND = {'&&': js_and} BOR = {'|': js_bor} BXOR = {'^': js_bxor} BAND = {'&': js_band} EQS = {'===': js_strict_eq, '!==': js_strict_neq, '==': js_abstract_eq, # we need == and != too. Read a note above method '!=': js_abstract_neq} #Since JS does not have chained comparisons we need to implement all cmp methods. 
COMPS = {'<': js_lt, '<=': js_le, '>=': js_ge, '>': js_gt, 'instanceof': js_instanceof, #todo change to validitate 'in': js_in} BSHIFTS = {'<<': js_lshift, '>>': js_rshift, '>>>': js_shit} ADDS = {'+': js_add, '-': js_sub} MULTS = {'*': js_mul, '/': js_div, '%': js_mod} BINARY = {} BINARY.update(ADDS) BINARY.update(MULTS) BINARY.update(BSHIFTS) BINARY.update(COMPS) BINARY.update(EQS) BINARY.update(BAND) BINARY.update(BXOR) BINARY.update(BOR) BINARY.update(AND) BINARY.update(OR) #Note they dont contain ++ and -- methods because they both have 2 different methods # correct method will be found automatically in translate function UNARY = {'typeof': js_typeof, 'void': js_void, 'new': js_new, 'delete': js_delete, '!': js_not, '-': js_neg, '+': js_pos, '~': js_inv, '++': None, '--': None }
8,485
Python
.py
260
26.376923
110
0.540507
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,841
translating_nodes.py
evilhero_mylar/lib/js2py/translators/translating_nodes.py
from __future__ import unicode_literals from .pyjsparserdata import * from .friendly_nodes import * import random import six if six.PY3: from functools import reduce xrange = range unicode = str # number of characters above which expression will be split to multiple lines in order to avoid python parser stack overflow # still experimental so I suggest to set it to 400 in order to avoid common errors # set it to smaller value only if you have problems with parser stack overflow LINE_LEN_LIMIT = 400 # 200 # or any other value - the larger the smaller probability of errors :) class ForController: def __init__(self): self.inside = [False] self.update = '' def enter_for(self, update): self.inside.append(True) self.update = update def leave_for(self): self.inside.pop() def enter_other(self): self.inside.append(False) def leave_other(self): self.inside.pop() def is_inside(self): return self.inside[-1] class InlineStack: NAME = 'PyJs_%s_%d_' def __init__(self): self.reps = {} self.names = [] def inject_inlines(self, source): for lval in self.names: # first in first out! 
Its important by the way source = inject_before_lval(source, lval, self.reps[lval]) return source def require(self, typ): name = self.NAME % (typ, len(self.names)) self.names.append(name) return name def define(self, name, val): self.reps[name] = val def reset(self): self.rel = {} self.names = [] class ContextStack: def __init__(self): self.to_register = set([]) self.to_define = {} def reset(self): self.to_register = set([]) self.to_define = {} def register(self, var): self.to_register.add(var) def define(self, name, code): self.to_define[name] = code self.register(name) def get_code(self): code = 'var.registers([%s])\n' % ', '.join(repr(e) for e in self.to_register) for name, func_code in six.iteritems(self.to_define): code += func_code return code def clean_stacks(): global Context, inline_stack Context = ContextStack() inline_stack = InlineStack() def to_key(literal_or_identifier): ''' returns string representation of this object''' if literal_or_identifier['type']=='Identifier': return literal_or_identifier['name'] elif literal_or_identifier['type']=='Literal': k = literal_or_identifier['value'] if isinstance(k, float): return unicode(float_repr(k)) elif 'regex' in literal_or_identifier: return compose_regex(k) elif isinstance(k, bool): return 'true' if k else 'false' elif k is None: return 'null' else: return unicode(k) def trans(ele, standard=False): """Translates esprima syntax tree to python by delegating to appropriate translating node""" try: node = globals().get(ele['type']) if not node: raise NotImplementedError('%s is not supported!' % ele['type']) if standard: node = node.__dict__['standard'] if 'standard' in node.__dict__ else node return node(**ele) except: #print ele raise def limited(func): '''Decorator limiting resulting line length in order to avoid python parser stack overflow - If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line USE ONLY ON EXPRESSIONS!!! 
''' def f(standard=False, **args): insert_pos = len(inline_stack.names) # in case line is longer than limit we will have to insert the lval at current position # this is because calling func will change inline_stack. # we cant use inline_stack.require here because we dont know whether line overflows yet res = func(**args) if len(res)>LINE_LEN_LIMIT: name = inline_stack.require('LONG') inline_stack.names.pop() inline_stack.names.insert(insert_pos, name) res = 'def %s(var=var):\n return %s\n' % (name, res) inline_stack.define(name, res) return name+'()' else: return res f.__dict__['standard'] = func return f # ==== IDENTIFIERS AND LITERALS ======= inf = float('inf') def Literal(type, value, raw, regex=None): if regex: # regex return 'JsRegExp(%s)' % repr(compose_regex(value)) elif value is None: # null return 'var.get(u"null")' # Todo template # String, Bool, Float return 'Js(%s)' % repr(value) if value!=inf else 'Js(float("inf"))' def Identifier(type, name): return 'var.get(%s)' % repr(name) @limited def MemberExpression(type, computed, object, property): far_left = trans(object) if computed: # obj[prop] type accessor # may be literal which is the same in every case so we can save some time on conversion if property['type'] == 'Literal': prop = repr(to_key(property)) else: # worst case prop = trans(property) else: # always the same since not computed (obj.prop accessor) prop = repr(to_key(property)) return far_left + '.get(%s)' % prop def ThisExpression(type): return 'var.get(u"this")' @limited def CallExpression(type, callee, arguments): arguments = [trans(e) for e in arguments] if callee['type']=='MemberExpression': far_left = trans(callee['object']) if callee['computed']: # obj[prop] type accessor # may be literal which is the same in every case so we can save some time on conversion if callee['property']['type'] == 'Literal': prop = repr(to_key(callee['property'])) else: # worst case prop = trans(callee['property']) # its not a string literal! 
so no repr else: # always the same since not computed (obj.prop accessor) prop = repr(to_key(callee['property'])) arguments.insert(0, prop) return far_left + '.callprop(%s)' % ', '.join(arguments) else: # standard call return trans(callee) + '(%s)' % ', '.join(arguments) # ========== ARRAYS ============ def ArrayExpression(type, elements): # todo fix null inside problem return 'Js([%s])' % ', '.join(trans(e) if e else 'None' for e in elements) # ========== OBJECTS ============= def ObjectExpression(type, properties): name = inline_stack.require('Object') elems = [] after = '' for p in properties: if p['kind']=='init': elems.append('%s:%s' % Property(**p)) elif p['kind']=='set': k, setter = Property(**p) # setter is just a lval referring to that function, it will be defined in InlineStack automatically after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % (name, k, setter) elif p['kind']=='get': k, getter = Property(**p) after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % (name, k, getter) else: raise RuntimeError('Unexpected object propery kind') obj = '%s = Js({%s})\n' % (name, ','.join(elems)) inline_stack.define(name, obj+after) return name def Property(type, kind, key, computed, value, method, shorthand): if shorthand or computed: raise NotImplementedError('Shorthand and Computed properties not implemented!') k = to_key(key) if k is None: raise SyntaxError('Invalid key in dictionary! Or bug in Js2Py') v = trans(value) return repr(k), v # ========== EXPRESSIONS ============ @limited def UnaryExpression(type, operator, argument, prefix): a = trans(argument, standard=True) # unary involve some complex operations so we cant use line shorteners here if operator=='delete': if argument['type'] in {'Identifier', 'MemberExpression'}: # means that operation is valid return js_delete(a) return 'PyJsComma(%s, Js(True))' % a # otherwise not valid, just perform expression and return true. 
elif operator=='typeof': return js_typeof(a) return UNARY[operator](a) @limited def BinaryExpression(type, operator, left, right): a = trans(left) b = trans(right) # delegate to our friends return BINARY[operator](a,b) @limited def UpdateExpression(type, operator, argument, prefix): a = trans(argument, standard=True) # also complex operation involving parsing of the result so no line length reducing here return js_postfix(a, operator=='++', not prefix) @limited def AssignmentExpression(type, operator, left, right): operator = operator[:-1] if left['type']=='Identifier': if operator: return 'var.put(%s, %s, %s)' % (repr(to_key(left)), trans(right), repr(operator)) else: return 'var.put(%s, %s)' % (repr(to_key(left)), trans(right)) elif left['type']=='MemberExpression': far_left = trans(left['object']) if left['computed']: # obj[prop] type accessor # may be literal which is the same in every case so we can save some time on conversion if left['property']['type'] == 'Literal': prop = repr(to_key(left['property'])) else: # worst case prop = trans(left['property']) # its not a string literal! 
so no repr else: # always the same since not computed (obj.prop accessor) prop = repr(to_key(left['property'])) if operator: return far_left + '.put(%s, %s, %s)' % (prop, trans(right), repr(operator)) else: return far_left + '.put(%s, %s)' % (prop, trans(right)) else: raise SyntaxError('Invalid left hand side in assignment!') six @limited def SequenceExpression(type, expressions): return reduce(js_comma, (trans(e) for e in expressions)) @limited def NewExpression(type, callee, arguments): return trans(callee) + '.create(%s)' % ', '.join(trans(e) for e in arguments) @limited def ConditionalExpression(type, test, consequent, alternate): # caused plenty of problems in my home-made translator :) return '(%s if %s else %s)' % (trans(consequent), trans(test), trans(alternate)) # =========== STATEMENTS ============= def BlockStatement(type, body): return StatementList(body) # never returns empty string! In the worst case returns pass\n def ExpressionStatement(type, expression): return trans(expression) + '\n' # end expression space with new line def BreakStatement(type, label): if label: return 'raise %s("Breaked")\n' % (get_break_label(label['name'])) else: return 'break\n' def ContinueStatement(type, label): if label: return 'raise %s("Continued")\n' % (get_continue_label(label['name'])) else: return 'continue\n' def ReturnStatement(type, argument): return 'return %s\n' % (trans(argument) if argument else "var.get('undefined')") def EmptyStatement(type): return 'pass\n' def DebuggerStatement(type): return 'pass\n' def DoWhileStatement(type, body, test): inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n') result = 'while 1:\n' + indent(inside) return result def ForStatement(type, init, test, update, body): update = indent(trans(update)) if update else '' init = trans(init) if init else '' if not init.endswith('\n'): init += '\n' test = trans(test) if test else '1' if not update: result = '#for JS loop\n%swhile %s:\n%s%s\n' % (init, test, 
indent(trans(body)), update) else: result = '#for JS loop\n%swhile %s:\n' % (init, test) body = 'try:\n%sfinally:\n %s\n' % (indent(trans(body)), update) result += indent(body) return result def ForInStatement(type, left, right, body, each): res = 'for PyJsTemp in %s:\n' % trans(right) if left['type']=="VariableDeclaration": addon = trans(left) # make sure variable is registered if addon != 'pass\n': res = addon + res # we have to execute this expression :( # now extract the name try: name = left['declarations'][0]['id']['name'] except: raise RuntimeError('Unusual ForIn loop') elif left['type']=='Identifier': name = left['name'] else: raise RuntimeError('Unusual ForIn loop') res += indent('var.put(%s, PyJsTemp)\n' % repr(name) + trans(body)) return res def IfStatement(type, test, consequent, alternate): # NOTE we cannot do elif because function definition inside elif statement would not be possible! IF = 'if %s:\n' % trans(test) IF += indent(trans(consequent)) if not alternate: return IF ELSE = 'else:\n' + indent(trans(alternate)) return IF + ELSE def LabeledStatement(type, label, body): # todo consider using smarter approach! inside = trans(body) defs = '' if inside.startswith('while ') or inside.startswith('for ') or inside.startswith('#for'): # we have to add contine label as well... # 3 or 1 since #for loop type has more lines before real for. sep = 1 if not inside.startswith('#for') else 3 cont_label = get_continue_label(label['name']) temp = inside.split('\n') injected = 'try:\n'+'\n'.join(temp[sep:]) injected += 'except %s:\n pass\n'%cont_label inside = '\n'.join(temp[:sep])+'\n'+indent(injected) defs += 'class %s(Exception): pass\n'%cont_label break_label = get_break_label(label['name']) inside = 'try:\n%sexcept %s:\n pass\n'% (indent(inside), break_label) defs += 'class %s(Exception): pass\n'%break_label return defs + inside def StatementList(lis): if lis: # ensure we don't return empty string because it may ruin indentation! 
code = ''.join(trans(e) for e in lis) return code if code else 'pass\n' else: return 'pass\n' def PyimportStatement(type, imp): lib = imp['name'] jlib = 'PyImport_%s' % lib code = 'import %s as %s\n' % (lib, jlib) #check whether valid lib name... try: compile(code, '', 'exec') except: raise SyntaxError('Invalid Python module name (%s) in pyimport statement'%lib) # var.pyimport will handle module conversion to PyJs object code += 'var.pyimport(%s, %s)\n' % (repr(lib), jlib) return code def SwitchStatement(type, discriminant, cases): #TODO there will be a problem with continue in a switch statement.... FIX IT code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n') code = code % trans(discriminant) for case in cases: case_code = None if case['test']: # case (x): case_code = 'if SWITCHED or PyJsStrictEq(CONDITION, %s):\n' % (trans(case['test'])) else: # default: case_code = 'if True:\n' case_code += indent('SWITCHED = True\n') case_code += indent(StatementList(case['consequent'])) # one more indent for whole code += indent(case_code) # prevent infinite loop and sort out nested switch... code += indent('SWITCHED = True\nbreak\n') return code def ThrowStatement(type, argument): return 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % trans(argument) def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer): result = 'try:\n%s' % indent(trans(block)) # complicated catch statement... 
if handler: identifier = handler['param']['name'] holder = 'PyJsHolder_%s_%d'%(to_hex(identifier), random.randrange(1e8)) identifier = repr(identifier) result += 'except PyJsException as PyJsTempException:\n' # fill in except ( catch ) block and remember to recover holder variable to its previous state result += indent(TRY_CATCH.replace('HOLDER', holder).replace('NAME', identifier).replace('BLOCK', indent(trans(handler['body'])))) # translate finally statement if present if finalizer: result += 'finally:\n%s' % indent(trans(finalizer)) return result def LexicalDeclaration(type, declarations, kind): raise NotImplementedError('let and const not implemented yet but they will be soon! Check github for updates.') def VariableDeclarator(type, id, init): name = id['name'] # register the name if not already registered Context.register(name) if init: return 'var.put(%s, %s)\n' % (repr(name), trans(init)) return '' def VariableDeclaration(type, declarations, kind): code = ''.join(trans(d) for d in declarations) return code if code else 'pass\n' def WhileStatement(type, test, body): result = 'while %s:\n'%trans(test) + indent(trans(body)) return result def WithStatement(type, object, body): raise NotImplementedError('With statement not implemented!') def Program(type, body): inline_stack.reset() code = ''.join(trans(e) for e in body) # here add hoisted elements (register variables and define functions) code = Context.get_code() + code # replace all inline variables code = inline_stack.inject_inlines(code) return code # ======== FUNCTIONS ============ def FunctionDeclaration(type, id, params, defaults, body, generator, expression): if generator: raise NotImplementedError('Generators not supported') if defaults: raise NotImplementedError('Defaults not supported') if not id: return FunctionExpression(type, id, params, defaults, body, generator, expression) JsName = id['name'] PyName = 'PyJsHoisted_%s_' % JsName PyName = PyName if is_valid_py_name(PyName) else 
'PyJsHoistedNonPyName' # this is quite complicated global Context previous_context = Context # change context to the context of this function Context = ContextStack() # translate body within current context code = trans(body) # get arg names vars = [v['name'] for v in params] # args are automaticaly registered variables Context.to_register.update(vars) # add all hoisted elements inside function code = Context.get_code() + code # check whether args are valid python names: used_vars = [] for v in vars: if is_valid_py_name(v): used_vars.append(v) else: # invalid arg in python, for example $, replace with alternatice arg used_vars.append('PyJsArg_%s_' % to_hex(v)) header = '@Js\n' header+= 'def %s(%sthis, arguments, var=var):\n' % (PyName, ', '.join(used_vars) +(', ' if vars else '')) # transfer names from Py scope to Js scope arg_map = dict(zip(vars, used_vars)) arg_map.update({'this':'this', 'arguments':'arguments'}) arg_conv = 'var = Scope({%s}, var)\n' % ', '.join(repr(k)+':'+v for k,v in six.iteritems(arg_map)) # and finally set the name of the function to its real name: footer = '%s.func_name = %s\n' % (PyName, repr(JsName)) footer+= 'var.put(%s, %s)\n' % (repr(JsName), PyName) whole_code = header + indent(arg_conv+code) + footer # restore context Context = previous_context # define in upper context Context.define(JsName, whole_code) return 'pass\n' def FunctionExpression(type, id, params, defaults, body, generator, expression): if generator: raise NotImplementedError('Generators not supported') if defaults: raise NotImplementedError('Defaults not supported') JsName = id['name'] if id else 'anonymous' if not is_valid_py_name(JsName): ScriptName = 'InlineNonPyName' else: ScriptName = JsName PyName = inline_stack.require(ScriptName) # this is unique # again quite complicated global Context previous_context = Context # change context to the context of this function Context = ContextStack() # translate body within current context code = trans(body) # get arg names 
vars = [v['name'] for v in params] # args are automaticaly registered variables Context.to_register.update(vars) # add all hoisted elements inside function code = Context.get_code() + code # check whether args are valid python names: used_vars = [] for v in vars: if is_valid_py_name(v): used_vars.append(v) else: # invalid arg in python, for example $, replace with alternatice arg used_vars.append('PyJsArg_%s_' % to_hex(v)) header = '@Js\n' header+= 'def %s(%sthis, arguments, var=var):\n' % (PyName, ', '.join(used_vars) +(', ' if vars else '')) # transfer names from Py scope to Js scope arg_map = dict(zip(vars, used_vars)) arg_map.update({'this':'this', 'arguments':'arguments'}) if id: # make self available from inside... if id['name'] not in arg_map: arg_map[id['name']] = PyName arg_conv = 'var = Scope({%s}, var)\n' % ', '.join(repr(k)+':'+v for k,v in six.iteritems(arg_map)) # and finally set the name of the function to its real name: footer = '%s._set_name(%s)\n' % (PyName, repr(JsName)) whole_code = header + indent(arg_conv+code) + footer # restore context Context = previous_context # define in upper context inline_stack.define(PyName, whole_code) return PyName LogicalExpression = BinaryExpression PostfixExpression = UpdateExpression clean_stacks() if __name__=='__main__': import codecs import time import pyjsparser c = None#'''`ijfdij`''' if not c: with codecs.open("esp.js", "r", "utf-8") as f: c = f.read() print('Started') t = time.time() res = trans(pyjsparser.PyJsParser().parse(c)) dt = time.time() - t+ 0.000000001 print('Translated everyting in', round(dt,5), 'seconds.') print('Thats %d characters per second' % int(len(c)/dt)) with open('res.py', 'w') as f: f.write(res)
22,167
Python
.py
514
36.418288
138
0.635882
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,842
pyjsparserdata.py
evilhero_mylar/lib/js2py/translators/pyjsparserdata.py
# The MIT License # # Copyright 2014, 2015 Piotr Dabkowski # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, subject # to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT # LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE from __future__ import unicode_literals import sys import unicodedata import six from collections import defaultdict if six.PY3: unichr = chr xrange = range unicode = str token = { 'BooleanLiteral': 1, 'EOF': 2, 'Identifier': 3, 'Keyword': 4, 'NullLiteral': 5, 'NumericLiteral': 6, 'Punctuator': 7, 'StringLiteral': 8, 'RegularExpression': 9, 'Template': 10 } TokenName = {v:k for k,v in token.items()} FnExprTokens = ['(', '{', '[', 'in', 'typeof', 'instanceof', 'new', 'return', 'case', 'delete', 'throw', 'void', # assignment operators '=', '+=', '-=', '*=', '/=', '%=', '<<=', '>>=', '>>>=', '&=', '|=', '^=', ',', # binary/unary operators '+', '-', '*', '/', '%', '++', '--', '<<', '>>', '>>>', '&', '|', '^', '!', '~', '&&', '||', '?', ':', '===', '==', '>=', '<=', '<', '>', '!=', '!=='] syntax= {'AssignmentExpression', 'AssignmentPattern', 'ArrayExpression', 'ArrayPattern', 
'ArrowFunctionExpression', 'BlockStatement', 'BinaryExpression', 'BreakStatement', 'CallExpression', 'CatchClause', 'ClassBody', 'ClassDeclaration', 'ClassExpression', 'ConditionalExpression', 'ContinueStatement', 'DoWhileStatement', 'DebuggerStatement', 'EmptyStatement', 'ExportAllDeclaration', 'ExportDefaultDeclaration', 'ExportNamedDeclaration', 'ExportSpecifier', 'ExpressionStatement', 'ForStatement', 'ForInStatement', 'FunctionDeclaration', 'FunctionExpression', 'Identifier', 'IfStatement', 'ImportDeclaration', 'ImportDefaultSpecifier', 'ImportNamespaceSpecifier', 'ImportSpecifier', 'Literal', 'LabeledStatement', 'LogicalExpression', 'MemberExpression', 'MethodDefinition', 'NewExpression', 'ObjectExpression', 'ObjectPattern', 'Program', 'Property', 'RestElement', 'ReturnStatement', 'SequenceExpression', 'SpreadElement', 'Super', 'SwitchCase', 'SwitchStatement', 'TaggedTemplateExpression', 'TemplateElement', 'TemplateLiteral', 'ThisExpression', 'ThrowStatement', 'TryStatement', 'UnaryExpression', 'UpdateExpression', 'VariableDeclaration', 'VariableDeclarator', 'WhileStatement', 'WithStatement'} # Error messages should be identical to V8. 
messages = { 'UnexpectedToken': 'Unexpected token %s', 'UnexpectedNumber': 'Unexpected number', 'UnexpectedString': 'Unexpected string', 'UnexpectedIdentifier': 'Unexpected identifier', 'UnexpectedReserved': 'Unexpected reserved word', 'UnexpectedTemplate': 'Unexpected quasi %s', 'UnexpectedEOS': 'Unexpected end of input', 'NewlineAfterThrow': 'Illegal newline after throw', 'InvalidRegExp': 'Invalid regular expression', 'UnterminatedRegExp': 'Invalid regular expression: missing /', 'InvalidLHSInAssignment': 'Invalid left-hand side in assignment', 'InvalidLHSInForIn': 'Invalid left-hand side in for-in', 'MultipleDefaultsInSwitch': 'More than one default clause in switch statement', 'NoCatchOrFinally': 'Missing catch or finally after try', 'UnknownLabel': 'Undefined label \'%s\'', 'Redeclaration': '%s \'%s\' has already been declared', 'IllegalContinue': 'Illegal continue statement', 'IllegalBreak': 'Illegal break statement', 'IllegalReturn': 'Illegal return statement', 'StrictModeWith': 'Strict mode code may not include a with statement', 'StrictCatchVariable': 'Catch variable may not be eval or arguments in strict mode', 'StrictVarName': 'Variable name may not be eval or arguments in strict mode', 'StrictParamName': 'Parameter name eval or arguments is not allowed in strict mode', 'StrictParamDupe': 'Strict mode function may not have duplicate parameter names', 'StrictFunctionName': 'Function name may not be eval or arguments in strict mode', 'StrictOctalLiteral': 'Octal literals are not allowed in strict mode.', 'StrictDelete': 'Delete of an unqualified identifier in strict mode.', 'StrictLHSAssignment': 'Assignment to eval or arguments is not allowed in strict mode', 'StrictLHSPostfix': 'Postfix increment/decrement may not have eval or arguments operand in strict mode', 'StrictLHSPrefix': 'Prefix increment/decrement may not have eval or arguments operand in strict mode', 'StrictReservedWord': 'Use of future reserved word in strict mode', 'TemplateOctalLiteral': 
'Octal literals are not allowed in template strings.', 'ParameterAfterRestParameter': 'Rest parameter must be last formal parameter', 'DefaultRestParameter': 'Unexpected token =', 'ObjectPatternAsRestParameter': 'Unexpected token {', 'DuplicateProtoProperty': 'Duplicate __proto__ fields are not allowed in object literals', 'ConstructorSpecialMethod': 'Class constructor may not be an accessor', 'DuplicateConstructor': 'A class may only have one constructor', 'StaticPrototype': 'Classes may not have static property named prototype', 'MissingFromClause': 'Unexpected token', 'NoAsAfterImportNamespace': 'Unexpected token', 'InvalidModuleSpecifier': 'Unexpected token', 'IllegalImportDeclaration': 'Unexpected token', 'IllegalExportDeclaration': 'Unexpected token'} PRECEDENCE = {'||':1, '&&':2, '|':3, '^':4, '&':5, '==':6, '!=':6, '===':6, '!==':6, '<':7, '>':7, '<=':7, '>=':7, 'instanceof':7, 'in':7, '<<':8, '>>':8, '>>>':8, '+':9, '-':9, '*':11, '/':11, '%':11} class Token: pass class Syntax: pass class Messages: pass class PlaceHolders: ArrowParameterPlaceHolder = 'ArrowParameterPlaceHolder' for k,v in token.items(): setattr(Token, k, v) for e in syntax: setattr(Syntax, e, e) for k,v in messages.items(): setattr(Messages, k, v) #http://stackoverflow.com/questions/14245893/efficiently-list-all-characters-in-a-given-unicode-category BOM = u'\uFEFF' ZWJ = u'\u200D' ZWNJ = u'\u200C' TAB = u'\u0009' VT = u'\u000B' FF = u'\u000C' SP = u'\u0020' NBSP = u'\u00A0' LF = u'\u000A' CR = u'\u000D' LS = u'\u2028' PS = u'\u2029' U_CATEGORIES = defaultdict(list) for c in map(unichr, range(sys.maxunicode + 1)): U_CATEGORIES[unicodedata.category(c)].append(c) UNICODE_LETTER = set(U_CATEGORIES['Lu']+U_CATEGORIES['Ll']+ U_CATEGORIES['Lt']+U_CATEGORIES['Lm']+ U_CATEGORIES['Lo']+U_CATEGORIES['Nl']) UNICODE_COMBINING_MARK = set(U_CATEGORIES['Mn']+U_CATEGORIES['Mc']) UNICODE_DIGIT = set(U_CATEGORIES['Nd']) UNICODE_CONNECTOR_PUNCTUATION = set(U_CATEGORIES['Pc']) IDENTIFIER_START = 
UNICODE_LETTER.union({'$','_', '\\'}) # and some fucking unicode escape sequence IDENTIFIER_PART = IDENTIFIER_START.union(UNICODE_COMBINING_MARK).union(UNICODE_DIGIT).union(UNICODE_CONNECTOR_PUNCTUATION).union({ZWJ, ZWNJ}) WHITE_SPACE = {0x20, 0x09, 0x0B, 0x0C, 0xA0, 0x1680, 0x180E, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x202F, 0x205F, 0x3000, 0xFEFF} LINE_TERMINATORS = {0x0A, 0x0D, 0x2028, 0x2029} def isIdentifierStart(ch): return (ch if isinstance(ch, unicode) else unichr(ch)) in IDENTIFIER_START def isIdentifierPart(ch): return (ch if isinstance(ch, unicode) else unichr(ch)) in IDENTIFIER_PART def isWhiteSpace(ch): return (ord(ch) if isinstance(ch, unicode) else ch) in WHITE_SPACE def isLineTerminator(ch): return (ord(ch) if isinstance(ch, unicode) else ch) in LINE_TERMINATORS OCTAL = {'0', '1', '2', '3', '4', '5', '6', '7'} DEC = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} HEX = set('0123456789abcdefABCDEF') HEX_CONV = {'0123456789abcdef'[n]:n for n in xrange(16)} for i,e in enumerate('ABCDEF', 10): HEX_CONV[e] = i def isDecimalDigit(ch): return (ch if isinstance(ch, unicode) else unichr(ch)) in DEC def isHexDigit(ch): return (ch if isinstance(ch, unicode) else unichr(ch)) in HEX def isOctalDigit(ch): return (ch if isinstance(ch, unicode) else unichr(ch)) in OCTAL def isFutureReservedWord(w): return w in { 'enum', 'export', 'import', 'super'} def isStrictModeReservedWord(w): return w in {'implements', 'interface', 'package', 'private', 'protected', 'public', 'static', 'yield', 'let'} def isRestrictedWord(w): return w in {'eval', 'arguments'} def isKeyword(w): # 'const' is specialized as Keyword in V8. # 'yield' and 'let' are for compatibility with SpiderMonkey and ES.next. # Some others are from future reserved words. 
return w in {'if', 'in', 'do', 'var', 'for', 'new', 'try', 'let', 'this', 'else', 'case', 'void', 'with', 'enum', 'while', 'break', 'catch', 'throw', 'const', 'yield', 'class', 'super', 'return', 'typeof', 'delete', 'switch', 'export', 'import', 'default', 'finally', 'extends', 'function', 'continue', 'debugger', 'instanceof', 'pyimport'} class JsSyntaxError(Exception): pass if __name__=='__main__': assert isLineTerminator('\n') assert isLineTerminator(0x0A) assert isIdentifierStart('$') assert isIdentifierStart(100) assert isWhiteSpace(' ')
11,264
Python
.py
263
35.13308
141
0.621592
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,843
jsregexps.py
evilhero_mylar/lib/js2py/translators/jsregexps.py
from pyjsparserdata import * REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'} NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ??? CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'} CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'} CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'} def SpecialChar(char): return {'type': 'SpecialChar', 'content': char} def isPatternCharacter(char): return char not in NOT_PATTERN_CHARS class JsRegExpParser: def __init__(self, source, flags): self.source = source self.flags = flags self.index = 0 self.length = len(source) self.lineNumber = 0 self.lineStart = 0 def parsePattern(self): '''Perform sctring escape - for regexp literals''' return {'type': 'Pattern', 'contents': self.parseDisjunction()} def parseDisjunction(self): alternatives = [] while True: alternatives.append(self.parseAlternative()) if not self.isEOF(): self.expect_character('|') else: break return {'type': 'Disjunction', 'contents': alternatives} def isEOF(self): if self.index>=self.length: return True return False def expect_character(self, character): if self.source[self.index]!=character: self.throwUnexpected(character) self.index += 1 def parseAlternative(self): contents = [] while not self.isEOF() and self.source[self.index]!='|': contents.append(self.parseTerm()) return {'type': 'Alternative', 'contents': contents} def follows(self, chars): for i, c in enumerate(chars): if self.index+i>=self.length or self.source[self.index+i] != c: return False return True def parseTerm(self): assertion = self.parseAssertion() if assertion: return assertion else: return {'type': 'Term', 'contents': self.parseAtom()} # quantifier will go inside atom! 
def parseAssertion(self): if self.follows('$'): content = SpecialChar('$') self.index += 1 elif self.follows('^'): content = SpecialChar('^') self.index += 1 elif self.follows('\\b'): content = SpecialChar('\\b') self.index += 2 elif self.follows('\\B'): content = SpecialChar('\\B') self.index += 2 elif self.follows('(?='): self.index += 3 dis = self.parseDisjunction() self.expect_character(')') content = {'type': 'Lookached', 'contents': dis, 'negated': False} elif self.follows('(?!'): self.index += 3 dis = self.parseDisjunction() self.expect_character(')') content = {'type': 'Lookached', 'contents': dis, 'negated': True} else: return None return {'type': 'Assertion', 'content': content} def parseAtom(self): if self.follows('.'): content = SpecialChar('.') self.index += 1 elif self.follows('\\'): self.index += 1 content = self.parseAtomEscape() elif self.follows('['): content = self.parseCharacterClass() elif self.follows('(?:'): self.index += 3 dis = self.parseDisjunction() self.expect_character(')') content = 'idk' elif self.follows('('): self.index += 1 dis = self.parseDisjunction() self.expect_character(')') content = 'idk' elif isPatternCharacter(self.source[self.index]): content = self.source[self.index] self.index += 1 else: return None quantifier = self.parseQuantifier() return {'type': 'Atom', 'content': content, 'quantifier': quantifier} def parseQuantifier(self): prefix = self.parseQuantifierPrefix() if not prefix: return None greedy = True if self.follows('?'): self.index += 1 greedy = False return {'type': 'Quantifier', 'contents': prefix, 'greedy': greedy} def parseQuantifierPrefix(self): if self.isEOF(): return None if self.follows('+'): content = '+' self.index += 1 elif self.follows('?'): content = '?' 
self.index += 1 elif self.follows('*'): content = '*' self.index += 1 elif self.follows('{'): # try matching otherwise return None and restore the state i = self.index self.index += 1 digs1 = self.scanDecimalDigs() # if no minimal number of digs provided then return no quantifier if not digs1: self.index = i return None # scan char limit if provided if self.follows(','): self.index += 1 digs2 = self.scanDecimalDigs() else: digs2 = '' # must be valid! if not self.follows('}'): self.index = i return None else: self.expect_character('}') content = int(digs1), int(digs2) if digs2 else None else: return None return content def parseAtomEscape(self): ch = self.source[self.index] if isDecimalDigit(ch) and ch!=0: digs = self.scanDecimalDigs() elif ch in CHAR_CLASS_ESCAPE: self.index += 1 return SpecialChar('\\' + ch) else: return self.parseCharacterEscape() def parseCharacterEscape(self): ch = self.source[self.index] if ch in CONTROL_ESCAPE_CHARS: return SpecialChar('\\' + ch) if ch=='c': 'ok, fuck this shit.' def scanDecimalDigs(self): s = self.index while not self.isEOF() and isDecimalDigit(self.source[self.index]): self.index += 1 return self.source[s:self.index] a = JsRegExpParser('a(?=x)', '') print(a.parsePattern())
6,875
Python
.py
189
25.179894
118
0.494667
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,844
__init__.py
evilhero_mylar/lib/js2py/translators/__init__.py
# The MIT License # # Copyright 2014, 2015 Piotr Dabkowski # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, subject # to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT # LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE __all__ = ['PyJsParser', 'Node', 'WrappingNode', 'node_to_dict', 'parse', 'translate_js', 'translate', 'syntax_tree_translate', 'DEFAULT_HEADER'] __author__ = 'Piotr Dabkowski' __version__ = '2.2.0' from .pyjsparser import PyJsParser, Node, WrappingNode, node_to_dict from .translator import translate_js, trasnlate, syntax_tree_translate, DEFAULT_HEADER def parse(javascript_code): """Returns syntax tree of javascript_code. Syntax tree has the same structure as syntax tree produced by esprima.js Same as PyJsParser().parse For your convenience :) """ p = PyJsParser() return p.parse(javascript_code)
1,757
Python
.py
31
54.354839
127
0.76498
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,845
pyjsparser.py
evilhero_mylar/lib/js2py/translators/pyjsparser.py
# The MIT License # # Copyright 2014, 2015 Piotr Dabkowski # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, subject # to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT # LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE # OR THE USE OR OTHER DEALINGS IN THE SOFTWARE from __future__ import unicode_literals from .pyjsparserdata import * from .std_nodes import * from pprint import pprint REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.', '[', ']', '(', ')', '{', '{', '|', '-'} import six if six.PY3: basestring = str long = int xrange = range unicode = str ESPRIMA_VERSION = '2.2.0' DEBUG = False # Small naming convention changes # len -> leng # id -> d # type -> typ # str -> st true = True false = False null = None class PyJsParser: """ Usage: parser = PyJsParser() parser.parse('var JavaScriptCode = 5.1') """ def __init__(self): self.clean() def test(self, code): pprint(self.parse(code)) def clean(self): self.strict = None self.sourceType = None self.index = 0 self.lineNumber = 1 self.lineStart = 0 self.hasLineTerminator = None self.lastIndex = None self.lastLineNumber = None self.lastLineStart = None self.startIndex = None 
self.startLineNumber = None self.startLineStart = None self.scanning = None self.lookahead = None self.state = None self.extra = None self.isBindingElement = None self.isAssignmentTarget = None self.firstCoverInitializedNameError = None # 7.4 Comments def skipSingleLineComment(self, offset): start = self.index - offset; while self.index < self.length: ch = self.source[self.index]; self.index += 1 if isLineTerminator(ch): if (ord(ch) == 13 and ord(self.source[self.index]) == 10): self.index += 1 self.lineNumber += 1 self.hasLineTerminator = True self.lineStart = self.index return def skipMultiLineComment(self): while self.index < self.length: ch = ord(self.source[self.index]) if isLineTerminator(ch): if (ch == 0x0D and ord(self.source[self.index+1]) == 0x0A): self.index += 1 self.lineNumber += 1 self.index += 1 self.hasLineTerminator = True self.lineStart = self.index elif ch == 0x2A: # Block comment ends with '*/'. if ord(self.source[self.index+1]) == 0x2F: self.index += 2 return self.index += 1 else: self.index += 1 self.tolerateUnexpectedToken() def skipComment(self): self.hasLineTerminator = False start = (self.index==0) while self.index < self.length: ch = ord(self.source[self.index]) if isWhiteSpace(ch): self.index += 1 elif isLineTerminator(ch): self.hasLineTerminator = True self.index += 1 if (ch == 0x0D and ord(self.source[self.index]) == 0x0A): self.index += 1 self.lineNumber += 1 self.lineStart = self.index start = True elif (ch == 0x2F): # U+002F is '/' ch = ord(self.source[self.index+1]) if (ch == 0x2F): self.index += 2 self.skipSingleLineComment(2) start = True elif (ch == 0x2A): # U+002A is '*' self.index += 2 self.skipMultiLineComment() else: break elif (start and ch == 0x2D): # U+002D is '-' # U+003E is '>' if (ord(self.source[self.index+1]) == 0x2D) and (ord(self.source[self.index+2]) == 0x3E): # '-->' is a single-line comment self.index += 3 self.skipSingleLineComment(3) else: break elif (ch == 0x3C): # U+003C is '<' if self.source[self.index+1: 
self.index+4]=='!--': # <!-- self.index += 4 self.skipSingleLineComment(4) else: break else: break def scanHexEscape(self, prefix): code = 0 leng = 4 if (prefix == 'u') else 2 for i in xrange(leng): if self.index < self.length and isHexDigit(self.source[self.index]): ch = self.source[self.index] self.index += 1 code = code * 16 + HEX_CONV[ch] else: return '' return unichr(code) def scanUnicodeCodePointEscape(self): ch = self.source[self.index] code = 0 # At least, one hex digit is required. if ch == '}': self.throwUnexpectedToken() while (self.index < self.length): ch = self.source[self.index] self.index += 1 if not isHexDigit(ch): break code = code * 16 + HEX_CONV[ch] if code > 0x10FFFF or ch != '}': self.throwUnexpectedToken() # UTF-16 Encoding if (code <= 0xFFFF): return unichr(code) cu1 = ((code - 0x10000) >> 10) + 0xD800; cu2 = ((code - 0x10000) & 1023) + 0xDC00; return unichr(cu1)+unichr(cu2) def ccode(self, offset=0): return ord(self.source[self.index+offset]) def log_err_case(self): if not DEBUG: return print('INDEX', self.index) print(self.source[self.index-10:self.index+10]) print('') def at(self, loc): return None if loc>=self.length else self.source[loc] def substr(self, le, offset=0): return self.source[self.index+offset:self.index+offset+le] def getEscapedIdentifier(self): d = self.source[self.index] ch = ord(d) self.index += 1 # '\u' (U+005C, U+0075) denotes an escaped character. if (ch == 0x5C): if (ord(self.source[self.index]) != 0x75): self.throwUnexpectedToken() self.index += 1 ch = self.scanHexEscape('u') if not ch or ch == '\\' or not isIdentifierStart(ch[0]): self.throwUnexpectedToken() d = ch while (self.index < self.length): ch = self.ccode() if not isIdentifierPart(ch): break self.index += 1 d += unichr(ch) # '\u' (U+005C, U+0075) denotes an escaped character. 
if (ch == 0x5C): d = d[0: len(d)-1] if (self.ccode() != 0x75): self.throwUnexpectedToken() self.index += 1 ch = self.scanHexEscape('u'); if (not ch or ch == '\\' or not isIdentifierPart(ch[0])): self.throwUnexpectedToken() d += ch return d def getIdentifier(self): start = self.index self.index += 1 while (self.index < self.length): ch = self.ccode() if (ch == 0x5C): # Blackslash (U+005C) marks Unicode escape sequence. self.index = start return self.getEscapedIdentifier() if (isIdentifierPart(ch)): self.index += 1 else: break return self.source[start: self.index] def scanIdentifier(self): start = self.index # Backslash (U+005C) starts an escaped character. d = self.getEscapedIdentifier() if (self.ccode() == 0x5C) else self.getIdentifier() # There is no keyword or literal with only one character. # Thus, it must be an identifier. if (len(d)==1): type = Token.Identifier elif (isKeyword(d)): type = Token.Keyword elif (d == 'null'): type = Token.NullLiteral elif (i == 'true' or d == 'false'): type = Token.BooleanLiteral else: type = Token.Identifier; return { 'type': type, 'value': d, 'lineNumber': self.lineNumber, 'lineStart': self.lineStart, 'start': start, 'end': self.index } # 7.7 Punctuators def scanPunctuator(self): token = { 'type': Token.Punctuator, 'value': '', 'lineNumber': self.lineNumber, 'lineStart': self.lineStart, 'start': self.index, 'end': self.index } # Check for most common single-character punctuators. st = self.source[self.index] if st == '{': self.state['curlyStack'].append('{') self.index += 1 elif st == '}': self.index += 1 self.state['curlyStack'].pop() elif st in {'.', '(', ')', ';', ',', '[', ']', ':', '?', '~'}: self.index += 1 else: # 4-character punctuator. st = self.substr(4) if (st == '>>>='): self.index += 4 else: # 3-character punctuators. st = st[0:3] if st in {'===', '!==', '>>>', '<<=', '>>='}: self.index += 3 else: # 2-character punctuators. 
st = st[0:2] if st in {'&&','||','==','!=','+=','-=','*=' ,'/=' ,'++' , '--' , '<<', '>>', '&=', '|=', '^=', '%=', '<=', '>=', '=>'}: self.index += 2 else: # 1-character punctuators. st = self.source[self.index] if st in {'<', '>', '=', '!', '+', '-', '*', '%', '&', '|', '^', '/'}: self.index += 1 if self.index == token['start']: self.throwUnexpectedToken() token['end'] = self.index; token['value'] = st return token # 7.8.3 Numeric Literals def scanHexLiteral(self, start): number = '' while (self.index < self.length): if (not isHexDigit(self.source[self.index])): break number += self.source[self.index] self.index += 1 if not number: self.throwUnexpectedToken() if isIdentifierStart(self.ccode()): self.throwUnexpectedToken() return { 'type': Token.NumericLiteral, 'value': int(number, 16), 'lineNumber': self.lineNumber, 'lineStart': self.lineStart, 'start': start, 'end': self.index} def scanBinaryLiteral(self, start): number = '' while (self.index < self.length): ch = self.source[self.index] if (ch != '0' and ch != '1'): break number += self.source[self.index] self.index += 1 if not number: # only 0b or 0B self.throwUnexpectedToken() if (self.index < self.length): ch = self.source[self.index] # istanbul ignore else if (isIdentifierStart(ch) or isDecimalDigit(ch)): self.throwUnexpectedToken(); return { 'type': Token.NumericLiteral, 'value': int(number, 2), 'lineNumber': self.lineNumber, 'lineStart': self.lineStart, 'start': start, 'end': self.index} def scanOctalLiteral(self, prefix, start): if isOctalDigit(prefix): octal = True number = '0' + self.source[self.index] self.index += 1 else: octal = False self.index += 1 number = '' while (self.index < self.length): if (not isOctalDigit(self.source[self.index])): break number += self.source[self.index] self.index += 1 if (not octal and not number): # only 0o or 0O self.throwUnexpectedToken() if (isIdentifierStart(self.ccode()) or isDecimalDigit(self.ccode())): self.throwUnexpectedToken() return { 'type': 
Token.NumericLiteral, 'value': int(number, 8), 'lineNumber': self.lineNumber, 'lineStart': self.lineStart, 'start': start, 'end': self.index} def octalToDecimal(self, ch): # \0 is not octal escape sequence octal = (ch != '0') code = int(ch, 8) if (self.index < self.length and isOctalDigit(self.source[self.index])): octal = True code = code * 8 + int(self.source[self.index], 8) self.index += 1 # 3 digits are only allowed when string starts # with 0, 1, 2, 3 if (ch in '0123' and self.index < self.length and isOctalDigit(self.source[self.index])): code = code * 8 + int((self.source[self.index]), 8) self.index += 1 return { 'code': code, 'octal': octal} def isImplicitOctalLiteral(self): # Implicit octal, unless there is a non-octal digit. # (Annex B.1.1 on Numeric Literals) for i in xrange(self.index + 1, self.length): ch = self.source[i]; if (ch == '8' or ch == '9'): return False; if (not isOctalDigit(ch)): return True return True def scanNumericLiteral(self): ch = self.source[self.index] assert isDecimalDigit(ch) or (ch == '.'), 'Numeric literal must start with a decimal digit or a decimal point' start = self.index number = '' if ch != '.': number = self.source[self.index] self.index += 1 ch = self.source[self.index] # Hex number starts with '0x'. # Octal number starts with '0'. # Octal number in ES6 starts with '0o'. # Binary number in ES6 starts with '0b'. 
if (number == '0'): if (ch == 'x' or ch == 'X'): self.index += 1 return self.scanHexLiteral(start); if (ch == 'b' or ch == 'B'): self.index += 1 return self.scanBinaryLiteral(start) if (ch == 'o' or ch == 'O'): return self.scanOctalLiteral(ch, start) if (isOctalDigit(ch)): if (self.isImplicitOctalLiteral()): return self.scanOctalLiteral(ch, start); while (isDecimalDigit(self.ccode())): number += self.source[self.index] self.index += 1 ch = self.source[self.index]; if (ch == '.'): number += self.source[self.index] self.index += 1 while (isDecimalDigit(self.source[self.index])): number += self.source[self.index] self.index += 1 ch = self.source[self.index] if (ch == 'e' or ch == 'E'): number += self.source[self.index] self.index += 1 ch = self.source[self.index] if (ch == '+' or ch == '-'): number += self.source[self.index] self.index += 1 if (isDecimalDigit(self.source[self.index])): while (isDecimalDigit(self.source[self.index])): number += self.source[self.index] self.index += 1 else: self.throwUnexpectedToken() if (isIdentifierStart(self.source[self.index])): self.throwUnexpectedToken(); return { 'type': Token.NumericLiteral, 'value': float(number), 'lineNumber': self.lineNumber, 'lineStart': self.lineStart, 'start': start, 'end': self.index} # 7.8.4 String Literals def _interpret_regexp(self, string, flags): '''Perform sctring escape - for regexp literals''' self.index = 0 self.length = len(string) self.source = string self.lineNumber = 0 self.lineStart = 0 octal = False st = '' inside_square = 0 while (self.index < self.length): template = '[%s]' if not inside_square else '%s' ch = self.source[self.index] self.index += 1 if ch == '\\': ch = self.source[self.index] self.index += 1 if (not isLineTerminator(ch)): if ch=='u': digs = self.source[self.index:self.index+4] if len(digs)==4 and all(isHexDigit(d) for d in digs): st += template%unichr(int(digs, 16)) self.index += 4 else: st += 'u' elif ch=='x': digs = self.source[self.index:self.index+2] if len(digs)==2 
and all(isHexDigit(d) for d in digs): st += template%unichr(int(digs, 16)) self.index += 2 else: st += 'x' # special meaning - single char. elif ch=='0': st += '\\0' elif ch=='n': st += '\\n' elif ch=='r': st += '\\r' elif ch=='t': st += '\\t' elif ch=='f': st += '\\f' elif ch=='v': st += '\\v' # unescape special single characters like . so that they are interpreted literally elif ch in REGEXP_SPECIAL_SINGLE: st += '\\' + ch # character groups elif ch=='b': st += '\\b' elif ch=='B': st += '\\B' elif ch=='w': st += '\\w' elif ch=='W': st += '\\W' elif ch=='d': st += '\\d' elif ch=='D': st += '\\D' elif ch=='s': st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff' elif ch=='S': st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff' else: if isDecimalDigit(ch): num = ch while self.index<self.length and isDecimalDigit(self.source[self.index]): num += self.source[self.index] self.index += 1 st += '\\' + num else: st += ch # DONT ESCAPE!!! 
else: self.lineNumber += 1 if (ch == '\r' and self.source[self.index] == '\n'): self.index += 1 self.lineStart = self.index else: if ch=='[': inside_square = True elif ch==']': inside_square = False st += ch #print string, 'was transformed to', st return st def scanStringLiteral(self): st = '' octal = False quote = self.source[self.index] assert quote == '\''or quote == '"', 'String literal must starts with a quote' start = self.index; self.index += 1 while (self.index < self.length): ch = self.source[self.index] self.index += 1 if (ch == quote): quote = '' break elif (ch == '\\'): ch = self.source[self.index] self.index += 1 if (not isLineTerminator(ch)): if ch in 'ux': if (self.source[self.index] == '{'): self.index += 1 st += self.scanUnicodeCodePointEscape() else: unescaped = self.scanHexEscape(ch) if (not unescaped): self.throwUnexpectedToken() # with throw I don't know whats the difference st += unescaped elif ch=='n': st += '\n'; elif ch=='r': st += '\r'; elif ch=='t': st += '\t'; elif ch=='b': st += '\b'; elif ch=='f': st += '\f'; elif ch=='v': st += '\x0B' #elif ch in '89': # self.throwUnexpectedToken() # again with throw.... 
else: if isOctalDigit(ch): octToDec = self.octalToDecimal(ch) octal = octToDec.get('octal') or octal st += unichr(octToDec['code']) else: st += ch else: self.lineNumber += 1 if (ch == '\r' and self.source[self.index] == '\n'): self.index += 1 self.lineStart = self.index elif isLineTerminator(ch): break else: st += ch; if (quote != ''): self.throwUnexpectedToken() return { 'type': Token.StringLiteral, 'value': st, 'octal': octal, 'lineNumber': self.lineNumber, 'lineStart': self.startLineStart, 'start': start, 'end': self.index} def scanTemplate(self): cooked = '' terminated = False tail = False start = self.index head = (self.source[self.index]=='`') rawOffset = 2 self.index += 1 while (self.index < self.length): ch = self.source[self.index] self.index += 1 if (ch == '`'): rawOffset = 1; tail = True terminated = True break elif (ch == '$'): if (self.source[self.index] == '{'): self.state['curlyStack'].append('${') self.index += 1 terminated = True break; cooked += ch elif (ch == '\\'): ch = self.source[self.index] self.index += 1 if (not isLineTerminator(ch)): if ch=='n': cooked += '\n' elif ch=='r': cooked += '\r' elif ch=='t': cooked += '\t' elif ch in 'ux': if (self.source[self.index] == '{'): self.index += 1 cooked += self.scanUnicodeCodePointEscape() else: restore = self.index unescaped = self.scanHexEscape(ch) if (unescaped): cooked += unescaped else: self.index = restore cooked += ch elif ch=='b': cooked += '\b' elif ch=='f': cooked += '\f' elif ch=='v': cooked += '\v' else: if (ch == '0'): if isDecimalDigit(self.ccode()): # Illegal: \01 \02 and so on self.throwError(Messages.TemplateOctalLiteral) cooked += '\0' elif (isOctalDigit(ch)): # Illegal: \1 \2 self.throwError(Messages.TemplateOctalLiteral) else: cooked += ch else: self.lineNumber += 1 if (ch == '\r' and self.source[self.index] == '\n'): self.index += 1 self.lineStart = self.index elif (isLineTerminator(ch)): self.lineNumber += 1 if (ch == '\r' and self.source[self.index] =='\n'): self.index += 1 
                self.lineStart = self.index
                cooked += '\n'
            else:
                cooked += ch;
        if (not terminated):
            # Ran off the end of the source without seeing ` or ${.
            self.throwUnexpectedToken()
        if (not head):
            # This chunk closed a ${...} substitution: pop its marker.
            self.state['curlyStack'].pop();
        return {
            'type': Token.Template,
            'value': {
                'cooked': cooked,
                # The raw text excludes the delimiters on either side
                # (backtick or ${), hence the start+1 / rawOffset slicing.
                'raw': self.source[start + 1:self.index - rawOffset]},
            'head': head,
            'tail': tail,
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': start,
            'end': self.index}

    def testRegExp(self, pattern, flags):
        """Validate/convert a regex literal.  Currently a pass-through that
        returns the (pattern, flags) pair unchanged."""
        #todo: you should return python regexp object
        return (pattern, flags)

    def scanRegExpBody(self):
        """Scan the /pattern/ portion of a regex literal (flags excluded).
        Returns {'value': body without slashes, 'literal': full text
        including both slashes}; raises UnterminatedRegExp on a newline or
        unterminated pattern."""
        ch = self.source[self.index]
        assert ch == '/', 'Regular expression literal must start with a slash'
        st = ch
        self.index += 1
        classMarker = False  # inside a [...] character class?
        terminated = False
        while (self.index < self.length):
            ch = self.source[self.index]
            self.index += 1
            st += ch
            if (ch == '\\'):
                ch = self.source[self.index]
                self.index += 1
                # ECMA-262 7.8.5
                if (isLineTerminator(ch)):
                    self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
                st += ch
            elif (isLineTerminator(ch)):
                self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
            elif (classMarker):
                # '/' loses its special meaning inside [...].
                if (ch == ']'):
                    classMarker = False
            else:
                if (ch == '/'):
                    terminated = True
                    break
                elif (ch == '['):
                    classMarker = True;
        if (not terminated):
            self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
        # Exclude leading and trailing slash.
        body = st[1:-1]
        return {
            'value': body,
            'literal': st}

    def scanRegExpFlags(self):
        """Scan the flag characters following a regex literal's closing
        slash.  Returns {'value': cooked flags, 'literal': raw text};
        \\uXXXX escapes inside the flags are tolerated."""
        st = ''
        flags = ''
        while (self.index < self.length):
            ch = self.source[self.index]
            if (not isIdentifierPart(ch)):
                break
            self.index += 1
            if (ch == '\\' and self.index < self.length):
                ch = self.source[self.index]
                if (ch == 'u'):
                    self.index += 1
                    restore = self.index
                    ch = self.scanHexEscape('u')
                    if (ch):
                        flags += ch
                        st += '\\u'
                        while restore < self.index:
                            st += self.source[restore]
                            restore += 1
                    else:
                        # Invalid \\u escape: rewind and keep a literal 'u'.
                        self.index = restore
                        flags += 'u'
                        st += '\\u'
                        self.tolerateUnexpectedToken()
                else:
                    st += '\\'
                    self.tolerateUnexpectedToken()
            else:
                flags += ch
                st += ch
        return {
            'value': flags,
            'literal': st}

    def scanRegExp(self):
        """Scan a complete regex literal (body + flags) and return its token
        dict with 'literal', 'value' and a 'regex' pattern/flags pair."""
        self.scanning = True
        self.lookahead = None
        self.skipComment()
        start = self.index
        body = self.scanRegExpBody()
        flags = self.scanRegExpFlags()
        value = self.testRegExp(body['value'], flags['value'])
        # NOTE(review): this binds a *local* named `scanning`; the intent was
        # almost certainly `self.scanning = False` (mirroring lex()/peek() and
        # esprima's `scanning = false`) — the attribute is never reset here.
        scanning = False
        return {
            'literal': body['literal'] + flags['literal'],
            'value': value,
            'regex': {
                'pattern': body['value'],
                'flags': flags['value']
            },
            'start': start,
            'end': self.index}

    def collectRegex(self):
        """Skip any comments, then scan and return a regex literal token."""
        self.skipComment();
        return self.scanRegExp()

    def isIdentifierName(self, token):
        """True if the token can serve as an IdentifierName.
        NOTE(review): 1/3/4/5 are presumably the BooleanLiteral, Identifier,
        Keyword and NullLiteral members of Token — confirm against the Token
        constants defined earlier in this file."""
        return token['type'] in {1,3,4,5}

    #def advanceSlash(self): ???

    def advance(self):
        """Scan and return the next token (EOF, identifier/keyword,
        punctuator, string, number, template or regex), dispatching on the
        current character's code point."""
        if (self.index >= self.length):
            return {
                'type': Token.EOF,
                'lineNumber': self.lineNumber,
                'lineStart': self.lineStart,
                'start': self.index,
                'end': self.index}
        ch = self.ccode()
        if isIdentifierStart(ch):
            token = self.scanIdentifier()
            if (self.strict and isStrictModeReservedWord(token['value'])):
                token['type'] = Token.Keyword
            return token
        # Very common: ( and ) and ;
        if (ch == 0x28 or ch == 0x29 or ch == 0x3B):
            return self.scanPunctuator()
        # String literal starts with single quote (U+0027) or double quote (U+0022).
        if (ch == 0x27 or ch == 0x22):
            return self.scanStringLiteral()
        # Dot (.) U+002E can also start a floating-point number, hence the need
        # to check the next character.
        if (ch == 0x2E):
            if (isDecimalDigit(self.ccode(1))):
                return self.scanNumericLiteral()
            return self.scanPunctuator();
        if (isDecimalDigit(ch)):
            return self.scanNumericLiteral()
        # Slash (/) U+002F can also start a regex.
        #if (extra.tokenize && ch == 0x2F):
        #    return advanceSlash();
        # Template literals start with ` (U+0060) for template head
        # or } (U+007D) for template middle or template tail.
        if (ch == 0x60 or (ch == 0x7D and self.state['curlyStack'][len(self.state['curlyStack']) - 1] == '${')):
            return self.scanTemplate()
        return self.scanPunctuator();

    #def collectToken(self):
    #    loc = {
    #        'start': {
    #            'line': self.lineNumber,
    #            'column': self.index - self.lineStart}}
    #
    #    token = self.advance()
    #
    #    loc['end'] = {
    #        'line': self.lineNumber,
    #        'column': self.index - self.lineStart}
    #    if (token['type'] != Token.EOF):
    #        value = self.source[token['start']: token['end']]
    #        entry = {
    #            'type': TokenName[token['type']],
    #            'value': value,
    #            'range': [token['start'], token['end']],
    #            'loc': loc}
    #        if (token.get('regex')):
    #            entry['regex'] = {
    #                'pattern': token['regex']['pattern'],
    #                'flags': token['regex']['flags']}
    #        self.extra['tokens'].append(entry)
    #    return token;

    def lex(self):
        """Return the current lookahead token and advance the tokenizer:
        saves the 'last*' position of the returned token, the 'start*'
        position of the new lookahead, and scans the next token."""
        self.scanning = True
        self.lastIndex = self.index
        self.lastLineNumber = self.lineNumber
        self.lastLineStart = self.lineStart
        self.skipComment()
        token = self.lookahead
        self.startIndex = self.index
        self.startLineNumber = self.lineNumber
        self.startLineStart = self.lineStart
        self.lookahead = self.advance()
        self.scanning = False
        return token

    def peek(self):
        """Prime/refresh the lookahead token without returning the previous
        one — same bookkeeping as lex() but nothing is yielded to the
        caller (presumably used to initialise the token stream)."""
        self.scanning = True
        self.skipComment()
        self.lastIndex = self.index
        self.lastLineNumber = self.lineNumber
        self.lastLineStart = self.lineStart
        self.startIndex = self.index
        self.startLineNumber = self.lineNumber
        self.startLineStart = self.lineStart
        self.lookahead = self.advance()
        self.scanning = False

    def createError(self, line, pos, description):
        """Build a JS SyntaxError exception object carrying index, line,
        column and description, wrapped for raising into Python."""
        self.log_err_case()
        # Deferred import — presumably avoids a circular import with
        # js2py.base; confirm.
        from js2py.base import ERRORS, Js, JsToPyException
        error = ERRORS['SyntaxError']('Line ' +
                                      unicode(line) + ': ' + unicode(description))
        error.put('index', Js(pos))
        error.put('lineNumber', Js(line))
        # Column is computed against the current line start while scanning,
        # otherwise against the last consumed token's line start.
        error.put('column', Js(pos - (self.lineStart if self.scanning else self.lastLineStart) + 1))
        error.put('description', Js(description))
        return JsToPyException(error)

    # Throw an exception
    def throwError(self, messageFormat, *args):
        """Raise a SyntaxError at the last token's position; *args are
        %-interpolated into messageFormat."""
        msg = messageFormat % tuple(unicode(e) for e in args)
        raise self.createError(self.lastLineNumber, self.lastIndex, msg);

    def tolerateError(self, messageFormat, *args):
        """Tolerant-mode hook: not implemented, simply re-raises via
        throwError."""
        return self.throwError(messageFormat, *args)

    # Throw an exception because of the token.
    def unexpectedTokenError(self, token={}, message=''):
        """Build (but do not raise) the unexpected-token error for *token*,
        choosing a message from the token type when none is given.
        NOTE(review): the mutable default `token={}` is only read, never
        mutated, so it is harmless here — but it is the classic shared
        default-argument pitfall; `token=None` would be safer."""
        msg = message or Messages.UnexpectedToken
        if (token):
            typ = token['type']
            if (not message):
                if typ == Token.EOF:
                    msg = Messages.UnexpectedEOS
                elif (typ == Token.Identifier):
                    msg = Messages.UnexpectedIdentifier
                elif (typ == Token.NumericLiteral):
                    msg = Messages.UnexpectedNumber
                elif (typ == Token.StringLiteral):
                    msg = Messages.UnexpectedString
                elif (typ == Token.Template):
                    msg = Messages.UnexpectedTemplate
                else:
                    msg = Messages.UnexpectedToken;
                # Refine the message for reserved words.
                if (typ == Token.Keyword):
                    if (isFutureReservedWord(token['value'])):
                        msg = Messages.UnexpectedReserved
                    elif (self.strict and isStrictModeReservedWord(token['value'])):
                        msg = Messages.StrictReservedWord
            value = token['value']['raw'] if (typ == Token.Template) else token.get('value')
        else:
            value = 'ILLEGAL'
        msg = msg.replace('%s', unicode(value))
        # Prefer the token's own position; fall back to the scanner position.
        return (self.createError(token['lineNumber'], token['start'], msg) if (token and token.get('lineNumber')) else
                self.createError(self.lineNumber if self.scanning else self.lastLineNumber,
                                 self.index if self.scanning else self.lastIndex, msg))

    def throwUnexpectedToken(self, token={}, message=''):
        """Raise the error built by unexpectedTokenError."""
        raise self.unexpectedTokenError(token, message)

    def tolerateUnexpectedToken(self, token={}, message=''):
        """Tolerant-mode hook: not implemented, simply raises like
        throwUnexpectedToken."""
        self.throwUnexpectedToken(token, message)

    # Expect the next token to match the specified punctuator.
    # If not, an exception will be thrown.
def expect(self, value): token = self.lex() if (token['type'] != Token.Punctuator or token['value'] != value): self.throwUnexpectedToken(token) #/** # * @name expectCommaSeparator # * @description Quietly expect a comma when in tolerant mode, otherwise delegates # * to <code>expect(value)</code> # * @since 2.0 # */ def expectCommaSeparator(self): self.expect(',') # Expect the next token to match the specified keyword. # If not, an exception will be thrown. def expectKeyword(self, keyword): token = self.lex(); if (token['type'] != Token.Keyword or token['value'] != keyword): self.throwUnexpectedToken(token) # Return true if the next token matches the specified punctuator. def match(self, value): return self.lookahead['type'] == Token.Punctuator and self.lookahead['value'] == value # Return true if the next token matches the specified keyword def matchKeyword(self, keyword): return self.lookahead['type'] == Token.Keyword and self.lookahead['value'] == keyword # Return true if the next token matches the specified contextual keyword # (where an identifier is sometimes a keyword depending on the context) def matchContextualKeyword(self, keyword): return self.lookahead['type'] == Token.Identifier and self.lookahead['value'] == keyword # Return true if the next token is an assignment operator def matchAssign(self): if (self.lookahead['type'] != Token.Punctuator): return False; op = self.lookahead['value'] return op in {'=','*=', '/=','%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=' , '^=' , '|='} def consumeSemicolon(self): # Catch the very common case first: immediately a semicolon (U+003B). if (self.at(self.startIndex) == ';' or self.match(';')): self.lex() return if (self.hasLineTerminator): return # TODO: FIXME(ikarienator): this is seemingly an issue in the previous location info convention. 
self.lastIndex = self.startIndex self.lastLineNumber = self.startLineNumber self.lastLineStart = self.startLineStart if (self.lookahead['type'] != Token.EOF and not self.match('}')): self.throwUnexpectedToken(self.lookahead) # // Cover grammar support. # // # // When an assignment expression position starts with an left parenthesis, the determination of the type # // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead) # // or the first comma. This situation also defers the determination of all the expressions nested in the pair. # // # // There are three productions that can be parsed in a parentheses pair that needs to be determined # // after the outermost pair is closed. They are: # // # // 1. AssignmentExpression # // 2. BindingElements # // 3. AssignmentTargets # // # // In order to avoid exponential backtracking, we use two flags to denote if the production can be # // binding element or assignment target. # // # // The three productions have the relationship: # // # // BindingElements <= AssignmentTargets <= AssignmentExpression # // # // with a single exception that CoverInitializedName when used directly in an Expression, generates # // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the # // first usage of CoverInitializedName and report it when we reached the end of the parentheses pair. # // # // isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not # // effect the current flags. This means the production the parser parses is only used as an expression. Therefore # // the CoverInitializedName check is conducted. # // # // inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates # // the flags outside of the parser. This means the production the parser parses is used as a part of a potential # // pattern. The CoverInitializedName check is deferred. 
    def isolateCoverGrammar(self, parser):
        # Run ``parser`` in a fresh cover-grammar context WITHOUT letting its
        # flags leak out: the production is used purely as an expression, so a
        # pending CoverInitializedName error must be reported here.
        oldIsBindingElement = self.isBindingElement
        oldIsAssignmentTarget = self.isAssignmentTarget
        oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
        self.isBindingElement = true
        self.isAssignmentTarget = true
        self.firstCoverInitializedNameError = null
        result = parser()
        if (self.firstCoverInitializedNameError != null):
            self.throwUnexpectedToken(self.firstCoverInitializedNameError)
        self.isBindingElement = oldIsBindingElement
        self.isAssignmentTarget = oldIsAssignmentTarget
        self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError
        return result

    def inheritCoverGrammar(self, parser):
        # Run ``parser`` in a fresh cover-grammar context and PROPAGATE the
        # resulting flags to the caller: the production may still become part
        # of a pattern, so the CoverInitializedName check is deferred.
        oldIsBindingElement = self.isBindingElement
        oldIsAssignmentTarget = self.isAssignmentTarget
        oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
        self.isBindingElement = true
        self.isAssignmentTarget = true
        self.firstCoverInitializedNameError = null
        result = parser()
        # AND the flags: an inner non-binding production poisons the outer one.
        self.isBindingElement = self.isBindingElement and oldIsBindingElement
        self.isAssignmentTarget = self.isAssignmentTarget and oldIsAssignmentTarget
        # Keep the FIRST CoverInitializedName error seen (outer one wins).
        self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError or self.firstCoverInitializedNameError
        return result

    def parseArrayPattern(self):
        # Parse a destructuring array pattern such as ``[a, , b, ...rest]``.
        node = Node()
        elements = []
        self.expect('[');
        while (not self.match(']')):
            if (self.match(',')):
                # Elision (hole) is represented as a null element.
                self.lex()
                elements.append(null)
            else:
                if (self.match('...')):
                    # Rest element must be last; stop collecting after it.
                    restNode = Node()
                    self.lex()
                    rest = self.parseVariableIdentifier()
                    elements.append(restNode.finishRestElement(rest))
                    break
                else:
                    elements.append(self.parsePatternWithDefault())
                if (not self.match(']')):
                    self.expect(',')
        self.expect(']')
        return node.finishArrayPattern(elements)

    def parsePropertyPattern(self):
        # Parse one property inside an object pattern, covering the shorthand
        # (``{a}``), shorthand-with-default (``{a = 1}``) and keyed forms.
        node = Node()
        computed = self.match('[')
        if (self.lookahead['type'] == Token.Identifier):
            key = self.parseVariableIdentifier()
            if (self.match('=')):
                # Shorthand with default value: {a = expr}.
                self.lex();
                init = self.parseAssignmentExpression()
                return node.finishProperty(
                    'init', key, false,
                    WrappingNode(key).finishAssignmentPattern(key, init), false, false)
            elif (not self.match(':')):
                # Plain shorthand property: {a}.
                return node.finishProperty('init', key, false, key, false, true)
        else:
            key = self.parseObjectPropertyKey()
        self.expect(':')
        init = self.parsePatternWithDefault()
        return node.finishProperty('init', key, computed, init, false, false)

    def parseObjectPattern(self):
        # Parse a destructuring object pattern such as ``{a, b: c, d = 1}``.
        node = Node()
        properties = []
        self.expect('{')
        while (not self.match('}')):
            properties.append(self.parsePropertyPattern())
            if (not self.match('}')):
                self.expect(',')
        # Consume the closing '}'.
        self.lex()
        return node.finishObjectPattern(properties)

    def parsePattern(self):
        # Dispatch on lookahead to one of the three binding pattern forms.
        if (self.lookahead['type'] == Token.Identifier):
            return self.parseVariableIdentifier()
        elif (self.match('[')):
            return self.parseArrayPattern()
        elif (self.match('{')):
            return self.parseObjectPattern()
        self.throwUnexpectedToken(self.lookahead)

    def parsePatternWithDefault(self):
        # Parse a pattern optionally followed by ``= default``.
        startToken = self.lookahead
        pattern = self.parsePattern()
        if (self.match('=')):
            self.lex()
            right = self.isolateCoverGrammar(self.parseAssignmentExpression)
            pattern = WrappingNode(startToken).finishAssignmentPattern(pattern, right)
        return pattern

    # 11.1.4 Array Initialiser

    def parseArrayInitialiser(self):
        # Parse an array literal ``[...]``; holes become null entries.
        elements = []
        node = Node()
        self.expect('[')
        while (not self.match(']')):
            if (self.match(',')):
                self.lex()
                elements.append(null)
            elif (self.match('...')):
                restSpread = Node()
                self.lex()
                restSpread.finishSpreadElement(self.inheritCoverGrammar(self.parseAssignmentExpression))
                if (not self.match(']')):
                    # A spread that is not last cannot be a binding pattern.
                    self.isAssignmentTarget = self.isBindingElement = false
                    self.expect(',')
                elements.append(restSpread)
            else:
                elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
                if (not self.match(']')):
                    self.expect(',')
        # Consume the closing ']'.
        self.lex();
        return node.finishArrayExpression(elements)

    # 11.1.5 Object Initialiser

    def parsePropertyFunction(self, node, paramInfo):
        # Parse the body of a method / accessor defined in an object literal.
        self.isAssignmentTarget = self.isBindingElement = false;
        previousStrict = self.strict;
        body = self.isolateCoverGrammar(self.parseFunctionSourceElements);
        # Restricted parameter names only become errors once the body is
        # known (the body may have switched us into strict mode).
        if (self.strict and paramInfo['firstRestricted']):
            self.tolerateUnexpectedToken(paramInfo['firstRestricted'], paramInfo.get('message'))
        if (self.strict and paramInfo['stricted']):
            self.tolerateUnexpectedToken(paramInfo['stricted'], paramInfo.get('message'));
        self.strict = previousStrict;
        return node.finishFunctionExpression(null, paramInfo['params'], paramInfo['defaults'], body)

    def parsePropertyMethodFunction(self):
        # Parse a shorthand method: parameter list followed by a body.
        node = Node();
        params = self.parseParams();
        method = self.parsePropertyFunction(node, params);
        return method;

    def parseObjectPropertyKey(self):
        # Parse a property key: string/number literal, identifier-like token,
        # or a computed key ``[expr]``.
        node = Node()
        token = self.lex();
        # // Note: This function is called only from parseObjectProperty(), where
        # // EOF and Punctuator tokens are already filtered out.
        typ = token['type']
        if typ in [Token.StringLiteral, Token.NumericLiteral]:
            if self.strict and token.get('octal'):
                self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral);
            return node.finishLiteral(token);
        elif typ in {Token.Identifier, Token.BooleanLiteral, Token.NullLiteral, Token.Keyword}:
            return node.finishIdentifier(token['value']);
        elif typ == Token.Punctuator:
            if (token['value'] == '['):
                expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
                self.expect(']')
                return expr
        self.throwUnexpectedToken(token)

    def lookaheadPropertyName(self):
        # Can the lookahead token start a property name?
        typ = self.lookahead['type']
        if typ in {Token.Identifier, Token.StringLiteral, Token.BooleanLiteral, Token.NullLiteral, Token.NumericLiteral, Token.Keyword}:
            return true
        if typ == Token.Punctuator:
            return self.lookahead['value'] == '['
        return false

    # // This function is to try to parse a MethodDefinition as defined in 14.3. But in the case of object literals,
    # // it might be called at a position where there is in fact a short hand identifier pattern or a data property.
    # // This can only be determined after we consumed up to the left parentheses.
# // # // In order to avoid back tracking, it returns `null` if the position is not a MethodDefinition and the caller # // is responsible to visit other options. def tryParseMethodDefinition(self, token, key, computed, node): if (token['type'] == Token.Identifier): # check for `get` and `set`; if (token['value'] == 'get' and self.lookaheadPropertyName()): computed = self.match('['); key = self.parseObjectPropertyKey() methodNode = Node() self.expect('(') self.expect(')') value = self.parsePropertyFunction(methodNode, { 'params': [], 'defaults': [], 'stricted': null, 'firstRestricted': null, 'message': null }) return node.finishProperty('get', key, computed, value, false, false) elif (token['value'] == 'set' and self.lookaheadPropertyName()): computed = self.match('[') key = self.parseObjectPropertyKey() methodNode = Node() self.expect('(') options = { 'params': [], 'defaultCount': 0, 'defaults': [], 'firstRestricted': null, 'paramSet': {} } if (self.match(')')): self.tolerateUnexpectedToken(self.lookahead); else: self.parseParam(options); if (options['defaultCount'] == 0): options['defaults'] = [] self.expect(')') value = self.parsePropertyFunction(methodNode, options); return node.finishProperty('set', key, computed, value, false, false); if (self.match('(')): value = self.parsePropertyMethodFunction(); return node.finishProperty('init', key, computed, value, true, false) return null; def checkProto(self, key, computed, hasProto): if (computed == false and (key['type'] == Syntax.Identifier and key.name == '__proto__' or key['type'] == Syntax.Literal and key.value == '__proto__')): if (hasProto.value): self.tolerateError(Messages.DuplicateProtoProperty); else: hasProto.value = true; def parseObjectProperty(self, hasProto): token = self.lookahead node = Node() computed = self.match('['); key = self.parseObjectPropertyKey(); maybeMethod = self.tryParseMethodDefinition(token, key, computed, node) if (maybeMethod): self.checkProto(maybeMethod.key, maybeMethod.computed, 
hasProto); return maybeMethod; #// init property or short hand property. self.checkProto(key, computed, hasProto); if (self.match(':')): self.lex(); value = self.inheritCoverGrammar(self.parseAssignmentExpression) return node.finishProperty('init', key, computed, value, false, false) if (token['type'] == Token.Identifier): if (self.match('=')): self.firstCoverInitializedNameError = self.lookahead; self.lex(); value = self.isolateCoverGrammar(self.parseAssignmentExpression); return node.finishProperty('init', key, computed, WrappingNode(token).finishAssignmentPattern(key, value), false, true) return node.finishProperty('init', key, computed, key, false, true) self.throwUnexpectedToken(self.lookahead) def parseObjectInitialiser(self): properties = [] hasProto = {'value': false} node = Node(); self.expect('{'); while (not self.match('}')): properties.append(self.parseObjectProperty(hasProto)); if (not self.match('}')): self.expectCommaSeparator() self.expect('}'); return node.finishObjectExpression(properties) def reinterpretExpressionAsPattern(self, expr): typ = (expr['type']) if typ in {Syntax.Identifier, Syntax.MemberExpression, Syntax.RestElement, Syntax.AssignmentPattern}: pass elif typ == Syntax.SpreadElement: expr['type'] = Syntax.RestElement self.reinterpretExpressionAsPattern(expr.argument) elif typ == Syntax.ArrayExpression: expr['type'] = Syntax.ArrayPattern for i in xrange(len(expr['elements'])): if (expr['elements'][i] != null): self.reinterpretExpressionAsPattern(expr['elements'][i]) elif typ == Syntax.ObjectExpression: expr['type'] = Syntax.ObjectPattern for i in xrange(len(expr['properties'])): self.reinterpretExpressionAsPattern(expr['properties'][i]['value']); elif Syntax.AssignmentExpression: expr['type'] = Syntax.AssignmentPattern; self.reinterpretExpressionAsPattern(expr['left']) else: #// Allow other node type for tolerant parsing. 
            return

    def parseTemplateElement(self, option):
        # Parse one chunk of a template literal (head or middle/tail part).
        if (self.lookahead['type'] != Token.Template or (option['head'] and not self.lookahead['head'])):
            self.throwUnexpectedToken()
        node = Node();
        token = self.lex();
        return node.finishTemplateElement({
            'raw': token['value']['raw'],
            'cooked': token['value']['cooked']}, token['tail'])

    def parseTemplateLiteral(self):
        # Parse a full template literal, alternating quasis and expressions.
        node = Node()
        quasi = self.parseTemplateElement({'head': true})
        quasis = [quasi]
        expressions = []
        while (not quasi['tail']):
            expressions.append(self.parseExpression());
            quasi = self.parseTemplateElement({'head': false});
            quasis.append(quasi)
        return node.finishTemplateLiteral(quasis, expressions)

    # 11.1.6 The Grouping Operator

    def parseGroupExpression(self):
        # Parse a parenthesised expression, which may turn out to be an
        # arrow-function parameter list (returned as a placeholder node).
        self.expect('(');
        if (self.match(')')):
            # Empty parens: must be an arrow-function parameter list.
            self.lex();
            if (not self.match('=>')):
                self.expect('=>')
            return {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': []}
        startToken = self.lookahead
        if (self.match('...')):
            # A leading rest element also forces the arrow interpretation.
            expr = self.parseRestElement();
            self.expect(')');
            if (not self.match('=>')):
                self.expect('=>')
            return {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': [expr]}
        self.isBindingElement = true;
        expr = self.inheritCoverGrammar(self.parseAssignmentExpression);
        if (self.match(',')):
            self.isAssignmentTarget = false;
            expressions = [expr]
            while (self.startIndex < self.length):
                if (not self.match(',')):
                    break
                self.lex();
                if (self.match('...')):
                    # Rest element in the middle of a sequence: only valid as
                    # an arrow parameter list.
                    if (not self.isBindingElement):
                        self.throwUnexpectedToken(self.lookahead)
                    expressions.append(self.parseRestElement())
                    self.expect(')');
                    if (not self.match('=>')):
                        self.expect('=>');
                    self.isBindingElement = false
                    for i in xrange(len(expressions)):
                        self.reinterpretExpressionAsPattern(expressions[i])
                    return {
                        'type': PlaceHolders.ArrowParameterPlaceHolder,
                        'params': expressions}
                expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
            expr = WrappingNode(startToken).finishSequenceExpression(expressions);
        self.expect(')')
        if (self.match('=>')):
            if (not self.isBindingElement):
self.throwUnexpectedToken(self.lookahead); if (expr['type'] == Syntax.SequenceExpression): for i in xrange(len(expr.expressions)): self.reinterpretExpressionAsPattern(expr['expressions'][i]) else: self.reinterpretExpressionAsPattern(expr); expr = { 'type': PlaceHolders.ArrowParameterPlaceHolder, 'params': expr['expressions'] if expr['type'] == Syntax.SequenceExpression else [expr]} self.isBindingElement = false return expr # 11.1 Primary Expressions def parsePrimaryExpression(self): if (self.match('(')): self.isBindingElement = false; return self.inheritCoverGrammar(self.parseGroupExpression) if (self.match('[')): return self.inheritCoverGrammar(self.parseArrayInitialiser) if (self.match('{')): return self.inheritCoverGrammar(self.parseObjectInitialiser) typ = self.lookahead['type'] node = Node(); if (typ == Token.Identifier): expr = node.finishIdentifier(self.lex()['value']); elif (typ == Token.StringLiteral or typ == Token.NumericLiteral): self.isAssignmentTarget = self.isBindingElement = false if (self.strict and self.lookahead.get('octal')): self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral) expr = node.finishLiteral(self.lex()) elif (typ == Token.Keyword): self.isAssignmentTarget = self.isBindingElement = false if (self.matchKeyword('function')): return self.parseFunctionExpression() if (self.matchKeyword('this')): self.lex() return node.finishThisExpression() if (self.matchKeyword('class')): return self.parseClassExpression() self.throwUnexpectedToken(self.lex()) elif (typ == Token.BooleanLiteral): isAssignmentTarget = self.isBindingElement = false token = self.lex(); token['value'] = (token['value'] == 'true') expr = node.finishLiteral(token) elif (typ == Token.NullLiteral): self.isAssignmentTarget = self.isBindingElement = false token = self.lex() token['value'] = null; expr = node.finishLiteral(token) elif (self.match('/') or self.match('/=')): self.isAssignmentTarget = self.isBindingElement = false; self.index = self.startIndex; 
token = self.scanRegExp(); # hehe, here you are! self.lex(); expr = node.finishLiteral(token); elif (typ == Token.Template): expr = self.parseTemplateLiteral() else: self.throwUnexpectedToken(self.lex()); return expr; # 11.2 Left-Hand-Side Expressions def parseArguments(self): args = []; self.expect('('); if (not self.match(')')): while (self.startIndex < self.length): args.append(self.isolateCoverGrammar(self.parseAssignmentExpression)) if (self.match(')')): break self.expectCommaSeparator() self.expect(')') return args; def parseNonComputedProperty(self): node = Node() token = self.lex(); if (not self.isIdentifierName(token)): self.throwUnexpectedToken(token) return node.finishIdentifier(token['value']) def parseNonComputedMember(self): self.expect('.') return self.parseNonComputedProperty(); def parseComputedMember(self): self.expect('[') expr = self.isolateCoverGrammar(self.parseExpression) self.expect(']') return expr def parseNewExpression(self): node = Node() self.expectKeyword('new') callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression) args = self.parseArguments() if self.match('(') else [] self.isAssignmentTarget = self.isBindingElement = false return node.finishNewExpression(callee, args) def parseLeftHandSideExpressionAllowCall(self): previousAllowIn = self.state['allowIn'] startToken = self.lookahead; self.state['allowIn'] = true; if (self.matchKeyword('super') and self.state['inFunctionBody']): expr = Node(); self.lex(); expr = expr.finishSuper() if (not self.match('(') and not self.match('.') and not self.match('[')): self.throwUnexpectedToken(self.lookahead); else: expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression) while True: if (self.match('.')): self.isBindingElement = false; self.isAssignmentTarget = true; property = self.parseNonComputedMember(); expr = WrappingNode(startToken).finishMemberExpression('.', expr, property) elif (self.match('(')): 
                self.isBindingElement = false;
                # A call result is not a valid assignment target.
                self.isAssignmentTarget = false;
                args = self.parseArguments();
                expr = WrappingNode(startToken).finishCallExpression(expr, args)
            elif (self.match('[')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
            elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
                quasi = self.parseTemplateLiteral()
                expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
            else:
                break
        self.state['allowIn'] = previousAllowIn
        return expr

    def parseLeftHandSideExpression(self):
        # Parse a LHS expression WITHOUT call expressions (used for ``new``
        # callees); only member access and tagged templates are folded.
        assert self.state['allowIn'], 'callee of new expression always allow in keyword.'
        startToken = self.lookahead
        if (self.matchKeyword('super') and self.state['inFunctionBody']):
            expr = Node();
            self.lex();
            expr = expr.finishSuper();
            if (not self.match('[') and not self.match('.')):
                self.throwUnexpectedToken(self.lookahead)
        else:
            expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression);
        while True:
            if (self.match('[')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
            elif (self.match('.')):
                self.isBindingElement = false;
                self.isAssignmentTarget = true;
                property = self.parseNonComputedMember();
                expr = WrappingNode(startToken).finishMemberExpression('.', expr, property);
            elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
                quasi = self.parseTemplateLiteral();
                expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
            else:
                break
        return expr

    # 11.3 Postfix Expressions

    def parsePostfixExpression(self):
        # Parse a LHS expression optionally followed by postfix ++/--
        # (no line terminator allowed before the operator).
        startToken = self.lookahead
        expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
        if (not self.hasLineTerminator and self.lookahead['type'] == Token.Punctuator):
            if (self.match('++') or
                    self.match('--')):
                # 11.3.1, 11.3.2
                if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                    self.tolerateError(Messages.StrictLHSPostfix)
                if (not self.isAssignmentTarget):
                    self.tolerateError(Messages.InvalidLHSInAssignment);
                self.isAssignmentTarget = self.isBindingElement = false;
                token = self.lex();
                expr = WrappingNode(startToken).finishPostfixExpression(token['value'], expr);
        return expr;

    # 11.4 Unary Operators

    def parseUnaryExpression(self):
        # Parse prefix ++/--, unary + - ~ !, and delete/void/typeof forms.
        if (self.lookahead['type'] != Token.Punctuator and self.lookahead['type'] != Token.Keyword):
            expr = self.parsePostfixExpression();
        elif (self.match('++') or self.match('--')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            # 11.4.4, 11.4.5
            if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                self.tolerateError(Messages.StrictLHSPrefix)
            if (not self.isAssignmentTarget):
                self.tolerateError(Messages.InvalidLHSInAssignment)
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
            self.isAssignmentTarget = self.isBindingElement = false
        elif (self.match('+') or self.match('-') or self.match('~') or self.match('!')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
            self.isAssignmentTarget = self.isBindingElement = false;
        elif (self.matchKeyword('delete') or self.matchKeyword('void') or self.matchKeyword('typeof')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr);
            # ``delete identifier`` is an error in strict mode.
            if (self.strict and expr.operator == 'delete' and expr.argument.type == Syntax.Identifier):
                self.tolerateError(Messages.StrictDelete)
            self.isAssignmentTarget = self.isBindingElement = false;
        else:
            expr = self.parsePostfixExpression()
        return expr

    def binaryPrecedence(self, token, allowIn):
        # Return the binding power of ``token`` as a binary operator
        # (0 when it is not one); ``in`` only counts when allowIn is set.
        prec = 0;
        typ = token['type']
        if (typ != Token.Punctuator and typ != Token.Keyword):
            return 0;
        val = token['value']
        if val == 'in' and not allowIn:
            return 0
        return PRECEDENCE.get(val, 0)

    # 11.5 Multiplicative Operators
    # 11.6 Additive Operators
    # 11.7 Bitwise Shift Operators
    # 11.8 Relational Operators
    # 11.9 Equality Operators
    # 11.10 Binary Bitwise Operators
    # 11.11 Binary Logical Operators

    def parseBinaryExpression(self):
        # Operator-precedence (shift/reduce) parse of binary expressions.
        marker = self.lookahead;
        left = self.inheritCoverGrammar(self.parseUnaryExpression);
        token = self.lookahead;
        prec = self.binaryPrecedence(token, self.state['allowIn']);
        if (prec == 0):
            return left
        self.isAssignmentTarget = self.isBindingElement = false;
        token['prec'] = prec
        self.lex()
        markers = [marker, self.lookahead];
        right = self.isolateCoverGrammar(self.parseUnaryExpression);
        stack = [left, token, right];
        while True:
            prec = self.binaryPrecedence(self.lookahead, self.state['allowIn'])
            if not prec > 0:
                break
            # Reduce: make a binary expression from the three topmost entries.
            while ((len(stack) > 2) and (prec <= stack[len(stack) - 2]['prec'])):
                right = stack.pop();
                operator = stack.pop()['value']
                left = stack.pop()
                markers.pop()
                expr = WrappingNode(markers[len(markers) - 1]).finishBinaryExpression(operator, left, right)
                stack.append(expr)
            # Shift
            token = self.lex();
            token['prec'] = prec;
            stack.append(token);
            markers.append(self.lookahead);
            expr = self.isolateCoverGrammar(self.parseUnaryExpression);
            stack.append(expr);
        # Final reduce to clean-up the stack.
        i = len(stack) - 1;
        expr = stack[i]
        markers.pop()
        while (i > 1):
            expr = WrappingNode(markers.pop()).finishBinaryExpression(stack[i - 1]['value'], stack[i - 2], expr);
            i -= 2
        return expr

    # 11.12 Conditional Operator

    def parseConditionalExpression(self):
        # Parse ``test ? consequent : alternate``.
        startToken = self.lookahead
        expr = self.inheritCoverGrammar(self.parseBinaryExpression);
        if (self.match('?')):
            self.lex()
            # ``in`` is always allowed inside the branches.
            previousAllowIn = self.state['allowIn']
            self.state['allowIn'] = true;
            consequent = self.isolateCoverGrammar(self.parseAssignmentExpression);
            self.state['allowIn'] = previousAllowIn;
            self.expect(':');
            alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
            expr = WrappingNode(startToken).finishConditionalExpression(expr, consequent, alternate);
            self.isAssignmentTarget = self.isBindingElement = false;
        return expr

    # [ES6] 14.2 Arrow Function

    def parseConciseBody(self):
        # Arrow body: a block, or a single assignment expression.
        if (self.match('{')):
            return self.parseFunctionSourceElements()
        return self.isolateCoverGrammar(self.parseAssignmentExpression)

    def checkPatternParam(self, options, param):
        # Recursively validate every identifier bound by an arrow parameter
        # pattern, accumulating strict-mode diagnostics into ``options``.
        typ = param.type
        if typ == Syntax.Identifier:
            self.validateParam(options, param, param.name);
        elif typ == Syntax.RestElement:
            self.checkPatternParam(options, param.argument)
        elif typ == Syntax.AssignmentPattern:
            self.checkPatternParam(options, param.left)
        elif typ == Syntax.ArrayPattern:
            for i in xrange(len(param.elements)):
                if (param.elements[i] != null):
                    self.checkPatternParam(options, param.elements[i]);
        else:
            assert typ == Syntax.ObjectPattern, 'Invalid type'
            for i in xrange(len(param.properties)):
                self.checkPatternParam(options, param.properties[i].value);

    def reinterpretAsCoverFormalsList(self, expr):
        # Reinterpret an already-parsed expression (or an arrow-parameter
        # placeholder) as an arrow-function formal parameter list; returns
        # null when the expression cannot be a parameter list.
        defaults = [];
        defaultCount = 0;
        params = [expr];
        typ = expr.type
        if typ == Syntax.Identifier:
            pass
        elif typ == PlaceHolders.ArrowParameterPlaceHolder:
            params = expr.params
        else:
            return null
        options = {
            'paramSet': {}}
        le = len(params)
        for i in xrange(le):
            param = params[i]
            if param.type == Syntax.AssignmentPattern:
                params[i] = param.left;
defaults.append(param.right); defaultCount += 1 self.checkPatternParam(options, param.left); else: self.checkPatternParam(options, param); params[i] = param; defaults.append(null); if (options.get('message') == Messages.StrictParamDupe): token = options['stricted'] if self.strict else options['firstRestricted'] self.throwUnexpectedToken(token, options.get('message')); if (defaultCount == 0): defaults = [] return { 'params': params, 'defaults': defaults, 'stricted': options['stricted'], 'firstRestricted': options['firstRestricted'], 'message': options.get('message')} def parseArrowFunctionExpression(self, options, node): if (self.hasLineTerminator): self.tolerateUnexpectedToken(self.lookahead) self.expect('=>') previousStrict = self.strict; body = self.parseConciseBody(); if (self.strict and options['firstRestricted']): self.throwUnexpectedToken(options['firstRestricted'], options.get('message')); if (self.strict and options['stricted']): self.tolerateUnexpectedToken(options['stricted'], options['message']); self.strict = previousStrict return node.finishArrowFunctionExpression(options['params'], options['defaults'], body, body.type != Syntax.BlockStatement) # 11.13 Assignment Operators def parseAssignmentExpression(self): startToken = self.lookahead; token = self.lookahead; expr = self.parseConditionalExpression(); if (expr.type == PlaceHolders.ArrowParameterPlaceHolder or self.match('=>')): self.isAssignmentTarget = self.isBindingElement = false; lis = self.reinterpretAsCoverFormalsList(expr) if (lis): self.firstCoverInitializedNameError = null; return self.parseArrowFunctionExpression(lis, WrappingNode(startToken)) return expr if (self.matchAssign()): if (not self.isAssignmentTarget): self.tolerateError(Messages.InvalidLHSInAssignment) # 11.13.1 if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)): self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment); if (not self.match('=')): self.isAssignmentTarget = 
self.isBindingElement = false; else: self.reinterpretExpressionAsPattern(expr) token = self.lex(); right = self.isolateCoverGrammar(self.parseAssignmentExpression) expr = WrappingNode(startToken).finishAssignmentExpression(token['value'], expr, right); self.firstCoverInitializedNameError = null return expr # 11.14 Comma Operator def parseExpression(self): startToken = self.lookahead expr = self.isolateCoverGrammar(self.parseAssignmentExpression) if (self.match(',')): expressions = [expr]; while (self.startIndex < self.length): if (not self.match(',')): break self.lex(); expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression)) expr = WrappingNode(startToken).finishSequenceExpression(expressions); return expr # 12.1 Block def parseStatementListItem(self): if (self.lookahead['type'] == Token.Keyword): val = (self.lookahead['value']) if val=='export': if (self.sourceType != 'module'): self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration) return self.parseExportDeclaration(); elif val == 'import': if (self.sourceType != 'module'): self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration); return self.parseImportDeclaration(); elif val == 'const' or val == 'let': return self.parseLexicalDeclaration({'inFor': false}); elif val == 'function': return self.parseFunctionDeclaration(Node()); elif val == 'class': return self.parseClassDeclaration(); elif val == 'pyimport': # <<<<< MODIFIED HERE return self.parsePyimportStatement() return self.parseStatement(); def parsePyimportStatement(self): n = Node() self.lex() n.finishPyimport(self.parseVariableIdentifier()) self.consumeSemicolon() return n def parseStatementList(self): list = []; while (self.startIndex < self.length): if (self.match('}')): break list.append(self.parseStatementListItem()) return list def parseBlock(self): node = Node(); self.expect('{'); block = self.parseStatementList() self.expect('}'); return node.finishBlockStatement(block); # 12.2 
Variable Statement def parseVariableIdentifier(self): node = Node() token = self.lex() if (token['type'] != Token.Identifier): if (self.strict and token['type'] == Token.Keyword and isStrictModeReservedWord(token['value'])): self.tolerateUnexpectedToken(token, Messages.StrictReservedWord); else: self.throwUnexpectedToken(token) return node.finishIdentifier(token['value']) def parseVariableDeclaration(self): init = null node = Node(); d = self.parsePattern(); # 12.2.1 if (self.strict and isRestrictedWord(d.name)): self.tolerateError(Messages.StrictVarName); if (self.match('=')): self.lex(); init = self.isolateCoverGrammar(self.parseAssignmentExpression); elif (d.type != Syntax.Identifier): self.expect('=') return node.finishVariableDeclarator(d, init) def parseVariableDeclarationList(self): lis = [] while True: lis.append(self.parseVariableDeclaration()) if (not self.match(',')): break self.lex(); if not (self.startIndex < self.length): break return lis; def parseVariableStatement(self, node): self.expectKeyword('var') declarations = self.parseVariableDeclarationList() self.consumeSemicolon() return node.finishVariableDeclaration(declarations) def parseLexicalBinding(self, kind, options): init = null node = Node() d = self.parsePattern(); # 12.2.1 if (self.strict and d.type == Syntax.Identifier and isRestrictedWord(d.name)): self.tolerateError(Messages.StrictVarName); if (kind == 'const'): if (not self.matchKeyword('in')): self.expect('=') init = self.isolateCoverGrammar(self.parseAssignmentExpression) elif ((not options['inFor'] and d.type != Syntax.Identifier) or self.match('=')): self.expect('='); init = self.isolateCoverGrammar(self.parseAssignmentExpression); return node.finishVariableDeclarator(d, init) def parseBindingList(self, kind, options): list = []; while True: list.append(self.parseLexicalBinding(kind, options)); if (not self.match(',')): break self.lex(); if not (self.startIndex < self.length): break return list; def parseLexicalDeclaration(self, 
                                options):
        # Parse a ``let``/``const`` declaration statement.
        node = Node();
        kind = self.lex()['value']
        assert kind == 'let' or kind == 'const', 'Lexical declaration must be either let or const'
        declarations = self.parseBindingList(kind, options);
        self.consumeSemicolon();
        return node.finishLexicalDeclaration(declarations, kind);

    def parseRestElement(self):
        # Parse a rest parameter ``...name`` (identifier only, no default,
        # must be the last parameter).
        node = Node();
        self.lex();
        if (self.match('{')):
            self.throwError(Messages.ObjectPatternAsRestParameter)
        param = self.parseVariableIdentifier();
        if (self.match('=')):
            self.throwError(Messages.DefaultRestParameter);
        if (not self.match(')')):
            self.throwError(Messages.ParameterAfterRestParameter);
        return node.finishRestElement(param);

    # 12.3 Empty Statement

    def parseEmptyStatement(self, node):
        self.expect(';');
        return node.finishEmptyStatement()

    # 12.4 Expression Statement

    def parseExpressionStatement(self, node):
        expr = self.parseExpression();
        self.consumeSemicolon();
        return node.finishExpressionStatement(expr);

    # 12.5 If statement

    def parseIfStatement(self, node):
        self.expectKeyword('if');
        self.expect('(');
        test = self.parseExpression();
        self.expect(')');
        consequent = self.parseStatement();
        if (self.matchKeyword('else')):
            self.lex();
            alternate = self.parseStatement();
        else:
            alternate = null;
        return node.finishIfStatement(test, consequent, alternate)

    # 12.6 Iteration Statements

    def parseDoWhileStatement(self, node):
        self.expectKeyword('do')
        # Track iteration context so break/continue can be validated.
        oldInIteration = self.state['inIteration']
        self.state['inIteration'] = true
        body = self.parseStatement();
        self.state['inIteration'] = oldInIteration;
        self.expectKeyword('while');
        self.expect('(');
        test = self.parseExpression();
        self.expect(')')
        # The trailing semicolon is optional after do-while.
        if (self.match(';')):
            self.lex()
        return node.finishDoWhileStatement(body, test)

    def parseWhileStatement(self, node):
        self.expectKeyword('while')
        self.expect('(')
        test = self.parseExpression()
        self.expect(')')
        oldInIteration = self.state['inIteration']
        self.state['inIteration'] = true
        body = self.parseStatement()
        self.state['inIteration'] = oldInIteration
        return node.finishWhileStatement(test, body)

    def parseForStatement(self, node):
        # Parse both classic ``for (init; test; update)`` and ``for .. in``
        # forms.  NOTE: whether this is a for-in is tracked by whether the
        # local name ``left`` got bound - hence the ``locals()`` checks below.
        previousAllowIn = self.state['allowIn']
        init = test = update = null
        self.expectKeyword('for')
        self.expect('(')
        if (self.match(';')):
            self.lex()
        else:
            if (self.matchKeyword('var')):
                init = Node()
                self.lex()
                # ``in`` must not be treated as an operator inside the head.
                self.state['allowIn'] = false;
                init = init.finishVariableDeclaration(self.parseVariableDeclarationList())
                self.state['allowIn'] = previousAllowIn
                if (len(init.declarations) == 1 and self.matchKeyword('in')):
                    self.lex()
                    left = init
                    right = self.parseExpression()
                    init = null
                else:
                    self.expect(';')
            elif (self.matchKeyword('const') or self.matchKeyword('let')):
                init = Node()
                kind = self.lex()['value']
                self.state['allowIn'] = false
                declarations = self.parseBindingList(kind, {'inFor': true})
                self.state['allowIn'] = previousAllowIn
                if (len(declarations) == 1 and declarations[0].init == null and self.matchKeyword('in')):
                    init = init.finishLexicalDeclaration(declarations, kind);
                    self.lex();
                    left = init;
                    right = self.parseExpression();
                    init = null;
                else:
                    self.consumeSemicolon();
                    init = init.finishLexicalDeclaration(declarations, kind);
            else:
                initStartToken = self.lookahead
                self.state['allowIn'] = false
                init = self.inheritCoverGrammar(self.parseAssignmentExpression);
                self.state['allowIn'] = previousAllowIn;
                if (self.matchKeyword('in')):
                    if (not self.isAssignmentTarget):
                        self.tolerateError(Messages.InvalidLHSInForIn)
                    self.lex();
                    self.reinterpretExpressionAsPattern(init);
                    left = init;
                    right = self.parseExpression();
                    init = null;
                else:
                    if (self.match(',')):
                        initSeq = [init];
                        while (self.match(',')):
                            self.lex();
                            initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
                        init = WrappingNode(initStartToken).finishSequenceExpression(initSeq)
                    self.expect(';');
        # ``left`` unbound means this is the classic three-part for loop.
        if ('left' not in locals()):
            if (not self.match(';')):
                test = self.parseExpression();
            self.expect(';');
            if (not self.match(')')):
                update = self.parseExpression();
        self.expect(')');
        oldInIteration = self.state['inIteration']
self.state['inIteration'] = true; body = self.isolateCoverGrammar(self.parseStatement) self.state['inIteration'] = oldInIteration; return node.finishForStatement(init, test, update, body) if ('left' not in locals()) else node.finishForInStatement(left, right, body); # 12.7 The continue statement def parseContinueStatement(self, node): label = null self.expectKeyword('continue'); # Optimize the most common form: 'continue;'. if ord(self.source[self.startIndex]) == 0x3B: self.lex(); if (not self.state['inIteration']): self.throwError(Messages.IllegalContinue) return node.finishContinueStatement(null) if (self.hasLineTerminator): if (not self.state['inIteration']): self.throwError(Messages.IllegalContinue); return node.finishContinueStatement(null); if (self.lookahead['type'] == Token.Identifier): label = self.parseVariableIdentifier(); key = '$' + label.name; if not key in self.state['labelSet']: # todo make sure its correct! self.throwError(Messages.UnknownLabel, label.name); self.consumeSemicolon() if (label == null and not self.state['inIteration']): self.throwError(Messages.IllegalContinue) return node.finishContinueStatement(label) # 12.8 The break statement def parseBreakStatement(self, node): label = null self.expectKeyword('break'); # Catch the very common case first: immediately a semicolon (U+003B). 
if (ord(self.source[self.lastIndex]) == 0x3B): self.lex(); if (not (self.state['inIteration'] or self.state['inSwitch'])): self.throwError(Messages.IllegalBreak) return node.finishBreakStatement(null) if (self.hasLineTerminator): if (not (self.state['inIteration'] or self.state['inSwitch'])): self.throwError(Messages.IllegalBreak); return node.finishBreakStatement(null); if (self.lookahead['type'] == Token.Identifier): label = self.parseVariableIdentifier(); key = '$' + label.name; if not (key in self.state['labelSet']): self.throwError(Messages.UnknownLabel, label.name); self.consumeSemicolon(); if (label == null and not (self.state['inIteration'] or self.state['inSwitch'])): self.throwError(Messages.IllegalBreak) return node.finishBreakStatement(label); # 12.9 The return statement def parseReturnStatement(self, node): argument = null; self.expectKeyword('return'); if (not self.state['inFunctionBody']): self.tolerateError(Messages.IllegalReturn); # 'return' followed by a space and an identifier is very common. 
if (ord(self.source[self.lastIndex]) == 0x20): if (isIdentifierStart(self.source[self.lastIndex + 1])): argument = self.parseExpression(); self.consumeSemicolon(); return node.finishReturnStatement(argument) if (self.hasLineTerminator): # HACK return node.finishReturnStatement(null) if (not self.match(';')): if (not self.match('}') and self.lookahead['type'] != Token.EOF): argument = self.parseExpression(); self.consumeSemicolon(); return node.finishReturnStatement(argument); # 12.10 The with statement def parseWithStatement(self, node): if (self.strict): self.tolerateError(Messages.StrictModeWith) self.expectKeyword('with'); self.expect('('); obj = self.parseExpression(); self.expect(')'); body = self.parseStatement(); return node.finishWithStatement(obj, body); # 12.10 The swith statement def parseSwitchCase(self): consequent = [] node = Node(); if (self.matchKeyword('default')): self.lex(); test = null; else: self.expectKeyword('case'); test = self.parseExpression(); self.expect(':'); while (self.startIndex < self.length): if (self.match('}') or self.matchKeyword('default') or self.matchKeyword('case')): break statement = self.parseStatementListItem() consequent.append(statement) return node.finishSwitchCase(test, consequent) def parseSwitchStatement(self, node): self.expectKeyword('switch'); self.expect('('); discriminant = self.parseExpression(); self.expect(')'); self.expect('{'); cases = []; if (self.match('}')): self.lex(); return node.finishSwitchStatement(discriminant, cases); oldInSwitch = self.state['inSwitch']; self.state['inSwitch'] = true; defaultFound = false; while (self.startIndex < self.length): if (self.match('}')): break; clause = self.parseSwitchCase(); if (clause.test == null): if (defaultFound): self.throwError(Messages.MultipleDefaultsInSwitch); defaultFound = true; cases.append(clause); self.state['inSwitch'] = oldInSwitch; self.expect('}'); return node.finishSwitchStatement(discriminant, cases); # 12.13 The throw statement def 
parseThrowStatement(self, node): self.expectKeyword('throw'); if (self.hasLineTerminator): self.throwError(Messages.NewlineAfterThrow); argument = self.parseExpression(); self.consumeSemicolon(); return node.finishThrowStatement(argument); # 12.14 The try statement def parseCatchClause(self): node = Node(); self.expectKeyword('catch'); self.expect('('); if (self.match(')')): self.throwUnexpectedToken(self.lookahead); param = self.parsePattern(); # 12.14.1 if (self.strict and isRestrictedWord(param.name)): self.tolerateError(Messages.StrictCatchVariable); self.expect(')'); body = self.parseBlock(); return node.finishCatchClause(param, body); def parseTryStatement(self, node): handler = null finalizer = null; self.expectKeyword('try'); block = self.parseBlock(); if (self.matchKeyword('catch')): handler = self.parseCatchClause() if (self.matchKeyword('finally')): self.lex(); finalizer = self.parseBlock(); if (not handler and not finalizer): self.throwError(Messages.NoCatchOrFinally) return node.finishTryStatement(block, handler, finalizer) # 12.15 The debugger statement def parseDebuggerStatement(self, node): self.expectKeyword('debugger'); self.consumeSemicolon(); return node.finishDebuggerStatement(); # 12 Statements def parseStatement(self): typ = self.lookahead['type'] if (typ == Token.EOF): self.throwUnexpectedToken(self.lookahead) if (typ == Token.Punctuator and self.lookahead['value'] == '{'): return self.parseBlock() self.isAssignmentTarget = self.isBindingElement = true; node = Node(); val = self.lookahead['value'] if (typ == Token.Punctuator): if val == ';': return self.parseEmptyStatement(node); elif val == '(': return self.parseExpressionStatement(node); elif (typ == Token.Keyword): if val == 'break': return self.parseBreakStatement(node); elif val == 'continue': return self.parseContinueStatement(node); elif val == 'debugger': return self.parseDebuggerStatement(node); elif val == 'do': return self.parseDoWhileStatement(node); elif val == 'for': return 
self.parseForStatement(node); elif val == 'function': return self.parseFunctionDeclaration(node); elif val == 'if': return self.parseIfStatement(node); elif val == 'return': return self.parseReturnStatement(node); elif val == 'switch': return self.parseSwitchStatement(node); elif val == 'throw': return self.parseThrowStatement(node); elif val == 'try': return self.parseTryStatement(node); elif val == 'var': return self.parseVariableStatement(node); elif val == 'while': return self.parseWhileStatement(node); elif val == 'with': return self.parseWithStatement(node); expr = self.parseExpression(); # 12.12 Labelled Statements if ((expr.type == Syntax.Identifier) and self.match(':')): self.lex(); key = '$' + expr.name if key in self.state['labelSet']: self.throwError(Messages.Redeclaration, 'Label', expr.name); self.state['labelSet'][key] = true labeledBody = self.parseStatement() del self.state['labelSet'][key] return node.finishLabeledStatement(expr, labeledBody) self.consumeSemicolon(); return node.finishExpressionStatement(expr) # 13 Function Definition def parseFunctionSourceElements(self): body = [] node = Node() firstRestricted = None self.expect('{') while (self.startIndex < self.length): if (self.lookahead['type'] != Token.StringLiteral): break token = self.lookahead; statement = self.parseStatementListItem() body.append(statement) if (statement.expression.type != Syntax.Literal): # this is not directive break directive = self.source[token['start']+1 : token['end']-1] if (directive == 'use strict'): self.strict = true; if (firstRestricted): self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral); else: if (not firstRestricted and token.get('octal')): firstRestricted = token; oldLabelSet = self.state['labelSet'] oldInIteration = self.state['inIteration'] oldInSwitch = self.state['inSwitch'] oldInFunctionBody = self.state['inFunctionBody'] oldParenthesisCount = self.state['parenthesizedCount'] self.state['labelSet'] = {} 
self.state['inIteration'] = false self.state['inSwitch'] = false self.state['inFunctionBody'] = true self.state['parenthesizedCount'] = 0 while (self.startIndex < self.length): if (self.match('}')): break body.append(self.parseStatementListItem()) self.expect('}') self.state['labelSet'] = oldLabelSet; self.state['inIteration'] = oldInIteration; self.state['inSwitch'] = oldInSwitch; self.state['inFunctionBody'] = oldInFunctionBody; self.state['parenthesizedCount'] = oldParenthesisCount; return node.finishBlockStatement(body) def validateParam(self, options, param, name): key = '$' + name if (self.strict): if (isRestrictedWord(name)): options['stricted'] = param; options['message'] = Messages.StrictParamName if key in options['paramSet']: options['stricted'] = param; options['message'] = Messages.StrictParamDupe; elif (not options['firstRestricted']): if (isRestrictedWord(name)): options['firstRestricted'] = param; options['message'] = Messages.StrictParamName; elif (isStrictModeReservedWord(name)): options['firstRestricted'] = param; options['message'] = Messages.StrictReservedWord; elif key in options['paramSet']: options['firstRestricted']= param options['message'] = Messages.StrictParamDupe; options['paramSet'][key] = true def parseParam(self, options): token = self.lookahead de = None if (token['value'] == '...'): param = self.parseRestElement(); self.validateParam(options, param.argument, param.argument.name); options['params'].append(param); options['defaults'].append(null); return false param = self.parsePatternWithDefault(); self.validateParam(options, token, token['value']); if (param.type == Syntax.AssignmentPattern): de = param.right; param = param.left; options['defaultCount'] += 1 options['params'].append(param); options['defaults'].append(de) return not self.match(')') def parseParams(self, firstRestricted): options = { 'params': [], 'defaultCount': 0, 'defaults': [], 'firstRestricted': firstRestricted} self.expect('('); if (not self.match(')')): 
options['paramSet'] = {}; while (self.startIndex < self.length): if (not self.parseParam(options)): break self.expect(','); self.expect(')'); if (options['defaultCount'] == 0): options['defaults'] = []; return { 'params': options['params'], 'defaults': options['defaults'], 'stricted': options.get('stricted'), 'firstRestricted': options.get('firstRestricted'), 'message': options.get('message')} def parseFunctionDeclaration(self, node, identifierIsOptional=None): d = null params = [] defaults = [] message = None firstRestricted = None self.expectKeyword('function'); if (identifierIsOptional or not self.match('(')): token = self.lookahead; d = self.parseVariableIdentifier(); if (self.strict): if (isRestrictedWord(token['value'])): self.tolerateUnexpectedToken(token, Messages.StrictFunctionName); else: if (isRestrictedWord(token['value'])): firstRestricted = token; message = Messages.StrictFunctionName; elif (isStrictModeReservedWord(token['value'])): firstRestricted = token; message = Messages.StrictReservedWord; tmp = self.parseParams(firstRestricted); params = tmp['params'] defaults = tmp['defaults'] stricted = tmp['stricted'] firstRestricted = tmp['firstRestricted'] if (tmp.get('message')): message = tmp['message']; previousStrict = self.strict; body = self.parseFunctionSourceElements(); if (self.strict and firstRestricted): self.throwUnexpectedToken(firstRestricted, message); if (self.strict and stricted): self.tolerateUnexpectedToken(stricted, message); self.strict = previousStrict; return node.finishFunctionDeclaration(d, params, defaults, body); def parseFunctionExpression(self): id = null params = [] defaults = [] node = Node(); firstRestricted = None message = None self.expectKeyword('function'); if (not self.match('(')): token = self.lookahead; id = self.parseVariableIdentifier(); if (self.strict): if (isRestrictedWord(token['value'])): self.tolerateUnexpectedToken(token, Messages.StrictFunctionName); else: if (isRestrictedWord(token['value'])): 
firstRestricted = token; message = Messages.StrictFunctionName; elif (isStrictModeReservedWord(token['value'])): firstRestricted = token; message = Messages.StrictReservedWord; tmp = self.parseParams(firstRestricted); params = tmp['params'] defaults = tmp['defaults'] stricted = tmp['stricted'] firstRestricted = tmp['firstRestricted'] if (tmp.get('message')): message = tmp['message'] previousStrict = self.strict; body = self.parseFunctionSourceElements(); if (self.strict and firstRestricted): self.throwUnexpectedToken(firstRestricted, message); if (self.strict and stricted): self.tolerateUnexpectedToken(stricted, message); self.strict = previousStrict; return node.finishFunctionExpression(id, params, defaults, body); # todo Translate parse class functions! def parseClassExpression(self): raise NotImplementedError() def parseClassDeclaration(self): raise NotImplementedError() # 14 Program def parseScriptBody(self): body = [] firstRestricted = None while (self.startIndex < self.length): token = self.lookahead; if (token['type'] != Token.StringLiteral): break statement = self.parseStatementListItem(); body.append(statement); if (statement.expression.type != Syntax.Literal): # this is not directive break directive = self.source[token['start'] + 1: token['end'] - 1] if (directive == 'use strict'): self.strict = true; if (firstRestricted): self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral) else: if (not firstRestricted and token.get('octal')): firstRestricted = token; while (self.startIndex < self.length): statement = self.parseStatementListItem(); # istanbul ignore if if (statement is None): break body.append(statement); return body; def parseProgram(self): self.peek() node = Node() body = self.parseScriptBody() return node.finishProgram(body) # DONE!!! def parse(self, code, options={}): if options: raise NotImplementedError('Options not implemented! 
You can only use default settings.') self.clean() self.source = unicode(code) + ' \n ; //END' # I have to add it in order not to check for EOF every time self.index = 0 self.lineNumber = 1 if len(self.source) > 0 else 0 self.lineStart = 0 self.startIndex = self.index self.startLineNumber = self.lineNumber; self.startLineStart = self.lineStart; self.length = len(self.source) self.lookahead = null; self.state = { 'allowIn': true, 'labelSet': {}, 'inFunctionBody': false, 'inIteration': false, 'inSwitch': false, 'lastCommentStart': -1, 'curlyStack': [], 'parenthesizedCount': None} self.sourceType = 'script'; self.strict = false; program = self.parseProgram(); return node_to_dict(program) if __name__=='__main__': import time test_path = None if test_path: f = open(test_path, 'rb') x = f.read() f.close() else: x = 'var $ = "Hello!"' p = PyJsParser() t = time.time() res = p.parse(x) dt = time.time() - t+ 0.000000001 if test_path: print(len(res)) else: pprint(res) print() print('Parsed everyting in', round(dt,5), 'seconds.') print('Thats %d characters per second' % int(len(x)/dt))
105,141
Python
.py
2,421
30.799257
200
0.552191
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,846
std_nodes.py
evilhero_mylar/lib/js2py/translators/std_nodes.py
from .pyjsparserdata import * import six class BaseNode: def finish(self): pass def finishArrayExpression(self, elements): self.type = Syntax.ArrayExpression self.elements = elements self.finish() return self def finishArrayPattern(self, elements): self.type = Syntax.ArrayPattern self.elements = elements self.finish() return self def finishArrowFunctionExpression(self, params, defaults, body, expression): self.type = Syntax.ArrowFunctionExpression self.id = None self.params = params self.defaults = defaults self.body = body self.generator = False self.expression = expression self.finish() return self def finishAssignmentExpression(self, operator, left, right): self.type = Syntax.AssignmentExpression self.operator = operator self.left = left self.right = right self.finish() return self def finishAssignmentPattern(self, left, right): self.type = Syntax.AssignmentPattern self.left = left self.right = right self.finish() return self def finishBinaryExpression(self, operator, left, right): self.type = Syntax.LogicalExpression if (operator == '||' or operator == '&&') else Syntax.BinaryExpression self.operator = operator self.left = left self.right = right self.finish() return self def finishBlockStatement(self, body): self.type = Syntax.BlockStatement self.body = body self.finish() return self def finishBreakStatement(self, label): self.type = Syntax.BreakStatement self.label = label self.finish() return self def finishCallExpression(self, callee, args): self.type = Syntax.CallExpression self.callee = callee self.arguments = args self.finish() return self def finishCatchClause(self, param, body): self.type = Syntax.CatchClause self.param = param self.body = body self.finish() return self def finishClassBody(self, body): self.type = Syntax.ClassBody self.body = body self.finish() return self def finishClassDeclaration(self, id, superClass, body): self.type = Syntax.ClassDeclaration self.id = id self.superClass = superClass self.body = body self.finish() return self def 
finishClassExpression(self, id, superClass, body): self.type = Syntax.ClassExpression self.id = id self.superClass = superClass self.body = body self.finish() return self def finishConditionalExpression(self, test, consequent, alternate): self.type = Syntax.ConditionalExpression self.test = test self.consequent = consequent self.alternate = alternate self.finish() return self def finishContinueStatement(self, label): self.type = Syntax.ContinueStatement self.label = label self.finish() return self def finishDebuggerStatement(self, ): self.type = Syntax.DebuggerStatement self.finish() return self def finishDoWhileStatement(self, body, test): self.type = Syntax.DoWhileStatement self.body = body self.test = test self.finish() return self def finishEmptyStatement(self, ): self.type = Syntax.EmptyStatement self.finish() return self def finishExpressionStatement(self, expression): self.type = Syntax.ExpressionStatement self.expression = expression self.finish() return self def finishForStatement(self, init, test, update, body): self.type = Syntax.ForStatement self.init = init self.test = test self.update = update self.body = body self.finish() return self def finishForInStatement(self, left, right, body): self.type = Syntax.ForInStatement self.left = left self.right = right self.body = body self.each = False self.finish() return self def finishFunctionDeclaration(self, id, params, defaults, body): self.type = Syntax.FunctionDeclaration self.id = id self.params = params self.defaults = defaults self.body = body self.generator = False self.expression = False self.finish() return self def finishFunctionExpression(self, id, params, defaults, body): self.type = Syntax.FunctionExpression self.id = id self.params = params self.defaults = defaults self.body = body self.generator = False self.expression = False self.finish() return self def finishIdentifier(self, name): self.type = Syntax.Identifier self.name = name self.finish() return self def finishIfStatement(self, test, 
consequent, alternate): self.type = Syntax.IfStatement self.test = test self.consequent = consequent self.alternate = alternate self.finish() return self def finishLabeledStatement(self, label, body): self.type = Syntax.LabeledStatement self.label = label self.body = body self.finish() return self def finishLiteral(self, token): self.type = Syntax.Literal self.value = token['value'] self.raw = None # todo fix it? if token.get('regex'): self.regex = token['regex'] self.finish() return self def finishMemberExpression(self, accessor, object, property): self.type = Syntax.MemberExpression self.computed = accessor == '[' self.object = object self.property = property self.finish() return self def finishNewExpression(self, callee, args): self.type = Syntax.NewExpression self.callee = callee self.arguments = args self.finish() return self def finishObjectExpression(self, properties): self.type = Syntax.ObjectExpression self.properties = properties self.finish() return self def finishObjectPattern(self, properties): self.type = Syntax.ObjectPattern self.properties = properties self.finish() return self def finishPostfixExpression(self, operator, argument): self.type = Syntax.UpdateExpression self.operator = operator self.argument = argument self.prefix = False self.finish() return self def finishProgram(self, body): self.type = Syntax.Program self.body = body self.finish() return self def finishPyimport(self, imp): self.type = 'PyimportStatement' self.imp = imp self.finish() return self def finishProperty(self, kind, key, computed, value, method, shorthand): self.type = Syntax.Property self.key = key self.computed = computed self.value = value self.kind = kind self.method = method self.shorthand = shorthand self.finish() return self def finishRestElement(self, argument): self.type = Syntax.RestElement self.argument = argument self.finish() return self def finishReturnStatement(self, argument): self.type = Syntax.ReturnStatement self.argument = argument self.finish() return 
self def finishSequenceExpression(self, expressions): self.type = Syntax.SequenceExpression self.expressions = expressions self.finish() return self def finishSpreadElement(self, argument): self.type = Syntax.SpreadElement self.argument = argument self.finish() return self def finishSwitchCase(self, test, consequent): self.type = Syntax.SwitchCase self.test = test self.consequent = consequent self.finish() return self def finishSuper(self, ): self.type = Syntax.Super self.finish() return self def finishSwitchStatement(self, discriminant, cases): self.type = Syntax.SwitchStatement self.discriminant = discriminant self.cases = cases self.finish() return self def finishTaggedTemplateExpression(self, tag, quasi): self.type = Syntax.TaggedTemplateExpression self.tag = tag self.quasi = quasi self.finish() return self def finishTemplateElement(self, value, tail): self.type = Syntax.TemplateElement self.value = value self.tail = tail self.finish() return self def finishTemplateLiteral(self, quasis, expressions): self.type = Syntax.TemplateLiteral self.quasis = quasis self.expressions = expressions self.finish() return self def finishThisExpression(self, ): self.type = Syntax.ThisExpression self.finish() return self def finishThrowStatement(self, argument): self.type = Syntax.ThrowStatement self.argument = argument self.finish() return self def finishTryStatement(self, block, handler, finalizer): self.type = Syntax.TryStatement self.block = block self.guardedHandlers = [] self.handlers = [handler] if handler else [] self.handler = handler self.finalizer = finalizer self.finish() return self def finishUnaryExpression(self, operator, argument): self.type = Syntax.UpdateExpression if (operator == '++' or operator == '--') else Syntax.UnaryExpression self.operator = operator self.argument = argument self.prefix = True self.finish() return self def finishVariableDeclaration(self, declarations): self.type = Syntax.VariableDeclaration self.declarations = declarations self.kind = 
'var' self.finish() return self def finishLexicalDeclaration(self, declarations, kind): self.type = Syntax.VariableDeclaration self.declarations = declarations self.kind = kind self.finish() return self def finishVariableDeclarator(self, id, init): self.type = Syntax.VariableDeclarator self.id = id self.init = init self.finish() return self def finishWhileStatement(self, test, body): self.type = Syntax.WhileStatement self.test = test self.body = body self.finish() return self def finishWithStatement(self, object, body): self.type = Syntax.WithStatement self.object = object self.body = body self.finish() return self def finishExportSpecifier(self, local, exported): self.type = Syntax.ExportSpecifier self.exported = exported or local self.local = local self.finish() return self def finishImportDefaultSpecifier(self, local): self.type = Syntax.ImportDefaultSpecifier self.local = local self.finish() return self def finishImportNamespaceSpecifier(self, local): self.type = Syntax.ImportNamespaceSpecifier self.local = local self.finish() return self def finishExportNamedDeclaration(self, declaration, specifiers, src): self.type = Syntax.ExportNamedDeclaration self.declaration = declaration self.specifiers = specifiers self.source = src self.finish() return self def finishExportDefaultDeclaration(self, declaration): self.type = Syntax.ExportDefaultDeclaration self.declaration = declaration self.finish() return self def finishExportAllDeclaration(self, src): self.type = Syntax.ExportAllDeclaration self.source = src self.finish() return self def finishImportSpecifier(self, local, imported): self.type = Syntax.ImportSpecifier self.local = local or imported self.imported = imported self.finish() return self def finishImportDeclaration(self, specifiers, src): self.type = Syntax.ImportDeclaration self.specifiers = specifiers self.source = src self.finish() return self def __getitem__(self, item): return getattr(self, item) def __setitem__(self, key, value): setattr(self, key, 
value) class Node(BaseNode): pass class WrappingNode(BaseNode): def __init__(self, startToken=None): pass def node_to_dict(node): # extremely important for translation speed if isinstance(node, list): return [node_to_dict(e) for e in node] elif isinstance(node, dict): return {k:node_to_dict(v) for k,v in six.iteritems(node)} elif not isinstance(node, BaseNode): return node return {k:node_to_dict(v) for k, v in six.iteritems(node.__dict__)}
15,451
Python
.py
400
25.08
119
0.569945
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,847
translator.py
evilhero_mylar/lib/js2py/translators/translator.py
from . import pyjsparser #from pyesprima import esprima from . import translating_nodes import hashlib import re # the re below is how we'll recognise numeric constants. # it finds any 'simple numeric that is not preceded with an alphanumeric character # the numeric can be a float (so a dot is found) but # it does not recognise notation such as 123e5, 0xFF, infinity or NaN CP_NUMERIC_RE = re.compile(r'(?<![a-zA-Z0-9_"\'])([0-9\.]+)') CP_NUMERIC_PLACEHOLDER = '__PyJsNUM_%i_PyJsNUM__' CP_NUMERIC_PLACEHOLDER_REVERSE_RE = re.compile( CP_NUMERIC_PLACEHOLDER.replace('%i', '([0-9\.]+)') ) # the re below is how we'll recognise string constants # it finds a ' or ", then reads until the next matching ' or " # this re only services simple cases, it can not be used when # there are escaped quotes in the expression #CP_STRING_1 = re.compile(r'(["\'])(.*?)\1') # this is how we'll recognise string constants CP_STRING = '"([^\\\\"]+|\\\\([bfnrtv\'"\\\\]|[0-3]?[0-7]{1,2}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}))*"|\'([^\\\\\']+|\\\\([bfnrtv\'"\\\\]|[0-3]?[0-7]{1,2}|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}))*\'' CP_STRING_RE = re.compile(CP_STRING) # this is how we'll recognise string constants CP_STRING_PLACEHOLDER = '__PyJsSTR_%i_PyJsSTR__' CP_STRING_PLACEHOLDER_REVERSE_RE = re.compile( CP_STRING_PLACEHOLDER.replace('%i', '([0-9\.]+)') ) cache = {} # This crap is still needed but I removed it for speed reasons. Have to think of better idea # import js2py.pyjs, sys # # Redefine builtin objects... Do you have a better idea? # for m in list(sys.modules): # if m.startswith('js2py'): # del sys.modules[m] # del js2py.pyjs # del js2py DEFAULT_HEADER = u'''from js2py.pyjs import * # setting scope var = Scope( JS_BUILTINS ) set_global_object(var) # Code follows: ''' def dbg(x): """does nothing, legacy dummy function""" return '' def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False): """js has to be a javascript source code. 
returns equivalent python code.""" if use_compilation_plan and not '//' in js and not '/*' in js: return translate_js_with_compilation_plan(js, HEADER=HEADER) parser = pyjsparser.PyJsParser() parsed = parser.parse(js) # js to esprima syntax tree # Another way of doing that would be with my auto esprima translation but its much slower and causes import problems: # parsed = esprima.parse(js).to_dict() translating_nodes.clean_stacks() return HEADER + translating_nodes.trans(parsed) # syntax tree to python code class match_unumerator(object): """This class ise used """ matchcount = -1 def __init__(self, placeholder_mask): self.placeholder_mask = placeholder_mask self.matches = [] def __call__(self, match): self.matchcount += 1 self.matches.append(match.group(0)) return self.placeholder_mask%self.matchcount def __repr__(self): return '\n'.join(self.placeholder_mask%counter + '=' + match for counter, match in enumerate(self.matches)) def wrap_up(self, output): for counter, value in enumerate(self.matches): output = output.replace("u'" + self.placeholder_mask%(counter) + "'", value, 1) return output def get_compilation_plan(js): match_increaser_str = match_unumerator(CP_STRING_PLACEHOLDER) compilation_plan = re.sub( CP_STRING, match_increaser_str, js ) match_increaser_num = match_unumerator(CP_NUMERIC_PLACEHOLDER) compilation_plan = re.sub(CP_NUMERIC_RE, match_increaser_num, compilation_plan) # now put quotes, note that just patching string replaces is somewhat faster than # using another re: compilation_plan = compilation_plan.replace('__PyJsNUM_', '"__PyJsNUM_').replace('_PyJsNUM__', '_PyJsNUM__"') compilation_plan = compilation_plan.replace('__PyJsSTR_', '"__PyJsSTR_').replace('_PyJsSTR__', '_PyJsSTR__"') return match_increaser_str, match_increaser_num, compilation_plan def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER): """js has to be a javascript source code. returns equivalent python code. 
compile plans only work with the following restrictions: - only enabled for oneliner expressions - when there are comments in the js code string substitution is disabled - when there nested escaped quotes string substitution is disabled, so cacheable: Q1 == 1 && name == 'harry' not cacheable: Q1 == 1 && name == 'harry' // some comment not cacheable: Q1 == 1 && name == 'o\'Reilly' not cacheable: Q1 == 1 && name /* some comment */ == 'o\'Reilly' """ match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(js) cp_hash = hashlib.md5(compilation_plan.encode('utf-8')).digest() try: python_code = cache[cp_hash]['proto_python_code'] except: parser = pyjsparser.PyJsParser() parsed = parser.parse(compilation_plan) # js to esprima syntax tree # Another way of doing that would be with my auto esprima translation but its much slower and causes import problems: # parsed = esprima.parse(js).to_dict() translating_nodes.clean_stacks() python_code = translating_nodes.trans(parsed) # syntax tree to python code cache[cp_hash] = { 'compilation_plan': compilation_plan, 'proto_python_code': python_code, } python_code = match_increaser_str.wrap_up(python_code) python_code = match_increaser_num.wrap_up(python_code) return HEADER + python_code def trasnlate(js, HEADER=DEFAULT_HEADER): """js has to be a javascript source code. returns equivalent python code. Equivalent to translate_js""" return translate_js(js, HEADER) syntax_tree_translate = translating_nodes.trans if __name__=='__main__': PROFILE = False import js2py import codecs def main(): with codecs.open("esprima.js", "r", "utf-8") as f: d = f.read() r = js2py.translate_js(d) with open('res.py','wb') as f2: f2.write(r) exec(r, {}) if PROFILE: import cProfile cProfile.run('main()', sort='tottime') else: main()
6,294
Python
.py
138
40.101449
188
0.665414
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,848
jseval.py
evilhero_mylar/lib/js2py/host/jseval.py
from js2py.base import * import inspect try: from js2py.translators.translator import translate_js except: pass @Js def Eval(code): local_scope = inspect.stack()[3][0].f_locals['var'] global_scope = this.GlobalObject # todo fix scope - we have to behave differently if called through variable other than eval # we will use local scope (default) globals()['var'] = local_scope try: py_code = translate_js(code.to_string().value, '') except SyntaxError as syn_err: raise MakeError('SyntaxError', str(syn_err)) lines = py_code.split('\n') # a simple way to return value from eval. Will not work in complex cases. has_return = False for n in xrange(len(lines)): line = lines[len(lines)-n-1] if line.strip(): if line.startswith(' '): break elif line.strip()=='pass': continue elif any(line.startswith(e) for e in ['return ', 'continue ', 'break', 'raise ']): break else: has_return = True cand = 'EVAL_RESULT = (%s)\n'%line try: compile(cand, '', 'exec') except SyntaxError: break lines[len(lines)-n-1] = cand py_code = '\n'.join(lines) break #print py_code executor(py_code) if has_return: return globals()['EVAL_RESULT'] def executor(code): exec(code, globals())
1,521
Python
.py
45
24.777778
95
0.562203
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,849
console.py
evilhero_mylar/lib/js2py/host/console.py
from js2py.base import * @Js def console(): pass @Js def log(): print(arguments[0]) console.put('log', log)
119
Python
.py
8
12.5
24
0.685185
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,850
jsfunctions.py
evilhero_mylar/lib/js2py/host/jsfunctions.py
from js2py.base import * RADIX_CHARS = {'1': 1, '0': 0, '3': 3, '2': 2, '5': 5, '4': 4, '7': 7, '6': 6, '9': 9, '8': 8, 'a': 10, 'c': 12, 'b': 11, 'e': 14, 'd': 13, 'g': 16, 'f': 15, 'i': 18, 'h': 17, 'k': 20, 'j': 19, 'm': 22, 'l': 21, 'o': 24, 'n': 23, 'q': 26, 'p': 25, 's': 28, 'r': 27, 'u': 30, 't': 29, 'w': 32, 'v': 31, 'y': 34, 'x': 33, 'z': 35, 'A': 10, 'C': 12, 'B': 11, 'E': 14, 'D': 13, 'G': 16, 'F': 15, 'I': 18, 'H': 17, 'K': 20, 'J': 19, 'M': 22, 'L': 21, 'O': 24, 'N': 23, 'Q': 26, 'P': 25, 'S': 28, 'R': 27, 'U': 30, 'T': 29, 'W': 32, 'V': 31, 'Y': 34, 'X': 33, 'Z': 35} @Js def parseInt (string , radix): string = string.to_string().value.lstrip() sign = 1 if string and string[0] in {'+', '-'}: if string[0]=='-': sign = -1 string = string[1:] r = radix.to_int32() strip_prefix = True if r: if r<2 or r>36: return NaN if r!=16: strip_prefix = False else: r = 10 if strip_prefix: if len(string)>=2 and string[:2] in {'0x', '0X'}: string = string[2:] r = 16 n = 0 num = 0 while n<len(string): cand = RADIX_CHARS.get(string[n]) if cand is None or not cand < r: break num = cand + num*r n += 1 if not n: return NaN return sign*num @Js def parseFloat(string): string = string.to_string().value.strip() sign = 1 if string and string[0] in {'+', '-'}: if string[0]=='-': sign = -1 string = string[1:] num = None length = 1 max_len = None failed = 0 while length<=len(string): try: num = float(string[:length]) max_len = length failed = 0 except: failed += 1 if failed>4: # cant be a number anymore break length += 1 if num is None: return NaN return sign*float(string[:max_len]) @Js def isNaN(number): if number.to_number().is_nan(): return true return false @Js def isFinite(number): num = number.to_number() if num.is_nan() or num.is_infinity(): return false return true #todo URI handling!
2,314
Python
.py
76
22.763158
113
0.453567
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,851
interface.py
evilhero_mylar/lib/js2py/host/dom/interface.py
from StringIO import StringIO from constants import * from bs4 import BeautifulSoup from js2py.base import * try: import lxml def parse(source): return BeautifulSoup(source, 'lxml') except: def parse(source): return BeautifulSoup(source) x = '''<table> <tbody> <tr> <td>Shady Grove</td> <td>Aeolian</td> </tr> <tr> <td>Over the River, Charlie</td> <td>Dorian</td> </tr> </tbody> </table>''' class DOM(PyJs): prototype = ObjectPrototype def __init__(self): self.own = {} def readonly(self, name, val): self.define_own_property(name, {'writable':False, 'enumerable':False, 'configurable':False, 'value': Js(val)}) # DOMStringList class DOMStringListPrototype(DOM): Class = 'DOMStringListPrototype' def contains(element): return element.to_string().value in this._string_list def item(index): return this._string_list[index.to_int()] if 0<=index.to_int()<len(this._string_list) else index.null class DOMStringList(DOM): Class = 'DOMStringList' prototype = compose_prototype(DOMStringListPrototype) def __init__(self, _string_list): self.own = {} self._string_list = _string_list # NameList class NameListPrototype(DOM):
1,302
Python
.py
44
24.386364
118
0.667209
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,852
constants.py
evilhero_mylar/lib/js2py/host/dom/constants.py
from js2py.base import * def _get_conts(idl): def is_valid(c): try: exec(c) return 1 except: pass return '\n'.join(filter(is_valid, (' '.join(e.strip(' ;').split()[-3:]) for e in idl.splitlines()))) default_attrs = {'writable':True, 'enumerable':True, 'configurable':True} def compose_prototype(Class, attrs=default_attrs): prototype = Class() for i in dir(Class): e = getattr(Class, i) if hasattr(e, '__func__'): temp = PyJsFunction(e.__func__, FunctionPrototype) attrs = {k:v for k,v in attrs.iteritems()} attrs['value'] = temp prototype.define_own_property(i, attrs) return prototype # Error codes INDEX_SIZE_ERR = 1 DOMSTRING_SIZE_ERR = 2 HIERARCHY_REQUEST_ERR = 3 WRONG_DOCUMENT_ERR = 4 INVALID_CHARACTER_ERR = 5 NO_DATA_ALLOWED_ERR = 6 NO_MODIFICATION_ALLOWED_ERR = 7 NOT_FOUND_ERR = 8 NOT_SUPPORTED_ERR = 9 INUSE_ATTRIBUTE_ERR = 10 INVALID_STATE_ERR = 11 SYNTAX_ERR = 12 INVALID_MODIFICATION_ERR = 13 NAMESPACE_ERR = 14 INVALID_ACCESS_ERR = 15 VALIDATION_ERR = 16 TYPE_MISMATCH_ERR = 17
1,139
Python
.py
38
25.157895
104
0.648352
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,853
jsnumber.py
evilhero_mylar/lib/js2py/constructors/jsnumber.py
from ..base import * CONSTS = {'prototype': NumberPrototype, 'MAX_VALUE':1.7976931348623157e308, 'MIN_VALUE': 5.0e-324, 'NaN': NaN, 'NEGATIVE_INFINITY': float('-inf'), 'POSITIVE_INFINITY': float('inf')} fill_in_props(Number, CONSTS, {'enumerable': False, 'writable': False, 'configurable': False}) NumberPrototype.define_own_property('constructor', {'value': Number, 'enumerable': False, 'writable': True, 'configurable': True})
703
Python
.py
14
29.714286
74
0.446064
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,854
jsboolean.py
evilhero_mylar/lib/js2py/constructors/jsboolean.py
from ..base import * BooleanPrototype.define_own_property('constructor', {'value': Boolean, 'enumerable': False, 'writable': True, 'configurable': True}) Boolean.define_own_property('prototype', {'value': BooleanPrototype, 'enumerable': False, 'writable': False, 'configurable': False})
570
Python
.py
9
30.888889
75
0.378571
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,855
jsfunction.py
evilhero_mylar/lib/js2py/constructors/jsfunction.py
from ..base import * try: from ..translators.translator import translate_js except: pass @Js def Function(): # convert arguments to python list of strings a = [e.to_string().value for e in arguments.to_list()] body = ';' args = () if len(a): body = '%s;' % a[-1] args = a[:-1] # translate this function to js inline function js_func = '(function (%s) {%s})' % (','.join(args), body) # now translate js inline to python function py_func = translate_js(js_func, '') # add set func scope to global scope # a but messy solution but works :) globals()['var'] = PyJs.GlobalObject # define py function and return it temp = executor(py_func, globals()) temp.source = '{%s}'%body temp.func_name = 'anonymous' return temp def executor(f, glob): exec(f, globals()) return globals()['PyJs_anonymous_0_'] #new statement simply calls Function Function.create = Function #set constructor property inside FunctionPrototype fill_in_props(FunctionPrototype, {'constructor':Function}, default_attrs) #attach prototype to Function constructor Function.define_own_property('prototype', {'value': FunctionPrototype, 'enumerable': False, 'writable': False, 'configurable': False}) #Fix Function length (its 0 and should be 1) Function.own['length']['value'] = Js(1)
1,476
Python
.py
40
30.1
73
0.625088
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,856
time_helpers.py
evilhero_mylar/lib/js2py/constructors/time_helpers.py
# NOTE: t must be INT!!! import time import datetime import warnings try: from tzlocal import get_localzone LOCAL_ZONE = get_localzone() except: # except all problems... warnings.warn('Please install or fix tzlocal library (pip install tzlocal) in order to make Date object work better. Otherwise I will assume DST is in effect all the time', Warning) class LOCAL_ZONE: @staticmethod def dst(*args): return 1 from lib.js2py.base import MakeError CUM = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) msPerDay = 86400000 msPerYear = int(86400000*365.242) msPerSecond = 1000 msPerMinute = 60000 msPerHour = 3600000 HoursPerDay = 24 MinutesPerHour = 60 SecondsPerMinute = 60 NaN = float('nan') LocalTZA = - time.timezone * msPerSecond def DaylightSavingTA(t): if t is NaN: return t try: return int(LOCAL_ZONE.dst(datetime.datetime.utcfromtimestamp(t//1000)).seconds)*1000 except: warnings.warn('Invalid datetime date, assumed DST time, may be inaccurate...', Warning) return 1 #raise MakeError('TypeError', 'date not supported by python.datetime. 
I will solve it in future versions') def GetTimeZoneName(t): return time.tzname[DaylightSavingTA(t)>0] def LocalToUTC(t): return t - LocalTZA - DaylightSavingTA(t - LocalTZA) def UTCToLocal(t): return t + LocalTZA + DaylightSavingTA(t) def Day(t): return t//86400000 def TimeWithinDay(t): return t%86400000 def DaysInYear(y): if y%4: return 365 elif y%100: return 366 elif y%400: return 365 else: return 366 def DayFromYear(y): return 365 * (y-1970) + (y-1969)//4 -(y-1901)//100 + (y-1601)//400 def TimeFromYear(y): return 86400000 * DayFromYear(y) def YearFromTime(t): guess = 1970 - t//31556908800 # msPerYear gt = TimeFromYear(guess) if gt<=t: while gt<=t: guess += 1 gt = TimeFromYear(guess) return guess-1 else: while gt>t: guess -= 1 gt = TimeFromYear(guess) return guess def DayWithinYear(t): return Day(t) - DayFromYear(YearFromTime(t)) def InLeapYear(t): y = YearFromTime(t) if y%4: return 0 elif y%100: return 1 elif y%400: return 0 else: return 1 def MonthFromTime(t): day = DayWithinYear(t) leap = InLeapYear(t) if day<31: return 0 day -= leap if day<59: return 1 elif day<90: return 2 elif day<120: return 3 elif day<151: return 4 elif day<181: return 5 elif day<212: return 6 elif day<243: return 7 elif day<273: return 8 elif day<304: return 9 elif day<334: return 10 else: return 11 def DateFromTime(t): mon = MonthFromTime(t) day = DayWithinYear(t) return day-CUM[mon] - (1 if InLeapYear(t) and mon>=2 else 0) + 1 def WeekDay(t): # 0 == sunday return (Day(t) + 4) % 7 def msFromTime(t): return t % 1000 def SecFromTime(t): return (t//1000) % 60 def MinFromTime(t): return (t//60000) % 60 def HourFromTime(t): return (t//3600000) % 24 def MakeTime (hour, Min, sec, ms): # takes PyJs objects and returns t if not (hour.is_finite() and Min.is_finite() and sec.is_finite() and ms.is_finite()): return NaN h, m, s, milli = hour.to_int(), Min.to_int(), sec.to_int(), ms.to_int() return h*3600000 + m*60000 + s*1000 + milli def MakeDay(year, month, date): # takes PyJs objects and returns 
t if not (year.is_finite() and month.is_finite() and date.is_finite()): return NaN y, m, dt = year.to_int(), month.to_int(), date.to_int() y += m//12 mn = m % 12 d = DayFromYear(y) + CUM[mn] + dt - 1 + (1 if DaysInYear(y)==366 and mn>=2 else 0) return d # ms per day def MakeDate (day, time): return 86400000*day + time def TimeClip(t): if t!=t or abs(t)==float('inf'): return NaN if abs(t) > 8.64 *10**15: return NaN return int(t)
4,133
Python
.py
148
22.581081
185
0.631139
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,857
jsregexp.py
evilhero_mylar/lib/js2py/constructors/jsregexp.py
from ..base import * RegExpPrototype.define_own_property('constructor', {'value': RegExp, 'enumerable': False, 'writable': True, 'configurable': True}) RegExp.define_own_property('prototype', {'value': RegExpPrototype, 'enumerable': False, 'writable': False, 'configurable': False})
566
Python
.py
9
30.444444
75
0.374101
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,858
jsstring.py
evilhero_mylar/lib/js2py/constructors/jsstring.py
from ..base import * # python 3 support import six if six.PY3: unichr = chr @Js def fromCharCode(): args = arguments.to_list() res = u'' for e in args: res +=unichr(e.to_uint16()) return this.Js(res) fromCharCode.own['length']['value'] = Js(1) String.define_own_property('fromCharCode', {'value': fromCharCode, 'enumerable': False, 'writable': True, 'configurable': True}) String.define_own_property('prototype', {'value': StringPrototype, 'enumerable': False, 'writable': False, 'configurable': False}) StringPrototype.define_own_property('constructor', {'value': String, 'enumerable': False, 'writable': True, 'configurable': True})
1,069
Python
.py
25
24.4
74
0.4375
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,859
jsarray.py
evilhero_mylar/lib/js2py/constructors/jsarray.py
from ..base import * @Js def Array(): if len(arguments)==0 or len(arguments)>1: return arguments.to_list() a = arguments[0] if isinstance(a, PyJsNumber): length = a.to_uint32() if length!=a.value: raise MakeError('RangeError', 'Invalid array length') temp = Js([]) temp.put('length', a) return temp return [a] Array.create = Array Array.own['length']['value'] = Js(1) @Js def isArray(arg): return arg.Class=='Array' Array.define_own_property('isArray', {'value': isArray, 'enumerable': False, 'writable': True, 'configurable': True}) Array.define_own_property('prototype', {'value': ArrayPrototype, 'enumerable': False, 'writable': False, 'configurable': False}) ArrayPrototype.define_own_property('constructor', {'value': Array, 'enumerable': False, 'writable': True, 'configurable': True})
1,280
Python
.py
31
24.548387
74
0.452936
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,860
jsobject.py
evilhero_mylar/lib/js2py/constructors/jsobject.py
from ..base import * import six #todo Double check everything is OK @Js def Object(): val = arguments.get('0') if val.is_null() or val.is_undefined(): return PyJsObject(prototype=ObjectPrototype) return val.to_object() @Js def object_constructor(): if len(arguments): val = arguments.get('0') if val.TYPE=='Object': #Implementation dependent, but my will simply return :) return val elif val.TYPE in {'Number', 'String', 'Boolean'}: return val.to_object() return PyJsObject(prototype=ObjectPrototype) Object.create = object_constructor Object.own['length']['value'] = Js(1) class ObjectMethods: def getPrototypeOf(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.getPrototypeOf called on non-object') return null if obj.prototype is None else obj.prototype def getOwnPropertyDescriptor (obj, prop): if not obj.is_object(): raise MakeError('TypeError', 'Object.getOwnPropertyDescriptor called on non-object') return obj.own.get(prop.to_string().value) # will return undefined if we dont have this prop def getOwnPropertyNames(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.getOwnPropertyDescriptor called on non-object') return obj.own.keys() def create(obj): if not (obj.is_object() or obj.is_null()): raise MakeError('TypeError', 'Object prototype may only be an Object or null') temp = PyJsObject(prototype=(None if obj.is_null() else obj)) if len(arguments)>1 and not arguments[1].is_undefined(): if six.PY2: ObjectMethods.defineProperties.__func__(temp, arguments[1]) else: ObjectMethods.defineProperties(temp, arguments[1]) return temp def defineProperty(obj, prop, attrs): if not obj.is_object(): raise MakeError('TypeError', 'Object.defineProperty called on non-object') name = prop.to_string().value if not obj.define_own_property(name, ToPropertyDescriptor(attrs)): raise MakeError('TypeError', 'Cannot redefine property: %s' % name) return obj def defineProperties(obj, properties): if not obj.is_object(): raise MakeError('TypeError', 
'Object.defineProperties called on non-object') props = properties.to_object() for name in props: desc = ToPropertyDescriptor(props.get(name.value)) if not obj.define_own_property(name.value, desc): raise MakeError('TypeError', 'Failed to define own property: %s'%name.value) return obj def seal(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.seal called on non-object') for desc in obj.own.values(): desc['configurable'] = False obj.extensible = False return obj def freeze(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.freeze called on non-object') for desc in obj.own.values(): desc['configurable'] = False if is_data_descriptor(desc): desc['writable'] = False obj.extensible = False return obj def preventExtensions(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.preventExtensions on non-object') obj.extensible = False return obj def isSealed(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.isSealed called on non-object') if obj.extensible: return False for desc in obj.own.values(): if desc['configurable']: return False return True def isFrozen(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.isFrozen called on non-object') if obj.extensible: return False for desc in obj.own.values(): if desc['configurable']: return False if is_data_descriptor(desc) and desc['writable']: return False return True def isExtensible(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.isExtensible called on non-object') return obj.extensible def keys(obj): if not obj.is_object(): raise MakeError('TypeError', 'Object.keys called on non-object') return [e for e,d in six.iteritems(obj.own) if d.get('enumerable')] # add methods attached to Object constructor fill_prototype(Object, ObjectMethods, default_attrs) # add constructor to prototype fill_in_props(ObjectPrototype, {'constructor':Object}, default_attrs) # add prototype property to the constructor. 
Object.define_own_property('prototype', {'value': ObjectPrototype, 'enumerable': False, 'writable': False, 'configurable': False}) # some utility functions: def ToPropertyDescriptor(obj): # page 38 (50 absolute) if obj.TYPE!='Object': raise MakeError('TypeError', 'Can\'t convert non-object to property descriptor') desc = {} if obj.has_property('enumerable'): desc['enumerable'] = obj.get('enumerable').to_boolean().value if obj.has_property('configurable'): desc['configurable'] = obj.get('configurable').to_boolean().value if obj.has_property('value'): desc['value'] = obj.get('value') if obj.has_property('writable'): desc['writable'] = obj.get('writable').to_boolean().value if obj.has_property('get'): cand = obj.get('get') if not (cand.is_undefined() or cand.is_callable()): raise MakeError('TypeError', 'Invalid getter (it has to be a function or undefined)') desc['get'] = cand if obj.has_property('set'): cand = obj.get('set') if not (cand.is_undefined() or cand.is_callable()): raise MakeError('TypeError', 'Invalid setter (it has to be a function or undefined)') desc['set'] = cand if ('get' in desc or 'set' in desc) and ('value' in desc or 'writable' in desc): raise MakeError('TypeError', 'Invalid property. A property cannot both have accessors and be writable or have a value.') return desc
6,464
Python
.py
144
35.534722
129
0.627146
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,861
jsmath.py
evilhero_mylar/lib/js2py/constructors/jsmath.py
from ..base import * import math import random Math = PyJsObject(prototype=ObjectPrototype) Math.Class = 'Math' CONSTANTS = {'E': 2.7182818284590452354, 'LN10': 2.302585092994046, 'LN2': 0.6931471805599453, 'LOG2E': 1.4426950408889634, 'LOG10E': 0.4342944819032518, 'PI': 3.1415926535897932, 'SQRT1_2': 0.7071067811865476, 'SQRT2': 1.4142135623730951} for constant, value in CONSTANTS.items(): Math.define_own_property(constant, {'value': Js(value), 'writable': False, 'enumerable': False, 'configurable': False}) class MathFunctions: def abs(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return abs(a) def acos(x): a = x.to_number().value if a!=a: # it must be a nan return NaN try: return math.acos(a) except: return NaN def asin(x): a = x.to_number().value if a!=a: # it must be a nan return NaN try: return math.asin(a) except: return NaN def atan(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.atan(a) def atan2(y, x): a = x.to_number().value b = y.to_number().value if a!=a or b!=b: # it must be a nan return NaN return math.atan2(b, a) def ceil(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.ceil(a) def floor(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.floor(a) def round(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return round(a) def sin(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.sin(a) def cos(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.cos(a) def tan(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.tan(a) def log(x): a = x.to_number().value if a!=a: # it must be a nan return NaN try: return math.log(a) except: return NaN def exp(x): a = x.to_number().value if a!=a: # it must be a nan return NaN return math.exp(a) def pow(x, y): a = x.to_number().value b = y.to_number().value if a!=a or b!=b: # it must be a nan return NaN try: return 
a**b except: return NaN def sqrt(x): a = x.to_number().value if a!=a: # it must be a nan return NaN try: return a**0.5 except: return NaN def min(): if not len(arguments): return -Infinity lis = tuple(e.to_number().value for e in arguments.to_list()) if any(e!=e for e in lis): # we dont want NaNs return NaN return min(*lis) def max(): if not len(arguments): return -Infinity lis = tuple(e.to_number().value for e in arguments.to_list()) if any(e!=e for e in lis): # we dont want NaNs return NaN return max(*lis) def random(): return random.random() fill_prototype(Math, MathFunctions, default_attrs)
3,747
Python
.py
128
19.328125
69
0.495135
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,862
jsdate.py
evilhero_mylar/lib/js2py/constructors/jsdate.py
from ..base import * from .time_helpers import * TZ_OFFSET = (time.altzone//3600) ABS_OFFSET = abs(TZ_OFFSET) TZ_NAME = time.tzname[1] ISO_FORMAT = '%s-%s-%sT%s:%s:%s.%sZ' @Js def Date(year, month, date, hours, minutes, seconds, ms): return now().to_string() Date.Class = 'Date' def now(): return PyJsDate(int(time.time()*1000), prototype=DatePrototype) @Js def UTC(year, month, date, hours, minutes, seconds, ms): # todo complete this args = arguments y = args[0].to_number() m = args[1].to_number() l = len(args) dt = args[2].to_number() if l>2 else Js(1) h = args[3].to_number() if l>3 else Js(0) mi = args[4].to_number() if l>4 else Js(0) sec = args[5].to_number() if l>5 else Js(0) mili = args[6].to_number() if l>6 else Js(0) if not y.is_nan() and 0<=y.value<=99: y = y + Js(1900) t = TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili))) return PyJsDate(t, prototype=DatePrototype) @Js def parse(string): return PyJsDate(TimeClip(parse_date(string.to_string().value)), prototype=DatePrototype) Date.define_own_property('now', {'value': Js(now), 'enumerable': False, 'writable': True, 'configurable': True}) Date.define_own_property('parse', {'value': parse, 'enumerable': False, 'writable': True, 'configurable': True}) Date.define_own_property('UTC', {'value': UTC, 'enumerable': False, 'writable': True, 'configurable': True}) class PyJsDate(PyJs): Class = 'Date' extensible = True def __init__(self, value, prototype=None): self.value = value self.own = {} self.prototype = prototype # todo fix this problematic datetime part def to_local_dt(self): return datetime.datetime.utcfromtimestamp(UTCToLocal(self.value)//1000) def to_utc_dt(self): return datetime.datetime.utcfromtimestamp(self.value//1000) def local_strftime(self, pattern): if self.value is NaN: return 'Invalid Date' try: dt = self.to_local_dt() except: raise MakeError('TypeError', 'unsupported date range. 
Will fix in future versions') try: return dt.strftime(pattern) except: raise MakeError('TypeError', 'Could not generate date string from this date (limitations of python.datetime)') def utc_strftime(self, pattern): if self.value is NaN: return 'Invalid Date' try: dt = self.to_utc_dt() except: raise MakeError('TypeError', 'unsupported date range. Will fix in future versions') try: return dt.strftime(pattern) except: raise MakeError('TypeError', 'Could not generate date string from this date (limitations of python.datetime)') def parse_date(py_string): return NotImplementedError() def date_constructor(*args): if len(args)>=2: return date_constructor2(*args) elif len(args)==1: return date_constructor1(args[0]) else: return date_constructor0() def date_constructor0(): return now() def date_constructor1(value): v = value.to_primitive() if v._type()=='String': v = parse_date(v.value) else: v = v.to_int() return PyJsDate(TimeClip(v), prototype=DatePrototype) def date_constructor2(*args): y = args[0].to_number() m = args[1].to_number() l = len(args) dt = args[2].to_number() if l>2 else Js(1) h = args[3].to_number() if l>3 else Js(0) mi = args[4].to_number() if l>4 else Js(0) sec = args[5].to_number() if l>5 else Js(0) mili = args[6].to_number() if l>6 else Js(0) if not y.is_nan() and 0<=y.value<=99: y = y + Js(1900) t = TimeClip(LocalToUTC(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))) return PyJsDate(t, prototype=DatePrototype) Date.create = date_constructor DatePrototype = PyJsDate(float('nan'), prototype=ObjectPrototype) def check_date(obj): if obj.Class!='Date': raise MakeError('TypeError', 'this is not a Date object') class DateProto: def toString(): check_date(this) if this.value is NaN: return 'Invalid Date' offset = (UTCToLocal(this.value) - this.value)//msPerHour return this.local_strftime('%a %b %d %Y %H:%M:%S GMT') + '%s00 (%s)' % (pad(offset, 2, True), GetTimeZoneName(this.value)) def toDateString(): check_date(this) return this.local_strftime('%d %B 
%Y') def toTimeString(): check_date(this) return this.local_strftime('%H:%M:%S') def toLocaleString(): check_date(this) return this.local_strftime('%d %B %Y %H:%M:%S') def toLocaleDateString(): check_date(this) return this.local_strftime('%d %B %Y') def toLocaleTimeString(): check_date(this) return this.local_strftime('%H:%M:%S') def valueOf(): check_date(this) return this.value def getTime(): check_date(this) return this.value def getFullYear(): check_date(this) if this.value is NaN: return NaN return YearFromTime(UTCToLocal(this.value)) def getUTCFullYear(): check_date(this) if this.value is NaN: return NaN return YearFromTime(this.value) def getMonth(): check_date(this) if this.value is NaN: return NaN return MonthFromTime(UTCToLocal(this.value)) def getDate(): check_date(this) if this.value is NaN: return NaN return DateFromTime(UTCToLocal(this.value)) def getUTCMonth(): check_date(this) if this.value is NaN: return NaN return MonthFromTime(this.value) def getUTCDate(): check_date(this) if this.value is NaN: return NaN return DateFromTime(this.value) def getDay(): check_date(this) if this.value is NaN: return NaN return WeekDay(UTCToLocal(this.value)) def getUTCDay(): check_date(this) if this.value is NaN: return NaN return WeekDay(this.value) def getHours(): check_date(this) if this.value is NaN: return NaN return HourFromTime(UTCToLocal(this.value)) def getUTCHours(): check_date(this) if this.value is NaN: return NaN return HourFromTime(this.value) def getMinutes(): check_date(this) if this.value is NaN: return NaN return MinFromTime(UTCToLocal(this.value)) def getUTCMinutes(): check_date(this) if this.value is NaN: return NaN return MinFromTime(this.value) def getSeconds(): check_date(this) if this.value is NaN: return NaN return SecFromTime(UTCToLocal(this.value)) def getUTCSeconds(): check_date(this) if this.value is NaN: return NaN return SecFromTime(this.value) def getMilliseconds(): check_date(this) if this.value is NaN: return NaN return 
msFromTime(UTCToLocal(this.value)) def getUTCMilliseconds(): check_date(this) if this.value is NaN: return NaN return msFromTime(this.value) def getTimezoneOffset(): check_date(this) if this.value is NaN: return NaN return (UTCToLocal(this.value) - this.value)//60000 def setTime(time): check_date(this) this.value = TimeClip(time.to_number().to_int()) return this.value def setMilliseconds(ms): check_date(this) t = UTCToLocal(this.value) tim = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int()) u = TimeClip(LocalToUTC(MakeDate(Day(t), tim))) this.value = u return u def setUTCMilliseconds(ms): check_date(this) t = this.value tim = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int()) u = TimeClip(MakeDate(Day(t), tim)) this.value = u return u # todo Complete all setters! def toUTCString(): check_date(this) return this.utc_strftime('%d %B %Y %H:%M:%S') def toISOString(): check_date(this) t = this.value year = YearFromTime(t) month, day, hour, minute, second, milli = pad(MonthFromTime(t)+1), pad(DateFromTime(t)), pad(HourFromTime(t)), pad(MinFromTime(t)), pad(SecFromTime(t)), pad(msFromTime(t)) return ISO_FORMAT % (unicode(year) if 0<=year<=9999 else pad(year, 6, True), month, day, hour, minute, second, milli) def toJSON(key): o = this.to_object() tv = o.to_primitive('Number') if tv.Class=='Number' and not tv.is_finite(): return this.null toISO = o.get('toISOString') if not toISO.is_callable(): raise this.MakeError('TypeError', 'toISOString is not callable') return toISO.call(o, ()) def pad(num, n=2, sign=False): '''returns n digit string representation of the num''' s = unicode(abs(num)) if len(s)<n: s = '0'*(n-len(s)) + s if not sign: return s if num>=0: return '+'+s else: return '-'+s fill_prototype(DatePrototype, DateProto, default_attrs) Date.define_own_property('prototype', {'value': DatePrototype, 'enumerable': False, 'writable': False, 'configurable': False}) DatePrototype.define_own_property('constructor', {'value': Date, 
'enumerable': False, 'writable': True, 'configurable': True})
10,194
Python
.py
282
27.14539
179
0.587105
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,863
jsnumber.py
evilhero_mylar/lib/js2py/prototypes/jsnumber.py
import six if six.PY3: basestring = str long = int xrange = range unicode = str RADIX_SYMBOLS = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', 16: 'g', 17: 'h', 18: 'i', 19: 'j', 20: 'k', 21: 'l', 22: 'm', 23: 'n', 24: 'o', 25: 'p', 26: 'q', 27: 'r', 28: 's', 29: 't', 30: 'u', 31: 'v', 32: 'w', 33: 'x', 34: 'y', 35: 'z'} def to_str_rep(num): if num.is_nan(): return num.Js('NaN') elif num.is_infinity(): sign = '-' if num.value<0 else '' return num.Js(sign+'Infinity') elif isinstance(num.value, (long, int)) or num.value.is_integer(): # dont print .0 return num.Js(unicode(int(num.value))) return num.Js(unicode(num.value)) # accurate enough class NumberPrototype: def toString(radix): if this.Class!='Number': raise this.MakeError('TypeError', 'Number.prototype.valueOf is not generic') if radix.is_undefined(): return to_str_rep(this) r = radix.to_int() if r==10: return to_str_rep(this) if r not in xrange(2, 37): raise this.MakeError('RangeError', 'Number.prototype.toString() radix argument must be between 2 and 36') num = this.to_int() if num < 0: num = -num sign = '-' else: sign = '' res = '' while num: s = RADIX_SYMBOLS[num % r] num = num // r res = s + res return sign + (res if res else '0') def valueOf(): if this.Class!='Number': raise this.MakeError('TypeError', 'Number.prototype.valueOf is not generic') return this.value def toLocaleString(): return this.to_string() def toFixed (fractionDigits): if this.Class!='Number': raise this.MakeError('TypeError', 'Number.prototype.toFixed called on incompatible receiver') digs = fractionDigits.to_int() if digs<0 or digs>20: raise this.MakeError('RangeError', 'toFixed() digits argument must be between 0 and 20') elif this.is_infinity(): return 'Infinity' if this.value>0 else '-Infinity' elif this.is_nan(): return 'NaN' return format(this.value, '-.%df'%digs) def toExponential (fractionDigits): if this.Class!='Number': raise 
this.MakeError('TypeError', 'Number.prototype.toExponential called on incompatible receiver') digs = fractionDigits.to_int() if digs<0 or digs>20: raise this.MakeError('RangeError', 'toFixed() digits argument must be between 0 and 20') elif this.is_infinity(): return 'Infinity' if this.value>0 else '-Infinity' elif this.is_nan(): return 'NaN' return format(this.value, '-.%de'%digs) def toPrecision (precision): if this.Class!='Number': raise this.MakeError('TypeError', 'Number.prototype.toPrecision called on incompatible receiver') if precision.is_undefined(): return this.to_String() prec = precision.to_int() if this.is_nan(): return 'NaN' elif this.is_infinity(): return 'Infinity' if this.value>0 else '-Infinity' digs = prec - len(str(int(this.value))) if digs>=0: return format(this.value, '-.%df'%digs) else: return format(this.value, '-.%df'%(prec-1))
3,577
Python
.py
85
32.529412
117
0.555651
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,864
jsboolean.py
evilhero_mylar/lib/js2py/prototypes/jsboolean.py
class BooleanPrototype: def toString(): if this.Class!='Boolean': raise this.Js(TypeError)('this must be a boolean') return 'true' if this.value else 'false' def valueOf(): if this.Class!='Boolean': raise this.Js(TypeError)('this must be a boolean') return this.value
339
Python
.py
9
28.777778
62
0.616099
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,865
jserror.py
evilhero_mylar/lib/js2py/prototypes/jserror.py
class ErrorPrototype: def toString(): if this.TYPE!='Object': raise this.MakeError('TypeError', 'Error.prototype.toString called on non-object') name = this.get('name') name = 'Error' if name.is_undefined() else name.to_string().value msg = this.get('message') msg = '' if msg.is_undefined() else msg.to_string().value return name + (name and msg and ': ') + msg
427
Python
.py
9
39.333333
94
0.61244
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,866
jsfunction.py
evilhero_mylar/lib/js2py/prototypes/jsfunction.py
# python 3 support import six if six.PY3: basestring = str long = int xrange = range unicode = str # todo fix apply and bind class FunctionPrototype: def toString(): if not this.is_callable(): raise TypeError('toString is not generic!') args = ', '.join(this.code.__code__.co_varnames[:this.argcount]) return 'function %s(%s) '%(this.func_name, args)+this.source def call(): arguments_ = arguments if not len(arguments): obj = this.Js(None) else: obj = arguments[0] if len(arguments)<=1: args = () else: args = tuple([arguments_[e] for e in xrange(1, len(arguments_))]) return this.call(obj, args) def apply(): if not len(arguments): obj = this.Js(None) else: obj = arguments[0] if len(arguments)<=1: args = () else: appl = arguments[1] args = tuple([appl[e] for e in xrange(len(appl))]) return this.call(obj, args) def bind(thisArg): target = this if not target.is_callable(): raise this.MakeError('Object must be callable in order to be used with bind method') if len(arguments) <= 1: args = () else: args = tuple([arguments[e] for e in xrange(1, len(arguments))]) return this.PyJsBoundFunction(target, thisArg, args)
1,490
Python
.py
45
23.888889
96
0.552834
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,867
jsregexp.py
evilhero_mylar/lib/js2py/prototypes/jsregexp.py
class RegExpPrototype: def toString(): flags = u'' if this.glob: flags += u'g' if this.ignore_case: flags += u'i' if this.multiline: flags += u'm' v = this.value if this.value else '(?:)' return u'/%s/'%v + flags def test(string): return Exec(this, string) is not this.null def exec2(string): # will be changed to exec in base.py. cant name it exec here return Exec(this, string) def Exec(this, string): if this.Class!='RegExp': raise this.MakeError('TypeError', 'RegExp.prototype.exec is not generic!') string = string.to_string() length = len(string) i = this.get('lastIndex').to_int() if this.glob else 0 matched = False while not matched: if i < 0 or i > length: this.put('lastIndex', this.Js(0)) return this.null matched = this.match(string.value, i) i += 1 start, end = matched.span()#[0]+i-1, matched.span()[1]+i-1 if this.glob: this.put('lastIndex', this.Js(end)) arr = this.Js([this.Js(e) for e in [matched.group()]+list(matched.groups())]) arr.put('index', this.Js(start)) arr.put('input', string) return arr
1,252
Python
.py
35
28.142857
84
0.582299
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,868
jsstring.py
evilhero_mylar/lib/js2py/prototypes/jsstring.py
# -*- coding: utf-8 -*- from .jsregexp import Exec import re DIGS = set('0123456789') WHITE = u"\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF" def replacement_template(rep, source, span, npar): """Takes the replacement template and some info about the match and returns filled template """ n = 0 res = '' while n < len(rep)-1: char = rep[n] if char=='$': if rep[n+1]=='$': res += '$' n += 2 continue elif rep[n+1]=='`': # replace with string that is BEFORE match res += source[:span[0]] n += 2 continue elif rep[n+1]=='\'': # replace with string that is AFTER match res += source[span[1]:] n += 2 continue elif rep[n+1] in DIGS: dig = rep[n+1] if n+2<len(rep) and rep[n+2] in DIGS: dig += rep[n+2] num = int(dig) # we will not do any replacements if we dont have this npar or dig is 0 if not num or num>len(npar): res += '$'+dig else: # None - undefined has to be replaced with '' res += npar[num-1] if npar[num-1] else '' n += 1 + len(dig) continue res += char n += 1 if n<len(rep): res += rep[-1] return res ################################################### class StringPrototype: def toString(): if this.Class!='String': raise this.MakeError('TypeError', 'String.prototype.toString is not generic') return this.value def valueOf(): if this.Class!='String': raise this.MakeError('TypeError', 'String.prototype.valueOf is not generic') return this.value def charAt(pos): this.cok() pos = pos.to_int() s = this.to_string() if 0<= pos < len(s.value): char = s.value[pos] if char not in s.CHAR_BANK: s.Js(char) # add char to char bank return s.CHAR_BANK[char] return s.CHAR_BANK[''] def charCodeAt(pos): this.cok() pos = pos.to_int() s = this.to_string() if 0<= pos < len(s.value): return s.Js(ord(s.value[pos])) return s.NaN def concat(): this.cok() s = this.to_string() res = s.value for e in arguments.to_list(): res += e.to_string().value return res def indexOf(searchString, 
position): this.cok() s = this.to_string().value search = searchString.to_string().value pos = position.to_int() return this.Js(s.find(search, min(max(pos, 0), len(s))) ) def lastIndexOf(searchString, position): this.cok() s = this.to_string().value search = searchString.to_string().value pos = position.to_number() pos = 10**15 if pos.is_nan() else pos.to_int() return s.rfind(search, 0, min(max(pos, 0)+1, len(s))) def localeCompare(that): this.cok() s = this.to_string() that = that.to_string() if s<that: return this.Js(-1) elif s>that: return this.Js(1) return this.Js(0) def match(regexp): this.cok() s = this.to_string() r = this.RegExp(regexp) if regexp.Class!='RegExp' else regexp if not r.glob: return Exec(r, s) r.put('lastIndex', this.Js(0)) found = [] previous_last_index = 0 last_match = True while last_match: result = Exec(r, s) if result.is_null(): last_match=False else: this_index = r.get('lastIndex').value if this_index==previous_last_index: r.put('lastIndex', this.Js(this_index+1)) previous_last_index += 1 else: previous_last_index = this_index matchStr = result.get('0') found.append(matchStr) if not found: return this.null return found def replace(searchValue, replaceValue): # VERY COMPLICATED. to check again. 
this.cok() string = this.to_string() s = string.value res = '' if not replaceValue.is_callable(): replaceValue = replaceValue.to_string().value func = False else: func = True # Replace all ( global ) if searchValue.Class == 'RegExp' and searchValue.glob: last = 0 for e in re.finditer(searchValue.pat, s): res += s[last:e.span()[0]] if func: # prepare arguments for custom func (replaceValue) args = (e.group(),) + e.groups() + (e.span()[1], string) # convert all types to JS args = map(this.Js, args) res += replaceValue(*args).to_string().value else: res += replacement_template(replaceValue, s, e.span(), e.groups()) last = e.span()[1] res += s[last:] return this.Js(res) elif searchValue.Class=='RegExp': e = re.search(searchValue.pat, s) if e is None: return string span = e.span() pars = e.groups() match = e.group() else: match = searchValue.to_string().value ind = s.find(match) if ind==-1: return string span = ind, ind + len(match) pars = () res = s[:span[0]] if func: args = (match,) + pars + (span[1], string) # convert all types to JS this_ = this args = tuple([this_.Js(x) for x in args]) res += replaceValue(*args).to_string().value else: res += replacement_template(replaceValue, s, span, pars) res += s[span[1]:] return res def search(regexp): this.cok() string = this.to_string() if regexp.Class=='RegExp': rx = regexp else: rx = this.RegExp(regexp) res = re.search(rx.pat, string.value) if res is not None: return this.Js(res.span()[0]) return -1 def slice(start, end): this.cok() s = this.to_string() start = start.to_int() length = len(s.value) end = length if end.is_undefined() else end.to_int() #From = max(length+start, 0) if start<0 else min(length, start) #To = max(length+end, 0) if end<0 else min(length, end) return s.value[start:end] def split (separator, limit): # its a bit different that re.split! 
this.cok() S = this.to_string() s = S.value lim = 2**32-1 if limit.is_undefined() else limit.to_uint32() if not lim: return [] if separator.is_undefined(): return [s] len_s = len(s) res = [] R = separator if separator.Class=='RegExp' else separator.to_string() if not len_s: if SplitMatch(s, 0, R) is None: return [S] return [] p = q = 0 while q!=len_s: e, cap = SplitMatch(s, q, R) if e is None or e==p: q += 1 continue res.append(s[p:q]) p = q = e if len(res)==lim: return res for element in cap: res.append(this.Js(element)) if len(res)==lim: return res res.append(s[p:]) return res def substring (start, end): this.cok() s = this.to_string().value start = start.to_int() length = len(s) end = length if end.is_undefined() else end.to_int() fstart = min(max(start, 0), length) fend = min(max(end, 0), length) return this.Js(s[min(fstart, fend):max(fstart, fend)]) def substr(start, length): #I hate this function and its description in specification r1 = this.to_string().value r2 = start.to_int() r3 = 10**20 if length.is_undefined() else length.to_int() r4 = len(r1) r5 = r2 if r2>=0 else max(0, r2+r4) r6 = min(max(r3 ,0), r4 - r5) if r6<=0: return '' return r1[r5:r5+r6] def toLowerCase(): this.cok() return this.Js(this.to_string().value.lower()) def toLocaleLowerCase(): this.cok() return this.Js(this.to_string().value.lower()) def toUpperCase(): this.cok() return this.Js(this.to_string().value.upper()) def toLocaleUpperCase(): this.cok() return this.Js(this.to_string().value.upper()) def trim(): this.cok() return this.Js(this.to_string().value.strip(WHITE)) def SplitMatch(s, q, R): # s is Py String to match, q is the py int match start and R is Js RegExp or String. if R.Class=='RegExp': res = R.match(s, q) return (None, ()) if res is None else (res.span()[1], res.groups()) # R is just a string if s[q:].startswith(R.value): return q+len(R.value), () return None, ()
9,689
Python
.py
276
24.199275
167
0.503517
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,869
jsarray.py
evilhero_mylar/lib/js2py/prototypes/jsarray.py
import six if six.PY3: xrange = range import functools def to_arr(this): """Returns Python array from Js array""" return [this.get(str(e)) for e in xrange(len(this))] ARR_STACK = set({}) class ArrayPrototype: def toString(): # this function is wrong but I will leave it here fore debugging purposes. func = this.get('join') if not func.is_callable(): @this.Js def func(): return '[object %s]'%this.Class return func.call(this, ()) def toLocaleString(): array = this.to_object() arr_len = array.get('length').to_uint32() # separator is simply a comma ',' if not arr_len: return '' res = [] for i in xrange(arr_len): element = array[str(i)] if element.is_undefined() or element.is_null(): res.append('') else: cand = element.to_object() str_func = element.get('toLocaleString') if not str_func.is_callable(): raise this.MakeError('TypeError', 'toLocaleString method of item at index %d is not callable'%i) res.append(element.callprop('toLocaleString').value) return ','.join(res) def concat(): array = this.to_object() A = this.Js([]) items = [array] items.extend(to_arr(arguments)) n = 0 for E in items: if E.Class=='Array': k = 0 e_len = len(E) while k<e_len: if E.has_property(str(k)): A.put(str(n), E.get(str(k))) n+=1 k+=1 else: A.put(str(n), E) n+=1 return A def join(separator): ARR_STACK.add(this) array = this.to_object() arr_len = array.get('length').to_uint32() separator = ',' if separator.is_undefined() else separator.to_string().value elems = [] for e in xrange(arr_len): elem = array.get(str(e)) if elem in ARR_STACK: s = '' else: s = elem.to_string().value elems.append(s if not (elem.is_undefined() or elem.is_null()) else '') res = separator.join(elems) ARR_STACK.remove(this) return res def pop(): #todo check array = this.to_object() arr_len = array.get('length').to_uint32() if not arr_len: array.put('length', this.Js(arr_len)) return None ind = str(arr_len-1) element = array.get(ind) array.delete(ind) array.put('length', this.Js(arr_len-1)) return element def push(item): # 
todo check array = this.to_object() arr_len = array.get('length').to_uint32() to_put = arguments.to_list() i = arr_len for i, e in enumerate(to_put, arr_len): array.put(str(i), e) if to_put: i+=1 array.put('length', this.Js(i)) return i def reverse(): array = this.to_object() # my own algorithm vals = to_arr(array) has_props = [array.has_property(str(e)) for e in xrange(len(array))] vals.reverse() has_props.reverse() for i, val in enumerate(vals): if has_props[i]: array.put(str(i), val) else: array.delete(str(i)) return array def shift(): #todo check array = this.to_object() arr_len = array.get('length').to_uint32() if not arr_len: array.put('length', this.Js(0)) return None first = array.get('0') for k in xrange(1, arr_len): from_s, to_s = str(k), str(k-1) if array.has_property(from_s): array.put(to_s, array.get(from_s)) else: array.delete(to) array.delete(str(arr_len-1)) array.put('length', this.Js(str(arr_len-1))) return first def slice(start, end): # todo check array = this.to_object() arr_len = array.get('length').to_uint32() relative_start = start.to_int() k = max((arr_len + relative_start), 0) if relative_start<0 else min(relative_start, arr_len) relative_end = arr_len if end.is_undefined() else end.to_int() final = max((arr_len + relative_end), 0) if relative_end<0 else min(relative_end, arr_len) res = [] n = 0 while k<final: pk = str(k) if array.has_property(pk): res.append(array.get(pk)) k += 1 n += 1 return res def sort(cmpfn): if not this.Class in {'Array', 'Arguments'}: return this.to_object() # do nothing arr = [] for i in xrange(len(this)): arr.append(this.get(six.text_type(i))) if not arr: return this if not cmpfn.is_callable(): cmpfn = None cmp = lambda a,b: sort_compare(a, b, cmpfn) if six.PY3: key = functools.cmp_to_key(cmp) arr.sort(key=key) else: arr.sort(cmp=cmp) for i in xrange(len(arr)): this.put(six.text_type(i), arr[i]) return this def splice(start, deleteCount): # 1-8 array = this.to_object() arr_len = array.get('length').to_uint32() 
relative_start = start.to_int() actual_start = max((arr_len + relative_start),0) if relative_start<0 else min(relative_start, arr_len) actual_delete_count = min(max(deleteCount.to_int(),0 ), arr_len - actual_start) k = 0 A = this.Js([]) # 9 while k<actual_delete_count: if array.has_property(str(actual_start+k)): A.put(str(k), array.get(str(actual_start+k))) k += 1 # 10-11 items = to_arr(arguments)[2:] items_len = len(items) # 12 if items_len<actual_delete_count: k = actual_start while k < (arr_len-actual_delete_count): fr = str(k+actual_delete_count) to = str(k+items_len) if array.has_property(fr): array.put(to, array.get(fr)) else: array.delete(to) k += 1 k = arr_len while k > (arr_len - actual_delete_count + items_len): array.delete(str(k-1)) k -= 1 # 13 elif items_len>actual_delete_count: k = arr_len - actual_delete_count while k>actual_start: fr = str(k + actual_delete_count - 1) to = str(k + items_len - 1) if array.has_property(fr): array.put(to, array.get(fr)) else: array.delete(to) k -= 1 # 14-17 k = actual_start while items: E = items.pop(0) array.put(str(k), E) k += 1 array.put('length', this.Js(arr_len - actual_delete_count + items_len)) return A def unshift(): array = this.to_object() arr_len = array.get('length').to_uint32() argCount = len(arguments) k = arr_len while k > 0: fr = str(k - 1) to = str(k + argCount - 1) if array.has_property(fr): array.put(to, array.get(fr)) else: array.delete(to) k -= 1 j = 0 items = to_arr(arguments) while items: E = items.pop(0) array.put(str(j), E) j += 1 array.put('length', this.Js(arr_len + argCount)) return arr_len + argCount def indexOf(searchElement): array = this.to_object() arr_len = array.get('length').to_uint32() if arr_len == 0: return -1 if len(arguments)>1: n = arguments[1].to_int() else: n = 0 if n >= arr_len: return -1 if n >= 0: k = n else: k = arr_len - abs(n) if k < 0: k = 0 while k < arr_len: if array.has_property(str(k)): elementK = array.get(str(k)) if 
searchElement.strict_equality_comparison(elementK): return k k += 1 return -1 def lastIndexOf(searchElement): array = this.to_object() arr_len = array.get('length').to_uint32() if arr_len == 0: return -1 if len(arguments)>1: n = arguments[1].to_int() else: n = arr_len - 1 if n >= 0: k = min(n, arr_len-1) else: k = arr_len - abs(n) while k >= 0: if array.has_property(str(k)): elementK = array.get(str(k)) if searchElement.strict_equality_comparison(elementK): return k k -= 1 return -1 def every(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') T = arguments[1] k = 0 while k<arr_len: if array.has_property(str(k)): kValue = array.get(str(k)) if not callbackfn.call(T, (kValue, this.Js(k), array)).to_boolean().value: return False k += 1 return True def some(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') T = arguments[1] k = 0 while k<arr_len: if array.has_property(str(k)): kValue = array.get(str(k)) if callbackfn.call(T, (kValue, this.Js(k), array)).to_boolean().value: return True k += 1 return False def forEach(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') T = arguments[1] k = 0 while k<arr_len: if array.has_property(str(k)): kValue = array.get(str(k)) callbackfn.call(T, (kValue, this.Js(k), array)) k+=1 def map(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') T = arguments[1] A = this.Js([]) k = 0 while k<arr_len: Pk = str(k) if array.has_property(Pk): kValue = array.get(Pk) mappedValue = callbackfn.call(T, (kValue, this.Js(k), array)) 
A.define_own_property(Pk, {'value': mappedValue, 'writable': True, 'enumerable': True, 'configurable': True}) k += 1 return A def filter(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') T = arguments[1] res = [] k = 0 while k<arr_len: if array.has_property(str(k)): kValue = array.get(str(k)) if callbackfn.call(T, (kValue, this.Js(k), array)).to_boolean().value: res.append(kValue) k += 1 return res # converted to js array automatically def reduce(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') if not arr_len and len(arguments)<2: raise this.MakeError('TypeError', 'Reduce of empty array with no initial value') k = 0 if len(arguments)>1: # initial value present accumulator = arguments[1] else: kPresent = False while not kPresent and k<arr_len: kPresent = array.has_property(str(k)) if kPresent: accumulator = array.get(str(k)) k += 1 if not kPresent: raise this.MakeError('TypeError', 'Reduce of empty array with no initial value') while k<arr_len: if array.has_property(str(k)): kValue = array.get(str(k)) accumulator = callbackfn.call(this.undefined, (accumulator, kValue, this.Js(k), array)) k += 1 return accumulator def reduceRight(callbackfn): array = this.to_object() arr_len = array.get('length').to_uint32() if not callbackfn.is_callable(): raise this.MakeError('TypeError', 'callbackfn must be a function') if not arr_len and len(arguments)<2: raise this.MakeError('TypeError', 'Reduce of empty array with no initial value') k = arr_len - 1 if len(arguments)>1: # initial value present accumulator = arguments[1] else: kPresent = False while not kPresent and k>=0: kPresent = array.has_property(str(k)) if kPresent: accumulator = array.get(str(k)) k -= 1 if not kPresent: raise this.MakeError('TypeError', 'Reduce of 
empty array with no initial value') while k>=0: if array.has_property(str(k)): kValue = array.get(str(k)) accumulator = callbackfn.call(this.undefined, (accumulator, kValue, this.Js(k), array)) k -= 1 return accumulator def sort_compare(a, b, comp): if a is None: if b is None: return 0 return 1 if b is None: if a is None: return 0 return -1 if a.is_undefined(): if b.is_undefined(): return 0 return 1 if b.is_undefined(): if a.is_undefined(): return 0 return -1 if comp is not None: res = comp.call(a.undefined, (a, b)) return res.to_int() x, y = a.to_string(), b.to_string() if x<y: return -1 elif x>y: return 1 return 0
14,886
Python
.py
419
24.190931
116
0.507901
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,870
jsjson.py
evilhero_mylar/lib/js2py/prototypes/jsjson.py
import json from lib.js2py.base import Js indent = '' # python 3 support import six if six.PY3: basestring = str long = int xrange = range unicode = str def parse(text): reviver = arguments[1] s = text.to_string().value try: unfiltered = json.loads(s) except: raise this.MakeError('SyntaxError', 'Could not parse JSON string - Invalid syntax') unfiltered = to_js(this, unfiltered) if reviver.is_callable(): root = this.Js({'': unfiltered}) walk(root, '', reviver) else: return unfiltered def stringify(value, replacer, space): global indent stack = set([]) indent = '' property_list = replacer_function = this.undefined if replacer.is_object(): if replacer.is_callable(): replacer_function = replacer elif replacer.Class=='Array': property_list = [] for e in replacer: v = replacer[e] item = this.undefined if v._type()=='Number': item = v.to_string() elif v._type()=='String': item = v elif v.is_object(): if v.Class in {'String', 'Number'}: item = v.to_string() if not item.is_undefined() and item.value not in property_list: property_list.append(item.value) if space.is_object(): if space.Class=='Number': space = space.to_number() elif space.Class=='String': space = space.to_string() if space._type()=='Number': space = this.Js(min(10, space.to_int())) gap = max(int(space.value), 0)* ' ' elif space._type()=='String': gap = space.value[:10] else: gap = '' return this.Js(Str('', this.Js({'':value}), replacer_function, property_list, gap, stack, space)) def Str(key, holder, replacer_function, property_list, gap, stack, space): value = holder[key] if value.is_object(): to_json = value.get('toJSON') if to_json.is_callable(): value = to_json.call(value, (key,)) if not replacer_function.is_undefined(): value = replacer_function.call(holder, (key, value)) if value.is_object(): if value.Class=='String': value = value.to_string() elif value.Class=='Number': value = value.to_number() elif value.Class=='Boolean': value = value.to_boolean() if value.is_null(): return 'null' elif 
value.Class=='Boolean': return 'true' if value.value else 'false' elif value._type()=='String': return Quote(value) elif value._type()=='Number': if not value.is_infinity(): return value.to_string() return 'null' if value.is_object() and not value.is_callable(): if value.Class=='Array': return ja(value, stack, gap, property_list, replacer_function, space) else: return jo(value, stack, gap, property_list, replacer_function, space) return None # undefined def jo(value, stack, gap, property_list, replacer_function, space): global indent if value in stack: raise value.MakeError('TypeError', 'Converting circular structure to JSON') stack.add(value) stepback = indent indent += gap if not property_list.is_undefined(): k = property_list else: k = [e.value for e in value] partial = [] for p in k: str_p = value.Js(Str(p, value, replacer_function, property_list, gap, stack, space)) if not str_p.is_undefined(): member = json.dumps(p) + ':' + (' ' if gap else '') + str_p.value # todo not sure here - what space character? 
partial.append(member) if not partial: final = '{}' else: if not gap: final = '{%s}' % ','.join(partial) else: sep = ',\n'+indent properties = sep.join(partial) final = '{\n'+indent+properties+'\n'+stepback+'}' stack.remove(value) indent = stepback return final def ja(value, stack, gap, property_list, replacer_function, space): global indent if value in stack: raise value.MakeError('TypeError', 'Converting circular structure to JSON') stack.add(value) stepback = indent indent += gap partial = [] length = len(value) for index in xrange(length): index = str(index) str_index = value.Js(Str(index, value, replacer_function, property_list, gap, stack, space)) if str_index.is_undefined(): partial.append('null') else: partial.append(str_index.value) if not partial: final = '[]' else: if not gap: final = '[%s]' % ','.join(partial) else: sep = ',\n'+indent properties = sep.join(partial) final = '[\n'+indent +properties+'\n'+stepback+']' stack.remove(value) indent = stepback return final def Quote(string): return string.Js(json.dumps(string.value)) def to_js(this, d): if isinstance(d, dict): return this.Js({k:this.Js(v) for k, v in six.iteritems(d)}) return this.Js(d) def walk(holder, name, reviver): val = holder.get(name) if val.Class=='Array': for i in xrange(len(val)): i = unicode(i) new_element = walk(val, i, reviver) if new_element.is_undefined(): val.delete(i) else: new_element.put(i, new_element) elif val.is_object(): for key in val: new_element = walk(val, key, reviver) if new_element.is_undefined(): val.delete(key) else: val.put(key, new_element) return reviver.call(holder, (name, val)) JSON = Js({}) JSON.define_own_property('parse', {'value': Js(parse), 'enumerable': False, 'writable': True, 'configurable': True}) JSON.define_own_property('stringify', {'value': Js(stringify), 'enumerable': False, 'writable': True, 'configurable': True})
6,388
Python
.py
179
26.256983
123
0.556653
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,871
jsobject.py
evilhero_mylar/lib/js2py/prototypes/jsobject.py
class ObjectPrototype: def toString(): return '[object %s]'%this.Class def valueOf(): return this.to_object() def toLocaleString(): return this.callprop('toString') def hasOwnProperty(prop): return this.get_own_property(prop.to_string().value) is not None def isPrototypeOf(obj): #a bit stupid specification but well # for example Object.prototype.isPrototypeOf.call((5).__proto__, 5) gives false if not obj.is_object(): return False while 1: obj = obj.prototype if obj is None or obj.is_null(): return False if obj is this: return True def propertyIsEnumerable(prop): cand = this.own.get(prop.to_string().value) return cand is not None and cand.get('enumerable')
861
Python
.py
23
27.869565
87
0.608485
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,872
_cpmodpy.py
evilhero_mylar/lib/cherrypy/_cpmodpy.py
"""Native adapter for serving CherryPy via mod_python Basic usage: ########################################## # Application in a module called myapp.py ########################################## import cherrypy class Root: @cherrypy.expose def index(self): return 'Hi there, Ho there, Hey there' # We will use this method from the mod_python configuration # as the entry point to our application def setup_server(): cherrypy.tree.mount(Root()) cherrypy.config.update({'environment': 'production', 'log.screen': False, 'show_tracebacks': False}) ########################################## # mod_python settings for apache2 # This should reside in your httpd.conf # or a file that will be loaded at # apache startup ########################################## # Start DocumentRoot "/" Listen 8080 LoadModule python_module /usr/lib/apache2/modules/mod_python.so <Location "/"> PythonPath "sys.path+['/path/to/my/application']" SetHandler python-program PythonHandler cherrypy._cpmodpy::handler PythonOption cherrypy.setup myapp::setup_server PythonDebug On </Location> # End The actual path to your mod_python.so is dependent on your environment. In this case we suppose a global mod_python installation on a Linux distribution such as Ubuntu. We do set the PythonPath configuration setting so that your application can be found by from the user running the apache2 instance. Of course if your application resides in the global site-package this won't be needed. Then restart apache2 and access http://127.0.0.1:8080 """ import logging import sys import cherrypy from cherrypy._cpcompat import BytesIO, copyitems, ntob from cherrypy._cperror import format_exc, bare_error from cherrypy.lib import httputil # ------------------------------ Request-handling def setup(req): from mod_python import apache # Run any setup functions defined by a "PythonOption cherrypy.setup" # directive. 
options = req.get_options() if 'cherrypy.setup' in options: for function in options['cherrypy.setup'].split(): atoms = function.split('::', 1) if len(atoms) == 1: mod = __import__(atoms[0], globals(), locals()) else: modname, fname = atoms mod = __import__(modname, globals(), locals(), [fname]) func = getattr(mod, fname) func() cherrypy.config.update({'log.screen': False, "tools.ignore_headers.on": True, "tools.ignore_headers.headers": ['Range'], }) engine = cherrypy.engine if hasattr(engine, "signal_handler"): engine.signal_handler.unsubscribe() if hasattr(engine, "console_control_handler"): engine.console_control_handler.unsubscribe() engine.autoreload.unsubscribe() cherrypy.server.unsubscribe() def _log(msg, level): newlevel = apache.APLOG_ERR if logging.DEBUG >= level: newlevel = apache.APLOG_DEBUG elif logging.INFO >= level: newlevel = apache.APLOG_INFO elif logging.WARNING >= level: newlevel = apache.APLOG_WARNING # On Windows, req.server is required or the msg will vanish. See # http://www.modpython.org/pipermail/mod_python/2003-October/014291.html # Also, "When server is not specified...LogLevel does not apply..." apache.log_error(msg, newlevel, req.server) engine.subscribe('log', _log) engine.start() def cherrypy_cleanup(data): engine.exit() try: # apache.register_cleanup wasn't available until 3.1.4. 
apache.register_cleanup(cherrypy_cleanup) except AttributeError: req.server.register_cleanup(req, cherrypy_cleanup) class _ReadOnlyRequest: expose = ('read', 'readline', 'readlines') def __init__(self, req): for method in self.expose: self.__dict__[method] = getattr(req, method) recursive = False _isSetUp = False def handler(req): from mod_python import apache try: global _isSetUp if not _isSetUp: setup(req) _isSetUp = True # Obtain a Request object from CherryPy local = req.connection.local_addr local = httputil.Host( local[0], local[1], req.connection.local_host or "") remote = req.connection.remote_addr remote = httputil.Host( remote[0], remote[1], req.connection.remote_host or "") scheme = req.parsed_uri[0] or 'http' req.get_basic_auth_pw() try: # apache.mpm_query only became available in mod_python 3.1 q = apache.mpm_query threaded = q(apache.AP_MPMQ_IS_THREADED) forked = q(apache.AP_MPMQ_IS_FORKED) except AttributeError: bad_value = ("You must provide a PythonOption '%s', " "either 'on' or 'off', when running a version " "of mod_python < 3.1") threaded = options.get('multithread', '').lower() if threaded == 'on': threaded = True elif threaded == 'off': threaded = False else: raise ValueError(bad_value % "multithread") forked = options.get('multiprocess', '').lower() if forked == 'on': forked = True elif forked == 'off': forked = False else: raise ValueError(bad_value % "multiprocess") sn = cherrypy.tree.script_name(req.uri or "/") if sn is None: send_response(req, '404 Not Found', [], '') else: app = cherrypy.tree.apps[sn] method = req.method path = req.uri qs = req.args or "" reqproto = req.protocol headers = copyitems(req.headers_in) rfile = _ReadOnlyRequest(req) prev = None try: redirections = [] while True: request, response = app.get_serving(local, remote, scheme, "HTTP/1.1") request.login = req.user request.multithread = bool(threaded) request.multiprocess = bool(forked) request.app = app request.prev = prev # Run the CherryPy Request object and obtain 
the response try: request.run(method, path, qs, reqproto, headers, rfile) break except cherrypy.InternalRedirect: ir = sys.exc_info()[1] app.release_serving() prev = request if not recursive: if ir.path in redirections: raise RuntimeError( "InternalRedirector visited the same URL " "twice: %r" % ir.path) else: # Add the *previous* path_info + qs to # redirections. if qs: qs = "?" + qs redirections.append(sn + path + qs) # Munge environment and try again. method = "GET" path = ir.path qs = ir.query_string rfile = BytesIO() send_response( req, response.output_status, response.header_list, response.body, response.stream) finally: app.release_serving() except: tb = format_exc() cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR) s, h, b = bare_error() send_response(req, s, h, b) return apache.OK def send_response(req, status, headers, body, stream=False): # Set response status req.status = int(status[:3]) # Set response headers req.content_type = "text/plain" for header, value in headers: if header.lower() == 'content-type': req.content_type = value continue req.headers_out.add(header, value) if stream: # Flush now so the status and headers are sent immediately. req.flush() # Set response body if isinstance(body, basestring): req.write(body) else: for seg in body: req.write(seg) # --------------- Startup tools for CherryPy + mod_python --------------- # import os import re try: import subprocess def popen(fullcmd): p = subprocess.Popen(fullcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) return p.stdout except ImportError: def popen(fullcmd): pipein, pipeout = os.popen4(fullcmd) return pipeout def read_process(cmd, args=""): fullcmd = "%s %s" % (cmd, args) pipeout = popen(fullcmd) try: firstline = pipeout.readline() cmd_not_found = re.search( ntob("(not recognized|No such file|not found)"), firstline, re.IGNORECASE ) if cmd_not_found: raise IOError('%s must be on your system path.' 
% cmd) output = firstline + pipeout.read() finally: pipeout.close() return output class ModPythonServer(object): template = """ # Apache2 server configuration file for running CherryPy with mod_python. DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so <Location %(loc)s> SetHandler python-program PythonHandler %(handler)s PythonDebug On %(opts)s </Location> """ def __init__(self, loc="/", port=80, opts=None, apache_path="apache", handler="cherrypy._cpmodpy::handler"): self.loc = loc self.port = port self.opts = opts self.apache_path = apache_path self.handler = handler def start(self): opts = "".join([" PythonOption %s %s\n" % (k, v) for k, v in self.opts]) conf_data = self.template % {"port": self.port, "loc": self.loc, "opts": opts, "handler": self.handler, } mpconf = os.path.join(os.path.dirname(__file__), "cpmodpy.conf") f = open(mpconf, 'wb') try: f.write(conf_data) finally: f.close() response = read_process(self.apache_path, "-k start -f %s" % mpconf) self.ready = True return response def stop(self): os.popen("apache -k stop") self.ready = False
11,151
Python
.py
290
27.47931
80
0.549639
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,873
_cpchecker.py
evilhero_mylar/lib/cherrypy/_cpchecker.py
import os import warnings import cherrypy from cherrypy._cpcompat import iteritems, copykeys, builtins class Checker(object): """A checker for CherryPy sites and their mounted applications. When this object is called at engine startup, it executes each of its own methods whose names start with ``check_``. If you wish to disable selected checks, simply add a line in your global config which sets the appropriate method to False:: [global] checker.check_skipped_app_config = False You may also dynamically add or replace ``check_*`` methods in this way. """ on = True """If True (the default), run all checks; if False, turn off all checks.""" def __init__(self): self._populate_known_types() def __call__(self): """Run all check_* methods.""" if self.on: oldformatwarning = warnings.formatwarning warnings.formatwarning = self.formatwarning try: for name in dir(self): if name.startswith("check_"): method = getattr(self, name) if method and hasattr(method, '__call__'): method() finally: warnings.formatwarning = oldformatwarning def formatwarning(self, message, category, filename, lineno, line=None): """Function to format a warning.""" return "CherryPy Checker:\n%s\n\n" % message # This value should be set inside _cpconfig. global_config_contained_paths = False def check_app_config_entries_dont_start_with_script_name(self): """Check for Application config with sections that repeat script_name. 
""" for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: continue if sn == '': continue sn_atoms = sn.strip("/").split("/") for key in app.config.keys(): key_atoms = key.strip("/").split("/") if key_atoms[:len(sn_atoms)] == sn_atoms: warnings.warn( "The application mounted at %r has config " "entries that start with its script name: %r" % (sn, key)) def check_site_config_entries_in_app_config(self): """Check for mounted Applications that have site-scoped config.""" for sn, app in iteritems(cherrypy.tree.apps): if not isinstance(app, cherrypy.Application): continue msg = [] for section, entries in iteritems(app.config): if section.startswith('/'): for key, value in iteritems(entries): for n in ("engine.", "server.", "tree.", "checker."): if key.startswith(n): msg.append("[%s] %s = %s" % (section, key, value)) if msg: msg.insert(0, "The application mounted at %r contains the " "following config entries, which are only allowed " "in site-wide config. Move them to a [global] " "section and pass them to cherrypy.config.update() " "instead of tree.mount()." % sn) warnings.warn(os.linesep.join(msg)) def check_skipped_app_config(self): """Check for mounted Applications that have no config.""" for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: msg = "The Application mounted at %r has an empty config." % sn if self.global_config_contained_paths: msg += (" It looks like the config you passed to " "cherrypy.config.update() contains application-" "specific sections. You must explicitly pass " "application config via " "cherrypy.tree.mount(..., config=app_config)") warnings.warn(msg) return def check_app_config_brackets(self): """Check for Application config with extraneous brackets in section names. 
""" for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: continue for key in app.config.keys(): if key.startswith("[") or key.endswith("]"): warnings.warn( "The application mounted at %r has config " "section names with extraneous brackets: %r. " "Config *files* need brackets; config *dicts* " "(e.g. passed to tree.mount) do not." % (sn, key)) def check_static_paths(self): """Check Application config for incorrect static paths.""" # Use the dummy Request object in the main thread. request = cherrypy.request for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue request.app = app for section in app.config: # get_resource will populate request.config request.get_resource(section + "/dummy.html") conf = request.config.get if conf("tools.staticdir.on", False): msg = "" root = conf("tools.staticdir.root") dir = conf("tools.staticdir.dir") if dir is None: msg = "tools.staticdir.dir is not set." else: fulldir = "" if os.path.isabs(dir): fulldir = dir if root: msg = ("dir is an absolute path, even " "though a root is provided.") testdir = os.path.join(root, dir[1:]) if os.path.exists(testdir): msg += ( "\nIf you meant to serve the " "filesystem folder at %r, remove the " "leading slash from dir." % (testdir,)) else: if not root: msg = ( "dir is a relative path and " "no root provided.") else: fulldir = os.path.join(root, dir) if not os.path.isabs(fulldir): msg = ("%r is not an absolute path." % ( fulldir,)) if fulldir and not os.path.exists(fulldir): if msg: msg += "\n" msg += ("%r (root + dir) is not an existing " "filesystem path." 
% fulldir) if msg: warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r" % (msg, section, root, dir)) # -------------------------- Compatibility -------------------------- # obsolete = { 'server.default_content_type': 'tools.response_headers.headers', 'log_access_file': 'log.access_file', 'log_config_options': None, 'log_file': 'log.error_file', 'log_file_not_found': None, 'log_request_headers': 'tools.log_headers.on', 'log_to_screen': 'log.screen', 'show_tracebacks': 'request.show_tracebacks', 'throw_errors': 'request.throw_errors', 'profiler.on': ('cherrypy.tree.mount(profiler.make_app(' 'cherrypy.Application(Root())))'), } deprecated = {} def _compat(self, config): """Process config and warn on each obsolete or deprecated entry.""" for section, conf in config.items(): if isinstance(conf, dict): for k, v in conf.items(): if k in self.obsolete: warnings.warn("%r is obsolete. Use %r instead.\n" "section: [%s]" % (k, self.obsolete[k], section)) elif k in self.deprecated: warnings.warn("%r is deprecated. Use %r instead.\n" "section: [%s]" % (k, self.deprecated[k], section)) else: if section in self.obsolete: warnings.warn("%r is obsolete. Use %r instead." % (section, self.obsolete[section])) elif section in self.deprecated: warnings.warn("%r is deprecated. Use %r instead." 
% (section, self.deprecated[section])) def check_compatibility(self): """Process config and warn on each obsolete or deprecated entry.""" self._compat(cherrypy.config) for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._compat(app.config) # ------------------------ Known Namespaces ------------------------ # extra_config_namespaces = [] def _known_ns(self, app): ns = ["wsgi"] ns.extend(copykeys(app.toolboxes)) ns.extend(copykeys(app.namespaces)) ns.extend(copykeys(app.request_class.namespaces)) ns.extend(copykeys(cherrypy.config.namespaces)) ns += self.extra_config_namespaces for section, conf in app.config.items(): is_path_section = section.startswith("/") if is_path_section and isinstance(conf, dict): for k, v in conf.items(): atoms = k.split(".") if len(atoms) > 1: if atoms[0] not in ns: # Spit out a special warning if a known # namespace is preceded by "cherrypy." if atoms[0] == "cherrypy" and atoms[1] in ns: msg = ( "The config entry %r is invalid; " "try %r instead.\nsection: [%s]" % (k, ".".join(atoms[1:]), section)) else: msg = ( "The config entry %r is invalid, " "because the %r config namespace " "is unknown.\n" "section: [%s]" % (k, atoms[0], section)) warnings.warn(msg) elif atoms[0] == "tools": if atoms[1] not in dir(cherrypy.tools): msg = ( "The config entry %r may be invalid, " "because the %r tool was not found.\n" "section: [%s]" % (k, atoms[1], section)) warnings.warn(msg) def check_config_namespaces(self): """Process config and warn on each unknown config namespace.""" for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._known_ns(app) # -------------------------- Config Types -------------------------- # known_config_types = {} def _populate_known_types(self): b = [x for x in vars(builtins).values() if type(x) is type(str)] def traverse(obj, namespace): for name in dir(obj): # Hack for 3.2's warning about body_params if name == 'body_params': 
continue vtype = type(getattr(obj, name, None)) if vtype in b: self.known_config_types[namespace + "." + name] = vtype traverse(cherrypy.request, "request") traverse(cherrypy.response, "response") traverse(cherrypy.server, "server") traverse(cherrypy.engine, "engine") traverse(cherrypy.log, "log") def _known_types(self, config): msg = ("The config entry %r in section %r is of type %r, " "which does not match the expected type %r.") for section, conf in config.items(): if isinstance(conf, dict): for k, v in conf.items(): if v is not None: expected_type = self.known_config_types.get(k, None) vtype = type(v) if expected_type and vtype != expected_type: warnings.warn(msg % (k, section, vtype.__name__, expected_type.__name__)) else: k, v = section, conf if v is not None: expected_type = self.known_config_types.get(k, None) vtype = type(v) if expected_type and vtype != expected_type: warnings.warn(msg % (k, section, vtype.__name__, expected_type.__name__)) def check_config_types(self): """Assert that config values are of the same type as default values.""" self._known_types(cherrypy.config) for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._known_types(app.config) # -------------------- Specific config warnings -------------------- # def check_localhost(self): """Warn if any socket_host is 'localhost'. See #711.""" for k, v in cherrypy.config.items(): if k == 'server.socket_host' and v == 'localhost': warnings.warn("The use of 'localhost' as a socket host can " "cause problems on newer systems, since " "'localhost' can map to either an IPv4 or an " "IPv6 address. You should use '127.0.0.1' " "or '[::1]' instead.")
15,034
Python
.py
295
32.515254
79
0.472249
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,874
_cpcompat.py
evilhero_mylar/lib/cherrypy/_cpcompat.py
"""Compatibility code for using CherryPy with various versions of Python. CherryPy 3.2 is compatible with Python versions 2.3+. This module provides a useful abstraction over the differences between Python versions, sometimes by preferring a newer idiom, sometimes an older one, and sometimes a custom one. In particular, Python 2 uses str and '' for byte strings, while Python 3 uses str and '' for unicode strings. We will call each of these the 'native string' type for each version. Because of this major difference, this module provides new 'bytestr', 'unicodestr', and 'nativestr' attributes, as well as two functions: 'ntob', which translates native strings (of type 'str') into byte strings regardless of Python version, and 'ntou', which translates native strings to unicode strings. This also provides a 'BytesIO' name for dealing specifically with bytes, and a 'StringIO' name for dealing with native strings. It also provides a 'base64_decode' function with native strings as input and output. """ import os import re import sys import threading if sys.version_info >= (3, 0): py3k = True bytestr = bytes unicodestr = str nativestr = unicodestr basestring = (bytes, str) def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding. """ assert_native(n) # In Python 3, the native string type is unicode return n.encode(encoding) def ntou(n, encoding='ISO-8859-1'): """Return the given native string as a unicode string with the given encoding. 
""" assert_native(n) # In Python 3, the native string type is unicode return n def tonative(n, encoding='ISO-8859-1'): """Return the given string as a native string in the given encoding.""" # In Python 3, the native string type is unicode if isinstance(n, bytes): return n.decode(encoding) return n # type("") from io import StringIO # bytes: from io import BytesIO as BytesIO else: # Python 2 py3k = False bytestr = str unicodestr = unicode nativestr = bytestr basestring = basestring def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding. """ assert_native(n) # In Python 2, the native string type is bytes. Assume it's already # in the given encoding, which for ISO-8859-1 is almost always what # was intended. return n def ntou(n, encoding='ISO-8859-1'): """Return the given native string as a unicode string with the given encoding. """ assert_native(n) # In Python 2, the native string type is bytes. # First, check for the special encoding 'escape'. The test suite uses # this to signal that it wants to pass a string with embedded \uXXXX # escapes, but without having to prefix it with u'' for Python 2, # but no prefix for Python 3. if encoding == 'escape': return unicode( re.sub(r'\\u([0-9a-zA-Z]{4})', lambda m: unichr(int(m.group(1), 16)), n.decode('ISO-8859-1'))) # Assume it's already in the given encoding, which for ISO-8859-1 # is almost always what was intended. return n.decode(encoding) def tonative(n, encoding='ISO-8859-1'): """Return the given string as a native string in the given encoding.""" # In Python 2, the native string type is bytes. 
if isinstance(n, unicode): return n.encode(encoding) return n try: # type("") from cStringIO import StringIO except ImportError: # type("") from StringIO import StringIO # bytes: BytesIO = StringIO def assert_native(n): if not isinstance(n, nativestr): raise TypeError("n must be a native str (got %s)" % type(n).__name__) try: set = set except NameError: from sets import Set as set try: # Python 3.1+ from base64 import decodebytes as _base64_decodebytes except ImportError: # Python 3.0- # since CherryPy claims compability with Python 2.3, we must use # the legacy API of base64 from base64 import decodestring as _base64_decodebytes def base64_decode(n, encoding='ISO-8859-1'): """Return the native string base64-decoded (as a native string).""" if isinstance(n, unicodestr): b = n.encode(encoding) else: b = n b = _base64_decodebytes(b) if nativestr is unicodestr: return b.decode(encoding) else: return b try: # Python 2.5+ from hashlib import md5 except ImportError: from md5 import new as md5 try: # Python 2.5+ from hashlib import sha1 as sha except ImportError: from sha import new as sha try: sorted = sorted except NameError: def sorted(i): i = i[:] i.sort() return i try: reversed = reversed except NameError: def reversed(x): i = len(x) while i > 0: i -= 1 yield x[i] try: # Python 3 from urllib.parse import urljoin, urlencode from urllib.parse import quote, quote_plus from urllib.request import unquote, urlopen from urllib.request import parse_http_list, parse_keqv_list except ImportError: # Python 2 from urlparse import urljoin from urllib import urlencode, urlopen from urllib import quote, quote_plus from urllib import unquote from urllib2 import parse_http_list, parse_keqv_list try: from threading import local as threadlocal except ImportError: from cherrypy._cpthreadinglocal import local as threadlocal try: dict.iteritems # Python 2 iteritems = lambda d: d.iteritems() copyitems = lambda d: d.items() except AttributeError: # Python 3 iteritems = lambda d: d.items() 
copyitems = lambda d: list(d.items()) try: dict.iterkeys # Python 2 iterkeys = lambda d: d.iterkeys() copykeys = lambda d: d.keys() except AttributeError: # Python 3 iterkeys = lambda d: d.keys() copykeys = lambda d: list(d.keys()) try: dict.itervalues # Python 2 itervalues = lambda d: d.itervalues() copyvalues = lambda d: d.values() except AttributeError: # Python 3 itervalues = lambda d: d.values() copyvalues = lambda d: list(d.values()) try: # Python 3 import builtins except ImportError: # Python 2 import __builtin__ as builtins try: # Python 2. We try Python 2 first clients on Python 2 # don't try to import the 'http' module from cherrypy.lib from Cookie import SimpleCookie, CookieError from httplib import BadStatusLine, HTTPConnection, IncompleteRead from httplib import NotConnected from BaseHTTPServer import BaseHTTPRequestHandler except ImportError: # Python 3 from http.cookies import SimpleCookie, CookieError from http.client import BadStatusLine, HTTPConnection, IncompleteRead from http.client import NotConnected from http.server import BaseHTTPRequestHandler # Some platforms don't expose HTTPSConnection, so handle it separately if py3k: try: from http.client import HTTPSConnection except ImportError: # Some platforms which don't have SSL don't expose HTTPSConnection HTTPSConnection = None else: try: from httplib import HTTPSConnection except ImportError: HTTPSConnection = None try: # Python 2 xrange = xrange except NameError: # Python 3 xrange = range import threading if hasattr(threading.Thread, "daemon"): # Python 2.6+ def get_daemon(t): return t.daemon def set_daemon(t, val): t.daemon = val else: def get_daemon(t): return t.isDaemon() def set_daemon(t, val): t.setDaemon(val) try: from email.utils import formatdate def HTTPDate(timeval=None): return formatdate(timeval, usegmt=True) except ImportError: from rfc822 import formatdate as HTTPDate try: # Python 3 from urllib.parse import unquote as parse_unquote def unquote_qs(atom, encoding, 
errors='strict'): return parse_unquote( atom.replace('+', ' '), encoding=encoding, errors=errors) except ImportError: # Python 2 from urllib import unquote as parse_unquote def unquote_qs(atom, encoding, errors='strict'): return parse_unquote(atom.replace('+', ' ')).decode(encoding, errors) try: # Prefer simplejson, which is usually more advanced than the builtin # module. import simplejson as json json_decode = json.JSONDecoder().decode _json_encode = json.JSONEncoder().iterencode except ImportError: if sys.version_info >= (2, 6): # Python >=2.6 : json is part of the standard library import json json_decode = json.JSONDecoder().decode _json_encode = json.JSONEncoder().iterencode else: json = None def json_decode(s): raise ValueError('No JSON library is available') def _json_encode(s): raise ValueError('No JSON library is available') finally: if json and py3k: # The two Python 3 implementations (simplejson/json) # outputs str. We need bytes. def json_encode(value): for chunk in _json_encode(value): yield chunk.encode('utf8') else: json_encode = _json_encode try: import cPickle as pickle except ImportError: # In Python 2, pickle is a Python version. # In Python 3, pickle is the sped-up C version. import pickle try: os.urandom(20) import binascii def random20(): return binascii.hexlify(os.urandom(20)).decode('ascii') except (AttributeError, NotImplementedError): import random # os.urandom not available until Python 2.4. Fall back to random.random. def random20(): return sha('%s' % random.random()).hexdigest() try: from _thread import get_ident as get_thread_ident except ImportError: from thread import get_ident as get_thread_ident try: # Python 3 next = next except NameError: # Python 2 def next(i): return i.next() if sys.version_info >= (3, 3): Timer = threading.Timer Event = threading.Event else: # Python 3.2 and earlier Timer = threading._Timer Event = threading._Event # Prior to Python 2.6, the Thread class did not have a .daemon property. 
# This mix-in adds that property. class SetDaemonProperty: def __get_daemon(self): return self.isDaemon() def __set_daemon(self, daemon): self.setDaemon(daemon) if sys.version_info < (2, 6): daemon = property(__get_daemon, __set_daemon)
10,874
Python
.py
330
27.251515
79
0.674388
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,875
_cpthreadinglocal.py
evilhero_mylar/lib/cherrypy/_cpthreadinglocal.py
# This is a backport of Python-2.4's threading.local() implementation """Thread-local objects (Note that this module provides a Python version of thread threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the local class from threading.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = mydata.__dict__.items() ... items.sort() ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. 
Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ # Threading import is at end class _localbase(object): __slots__ = '_local__key', '_local__args', '_local__lock' def __new__(cls, *args, **kw): self = object.__new__(cls) key = 'thread.local.' + str(id(self)) object.__setattr__(self, '_local__key', key) object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__lock', RLock()) if args or kw and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. 
dict = object.__getattribute__(self, '__dict__') currentThread().__dict__[key] = dict return self def _patch(self): key = object.__getattribute__(self, '_local__key') d = currentThread().__dict__.get(key) if d is None: d = {} currentThread().__dict__[key] = d object.__setattr__(self, '__dict__', d) # we have a new instance dict, so call out __init__ if we have # one cls = type(self) if cls.__init__ is not object.__init__: args, kw = object.__getattribute__(self, '_local__args') cls.__init__(self, *args, **kw) else: object.__setattr__(self, '__dict__', d) class local(_localbase): def __getattribute__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__getattribute__(self, name) finally: lock.release() def __setattr__(self, name, value): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__setattr__(self, name, value) finally: lock.release() def __delattr__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__delattr__(self, name) finally: lock.release() def __del__(): threading_enumerate = enumerate __getattribute__ = object.__getattribute__ def __del__(self): key = __getattribute__(self, '_local__key') try: threads = list(threading_enumerate()) except: # if enumerate fails, as it seems to do during # shutdown, we'll skip cleanup under the assumption # that there is nothing to clean up return for thread in threads: try: __dict__ = thread.__dict__ except AttributeError: # Thread is dying, rest in peace continue if key in __dict__: try: del __dict__[key] except KeyError: pass # didn't have anything in this thread return __del__ __del__ = __del__() from threading import currentThread, enumerate, RLock
6,619
Python
.py
184
29.592391
73
0.600815
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,876
_cprequest.py
evilhero_mylar/lib/cherrypy/_cprequest.py
import os import sys import time import warnings import cherrypy from cherrypy._cpcompat import basestring, copykeys, ntob, unicodestr from cherrypy._cpcompat import SimpleCookie, CookieError, py3k from cherrypy import _cpreqbody, _cpconfig from cherrypy._cperror import format_exc, bare_error from cherrypy.lib import httputil, file_generator class Hook(object): """A callback and its metadata: failsafe, priority, and kwargs.""" callback = None """ The bare callable that this Hook object is wrapping, which will be called when the Hook is called.""" failsafe = False """ If True, the callback is guaranteed to run even if other callbacks from the same call point raise exceptions.""" priority = 50 """ Defines the order of execution for a list of Hooks. Priority numbers should be limited to the closed interval [0, 100], but values outside this range are acceptable, as are fractional values.""" kwargs = {} """ A set of keyword arguments that will be passed to the callable on each call.""" def __init__(self, callback, failsafe=None, priority=None, **kwargs): self.callback = callback if failsafe is None: failsafe = getattr(callback, "failsafe", False) self.failsafe = failsafe if priority is None: priority = getattr(callback, "priority", 50) self.priority = priority self.kwargs = kwargs def __lt__(self, other): # Python 3 return self.priority < other.priority def __cmp__(self, other): # Python 2 return cmp(self.priority, other.priority) def __call__(self): """Run self.callback(**self.kwargs).""" return self.callback(**self.kwargs) def __repr__(self): cls = self.__class__ return ("%s.%s(callback=%r, failsafe=%r, priority=%r, %s)" % (cls.__module__, cls.__name__, self.callback, self.failsafe, self.priority, ", ".join(['%s=%r' % (k, v) for k, v in self.kwargs.items()]))) class HookMap(dict): """A map of call points to lists of callbacks (Hook objects).""" def __new__(cls, points=None): d = dict.__new__(cls) for p in points or []: d[p] = [] return d def __init__(self, *a, **kw): 
pass def attach(self, point, callback, failsafe=None, priority=None, **kwargs): """Append a new Hook made from the supplied arguments.""" self[point].append(Hook(callback, failsafe, priority, **kwargs)) def run(self, point): """Execute all registered Hooks (callbacks) for the given point.""" exc = None hooks = self[point] hooks.sort() for hook in hooks: # Some hooks are guaranteed to run even if others at # the same hookpoint fail. We will still log the failure, # but proceed on to the next hook. The only way # to stop all processing from one of these hooks is # to raise SystemExit and stop the whole server. if exc is None or hook.failsafe: try: hook() except (KeyboardInterrupt, SystemExit): raise except (cherrypy.HTTPError, cherrypy.HTTPRedirect, cherrypy.InternalRedirect): exc = sys.exc_info()[1] except: exc = sys.exc_info()[1] cherrypy.log(traceback=True, severity=40) if exc: raise exc def __copy__(self): newmap = self.__class__() # We can't just use 'update' because we want copies of the # mutable values (each is a list) as well. for k, v in self.items(): newmap[k] = v[:] return newmap copy = __copy__ def __repr__(self): cls = self.__class__ return "%s.%s(points=%r)" % ( cls.__module__, cls.__name__, copykeys(self) ) # Config namespace handlers def hooks_namespace(k, v): """Attach bare hooks declared in config.""" # Use split again to allow multiple hooks for a single # hookpoint per path (e.g. "hooks.before_handler.1"). # Little-known fact you only get from reading source ;) hookpoint = k.split(".", 1)[0] if isinstance(v, basestring): v = cherrypy.lib.attributes(v) if not isinstance(v, Hook): v = Hook(v) cherrypy.serving.request.hooks[hookpoint].append(v) def request_namespace(k, v): """Attach request attributes declared in config.""" # Provides config entries to set request.body attrs (like # attempt_charsets). 
if k[:5] == 'body.': setattr(cherrypy.serving.request.body, k[5:], v) else: setattr(cherrypy.serving.request, k, v) def response_namespace(k, v): """Attach response attributes declared in config.""" # Provides config entries to set default response headers # http://cherrypy.org/ticket/889 if k[:8] == 'headers.': cherrypy.serving.response.headers[k.split('.', 1)[1]] = v else: setattr(cherrypy.serving.response, k, v) def error_page_namespace(k, v): """Attach error pages declared in config.""" if k != 'default': k = int(k) cherrypy.serving.request.error_page[k] = v hookpoints = ['on_start_resource', 'before_request_body', 'before_handler', 'before_finalize', 'on_end_resource', 'on_end_request', 'before_error_response', 'after_error_response'] class Request(object): """An HTTP request. This object represents the metadata of an HTTP request message; that is, it contains attributes which describe the environment in which the request URL, headers, and body were sent (if you want tools to interpret the headers and body, those are elsewhere, mostly in Tools). This 'metadata' consists of socket data, transport characteristics, and the Request-Line. This object also contains data regarding the configuration in effect for the given URL, and the execution plan for generating a response. """ prev = None """ The previous Request object (if any). This should be None unless we are processing an InternalRedirect.""" # Conversation/connection attributes local = httputil.Host("127.0.0.1", 80) "An httputil.Host(ip, port, hostname) object for the server socket." remote = httputil.Host("127.0.0.1", 1111) "An httputil.Host(ip, port, hostname) object for the client socket." scheme = "http" """ The protocol used between client and server. In most cases, this will be either 'http' or 'https'.""" server_protocol = "HTTP/1.1" """ The HTTP version for which the HTTP server is at least conditionally compliant.""" base = "" """The (scheme://host) portion of the requested URL. In some cases (e.g. 
when proxying via mod_rewrite), this may contain path segments which cherrypy.url uses when constructing url's, but which otherwise are ignored by CherryPy. Regardless, this value MUST NOT end in a slash.""" # Request-Line attributes request_line = "" """ The complete Request-Line received from the client. This is a single string consisting of the request method, URI, and protocol version (joined by spaces). Any final CRLF is removed.""" method = "GET" """ Indicates the HTTP method to be performed on the resource identified by the Request-URI. Common methods include GET, HEAD, POST, PUT, and DELETE. CherryPy allows any extension method; however, various HTTP servers and gateways may restrict the set of allowable methods. CherryPy applications SHOULD restrict the set (on a per-URI basis).""" query_string = "" """ The query component of the Request-URI, a string of information to be interpreted by the resource. The query portion of a URI follows the path component, and is separated by a '?'. For example, the URI 'http://www.cherrypy.org/wiki?a=3&b=4' has the query component, 'a=3&b=4'.""" query_string_encoding = 'utf8' """ The encoding expected for query string arguments after % HEX HEX decoding). If a query string is provided that cannot be decoded with this encoding, 404 is raised (since technically it's a different URI). If you want arbitrary encodings to not error, set this to 'Latin-1'; you can then encode back to bytes and re-decode to whatever encoding you like later. """ protocol = (1, 1) """The HTTP protocol version corresponding to the set of features which should be allowed in the response. If BOTH the client's request message AND the server's level of HTTP compliance is HTTP/1.1, this attribute will be the tuple (1, 1). If either is 1.0, this attribute will be the tuple (1, 0). Lower HTTP protocol versions are not explicitly supported.""" params = {} """ A dict which combines query string (GET) and request entity (POST) variables. 
This is populated in two stages: GET params are added before the 'on_start_resource' hook, and POST params are added between the 'before_request_body' and 'before_handler' hooks.""" # Message attributes header_list = [] """ A list of the HTTP request headers as (name, value) tuples. In general, you should use request.headers (a dict) instead.""" headers = httputil.HeaderMap() """ A dict-like object containing the request headers. Keys are header names (in Title-Case format); however, you may get and set them in a case-insensitive manner. That is, headers['Content-Type'] and headers['content-type'] refer to the same value. Values are header values (decoded according to :rfc:`2047` if necessary). See also: httputil.HeaderMap, httputil.HeaderElement.""" cookie = SimpleCookie() """See help(Cookie).""" rfile = None """ If the request included an entity (body), it will be available as a stream in this attribute. However, the rfile will normally be read for you between the 'before_request_body' hook and the 'before_handler' hook, and the resulting string is placed into either request.params or the request.body attribute. You may disable the automatic consumption of the rfile by setting request.process_request_body to False, either in config for the desired path, or in an 'on_start_resource' or 'before_request_body' hook. WARNING: In almost every case, you should not attempt to read from the rfile stream after CherryPy's automatic mechanism has read it. If you turn off the automatic parsing of rfile, you should read exactly the number of bytes specified in request.headers['Content-Length']. Ignoring either of these warnings may result in a hung request thread or in corruption of the next (pipelined) request. 
""" process_request_body = True """ If True, the rfile (if any) is automatically read and parsed, and the result placed into request.params or request.body.""" methods_with_bodies = ("POST", "PUT") """ A sequence of HTTP methods for which CherryPy will automatically attempt to read a body from the rfile. If you are going to change this property, modify it on the configuration (recommended) or on the "hook point" `on_start_resource`. """ body = None """ If the request Content-Type is 'application/x-www-form-urlencoded' or multipart, this will be None. Otherwise, this will be an instance of :class:`RequestBody<cherrypy._cpreqbody.RequestBody>` (which you can .read()); this value is set between the 'before_request_body' and 'before_handler' hooks (assuming that process_request_body is True).""" # Dispatch attributes dispatch = cherrypy.dispatch.Dispatcher() """ The object which looks up the 'page handler' callable and collects config for the current request based on the path_info, other request attributes, and the application architecture. The core calls the dispatcher as early as possible, passing it a 'path_info' argument. The default dispatcher discovers the page handler by matching path_info to a hierarchical arrangement of objects, starting at request.app.root. See help(cherrypy.dispatch) for more information.""" script_name = "" """ The 'mount point' of the application which is handling this request. This attribute MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not "/"). """ path_info = "/" """ The 'relative path' portion of the Request-URI. This is relative to the script_name ('mount point') of the application which is handling this request.""" login = None """ When authentication is used during the request processing this is set to 'False' if it failed and to the 'username' value if it succeeded. 
The default 'None' implies that no authentication happened.""" # Note that cherrypy.url uses "if request.app:" to determine whether # the call is during a real HTTP request or not. So leave this None. app = None """The cherrypy.Application object which is handling this request.""" handler = None """ The function, method, or other callable which CherryPy will call to produce the response. The discovery of the handler and the arguments it will receive are determined by the request.dispatch object. By default, the handler is discovered by walking a tree of objects starting at request.app.root, and is then passed all HTTP params (from the query string and POST body) as keyword arguments.""" toolmaps = {} """ A nested dict of all Toolboxes and Tools in effect for this request, of the form: {Toolbox.namespace: {Tool.name: config dict}}.""" config = None """ A flat dict of all configuration entries which apply to the current request. These entries are collected from global config, application config (based on request.path_info), and from handler config (exactly how is governed by the request.dispatch object in effect for this request; by default, handler config can be attached anywhere in the tree between request.app.root and the final handler, and inherits downward).""" is_index = None """ This will be True if the current request is mapped to an 'index' resource handler (also, a 'default' handler if path_info ends with a slash). The value may be used to automatically redirect the user-agent to a 'more canonical' URL which either adds or removes the trailing slash. See cherrypy.tools.trailing_slash.""" hooks = HookMap(hookpoints) """ A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}. Each key is a str naming the hook point, and each value is a list of hooks which will be called at that hook point during this request. The list of hooks is generally populated as early as possible (mostly from Tools specified in config), but may be extended at any time. 
See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools.""" error_response = cherrypy.HTTPError(500).set_response """ The no-arg callable which will handle unexpected, untrapped errors during request processing. This is not used for expected exceptions (like NotFound, HTTPError, or HTTPRedirect) which are raised in response to expected conditions (those should be customized either via request.error_page or by overriding HTTPError.set_response). By default, error_response uses HTTPError(500) to return a generic error response to the user-agent.""" error_page = {} """ A dict of {error code: response filename or callable} pairs. The error code must be an int representing a given HTTP error code, or the string 'default', which will be used if no matching entry is found for a given numeric code. If a filename is provided, the file should contain a Python string- formatting template, and can expect by default to receive format values with the mapping keys %(status)s, %(message)s, %(traceback)s, and %(version)s. The set of format mappings can be extended by overriding HTTPError.set_response. If a callable is provided, it will be called by default with keyword arguments 'status', 'message', 'traceback', and 'version', as for a string-formatting template. The callable must return a string or iterable of strings which will be set to response.body. It may also override headers or perform any other processing. If no entry is given for an error code, and no 'default' entry exists, a default template will be used. 
""" show_tracebacks = True """ If True, unexpected errors encountered during request processing will include a traceback in the response body.""" show_mismatched_params = True """ If True, mismatched parameters encountered during PageHandler invocation processing will be included in the response body.""" throws = (KeyboardInterrupt, SystemExit, cherrypy.InternalRedirect) """The sequence of exceptions which Request.run does not trap.""" throw_errors = False """ If True, Request.run will not trap any errors (except HTTPRedirect and HTTPError, which are more properly called 'exceptions', not errors).""" closed = False """True once the close method has been called, False otherwise.""" stage = None """ A string containing the stage reached in the request-handling process. This is useful when debugging a live server with hung requests.""" namespaces = _cpconfig.NamespaceSet( **{"hooks": hooks_namespace, "request": request_namespace, "response": response_namespace, "error_page": error_page_namespace, "tools": cherrypy.tools, }) def __init__(self, local_host, remote_host, scheme="http", server_protocol="HTTP/1.1"): """Populate a new Request object. local_host should be an httputil.Host object with the server info. remote_host should be an httputil.Host object with the client info. scheme should be a string, either "http" or "https". """ self.local = local_host self.remote = remote_host self.scheme = scheme self.server_protocol = server_protocol self.closed = False # Put a *copy* of the class error_page into self. self.error_page = self.error_page.copy() # Put a *copy* of the class namespaces into self. self.namespaces = self.namespaces.copy() self.stage = None def close(self): """Run cleanup code. (Core)""" if not self.closed: self.closed = True self.stage = 'on_end_request' self.hooks.run('on_end_request') self.stage = 'close' def run(self, method, path, query_string, req_protocol, headers, rfile): r"""Process the Request. 
(Core) method, path, query_string, and req_protocol should be pulled directly from the Request-Line (e.g. "GET /path?key=val HTTP/1.0"). path This should be %XX-unquoted, but query_string should not be. When using Python 2, they both MUST be byte strings, not unicode strings. When using Python 3, they both MUST be unicode strings, not byte strings, and preferably not bytes \x00-\xFF disguised as unicode. headers A list of (name, value) tuples. rfile A file-like object containing the HTTP request entity. When run() is done, the returned object should have 3 attributes: * status, e.g. "200 OK" * header_list, a list of (name, value) tuples * body, an iterable yielding strings Consumer code (HTTP servers) should then access these response attributes to build the outbound stream. """ response = cherrypy.serving.response self.stage = 'run' try: self.error_response = cherrypy.HTTPError(500).set_response self.method = method path = path or "/" self.query_string = query_string or '' self.params = {} # Compare request and server HTTP protocol versions, in case our # server does not support the requested protocol. Limit our output # to min(req, server). We want the following output: # request server actual written supported response # protocol protocol response protocol feature set # a 1.0 1.0 1.0 1.0 # b 1.0 1.1 1.1 1.0 # c 1.1 1.0 1.0 1.0 # d 1.1 1.1 1.1 1.1 # Notice that, in (b), the response will be "HTTP/1.1" even though # the client only understands 1.0. RFC 2616 10.5.6 says we should # only return 505 if the _major_ version is different. rp = int(req_protocol[5]), int(req_protocol[7]) sp = int(self.server_protocol[5]), int(self.server_protocol[7]) self.protocol = min(rp, sp) response.headers.protocol = self.protocol # Rebuild first line of the request (e.g. "GET /path HTTP/1.0"). url = path if query_string: url += '?' 
+ query_string self.request_line = '%s %s %s' % (method, url, req_protocol) self.header_list = list(headers) self.headers = httputil.HeaderMap() self.rfile = rfile self.body = None self.cookie = SimpleCookie() self.handler = None # path_info should be the path from the # app root (script_name) to the handler. self.script_name = self.app.script_name self.path_info = pi = path[len(self.script_name):] self.stage = 'respond' self.respond(pi) except self.throws: raise except: if self.throw_errors: raise else: # Failure in setup, error handler or finalize. Bypass them. # Can't use handle_error because we may not have hooks yet. cherrypy.log(traceback=True, severity=40) if self.show_tracebacks: body = format_exc() else: body = "" r = bare_error(body) response.output_status, response.header_list, response.body = r if self.method == "HEAD": # HEAD requests MUST NOT return a message-body in the response. response.body = [] try: cherrypy.log.access() except: cherrypy.log.error(traceback=True) if response.timed_out: raise cherrypy.TimeoutError() return response # Uncomment for stage debugging # stage = property(lambda self: self._stage, lambda self, v: print(v)) def respond(self, path_info): """Generate a response for the resource at self.path_info. (Core)""" response = cherrypy.serving.response try: try: try: if self.app is None: raise cherrypy.NotFound() # Get the 'Host' header, so we can HTTPRedirect properly. 
self.stage = 'process_headers' self.process_headers() # Make a copy of the class hooks self.hooks = self.__class__.hooks.copy() self.toolmaps = {} self.stage = 'get_resource' self.get_resource(path_info) self.body = _cpreqbody.RequestBody( self.rfile, self.headers, request_params=self.params) self.namespaces(self.config) self.stage = 'on_start_resource' self.hooks.run('on_start_resource') # Parse the querystring self.stage = 'process_query_string' self.process_query_string() # Process the body if self.process_request_body: if self.method not in self.methods_with_bodies: self.process_request_body = False self.stage = 'before_request_body' self.hooks.run('before_request_body') if self.process_request_body: self.body.process() # Run the handler self.stage = 'before_handler' self.hooks.run('before_handler') if self.handler: self.stage = 'handler' response.body = self.handler() # Finalize self.stage = 'before_finalize' self.hooks.run('before_finalize') response.finalize() except (cherrypy.HTTPRedirect, cherrypy.HTTPError): inst = sys.exc_info()[1] inst.set_response() self.stage = 'before_finalize (HTTPError)' self.hooks.run('before_finalize') response.finalize() finally: self.stage = 'on_end_resource' self.hooks.run('on_end_resource') except self.throws: raise except: if self.throw_errors: raise self.handle_error() def process_query_string(self): """Parse the query string into Python structures. (Core)""" try: p = httputil.parse_query_string( self.query_string, encoding=self.query_string_encoding) except UnicodeDecodeError: raise cherrypy.HTTPError( 404, "The given query string could not be processed. Query " "strings for this resource must be encoded with %r." % self.query_string_encoding) # Python 2 only: keyword arguments must be byte strings (type 'str'). 
if not py3k: for key, value in p.items(): if isinstance(key, unicode): del p[key] p[key.encode(self.query_string_encoding)] = value self.params.update(p) def process_headers(self): """Parse HTTP header data into Python structures. (Core)""" # Process the headers into self.headers headers = self.headers for name, value in self.header_list: # Call title() now (and use dict.__method__(headers)) # so title doesn't have to be called twice. name = name.title() value = value.strip() # Warning: if there is more than one header entry for cookies # (AFAIK, only Konqueror does that), only the last one will # remain in headers (but they will be correctly stored in # request.cookie). if "=?" in value: dict.__setitem__(headers, name, httputil.decode_TEXT(value)) else: dict.__setitem__(headers, name, value) # Handle cookies differently because on Konqueror, multiple # cookies come on different lines with the same key if name == 'Cookie': try: self.cookie.load(value) except CookieError: msg = "Illegal cookie name %s" % value.split('=')[0] raise cherrypy.HTTPError(400, msg) if not dict.__contains__(headers, 'Host'): # All Internet-based HTTP/1.1 servers MUST respond with a 400 # (Bad Request) status code to any HTTP/1.1 request message # which lacks a Host header field. if self.protocol >= (1, 1): msg = "HTTP/1.1 requires a 'Host' request header." raise cherrypy.HTTPError(400, msg) host = dict.get(headers, 'Host') if not host: host = self.local.name or self.local.ip self.base = "%s://%s" % (self.scheme, host) def get_resource(self, path): """Call a dispatcher (which sets self.handler and .config). (Core)""" # First, see if there is a custom dispatch at this URI. Custom # dispatchers can only be specified in app.config, not in _cp_config # (since custom dispatchers may not even have an app.root). 
dispatch = self.app.find_config( path, "request.dispatch", self.dispatch) # dispatch() should set self.handler and self.config dispatch(path) def handle_error(self): """Handle the last unanticipated exception. (Core)""" try: self.hooks.run("before_error_response") if self.error_response: self.error_response() self.hooks.run("after_error_response") cherrypy.serving.response.finalize() except cherrypy.HTTPRedirect: inst = sys.exc_info()[1] inst.set_response() cherrypy.serving.response.finalize() # ------------------------- Properties ------------------------- # def _get_body_params(self): warnings.warn( "body_params is deprecated in CherryPy 3.2, will be removed in " "CherryPy 3.3.", DeprecationWarning ) return self.body.params body_params = property(_get_body_params, doc=""" If the request Content-Type is 'application/x-www-form-urlencoded' or multipart, this will be a dict of the params pulled from the entity body; that is, it will be the portion of request.params that come from the message body (sometimes called "POST params", although they can be sent with various HTTP method verbs). This value is set between the 'before_request_body' and 'before_handler' hooks (assuming that process_request_body is True). Deprecated in 3.2, will be removed for 3.3 in favor of :attr:`request.body.params<cherrypy._cprequest.RequestBody.params>`.""") class ResponseBody(object): """The body of the HTTP response (the response entity).""" if py3k: unicode_err = ("Page handlers MUST return bytes. Use tools.encode " "if you wish to return unicode.") def __get__(self, obj, objclass=None): if obj is None: # When calling on the class instead of an instance... return self else: return obj._body def __set__(self, obj, value): # Convert the given value to an iterable object. 
if py3k and isinstance(value, str): raise ValueError(self.unicode_err) if isinstance(value, basestring): # strings get wrapped in a list because iterating over a single # item list is much faster than iterating over every character # in a long string. if value: value = [value] else: # [''] doesn't evaluate to False, so replace it with []. value = [] elif py3k and isinstance(value, list): # every item in a list must be bytes... for i, item in enumerate(value): if isinstance(item, str): raise ValueError(self.unicode_err) # Don't use isinstance here; io.IOBase which has an ABC takes # 1000 times as long as, say, isinstance(value, str) elif hasattr(value, 'read'): value = file_generator(value) elif value is None: value = [] obj._body = value class Response(object): """An HTTP Response, including status, headers, and body.""" status = "" """The HTTP Status-Code and Reason-Phrase.""" header_list = [] """ A list of the HTTP response headers as (name, value) tuples. In general, you should use response.headers (a dict) instead. This attribute is generated from response.headers and is not valid until after the finalize phase.""" headers = httputil.HeaderMap() """ A dict-like object containing the response headers. Keys are header names (in Title-Case format); however, you may get and set them in a case-insensitive manner. That is, headers['Content-Type'] and headers['content-type'] refer to the same value. Values are header values (decoded according to :rfc:`2047` if necessary). .. seealso:: classes :class:`HeaderMap`, :class:`HeaderElement` """ cookie = SimpleCookie() """See help(Cookie).""" body = ResponseBody() """The body (entity) of the HTTP response.""" time = None """The value of time.time() when created. 
Use in HTTP dates.""" timeout = 300 """Seconds after which the response will be aborted.""" timed_out = False """ Flag to indicate the response should be aborted, because it has exceeded its timeout.""" stream = False """If False, buffer the response body.""" def __init__(self): self.status = None self.header_list = None self._body = [] self.time = time.time() self.headers = httputil.HeaderMap() # Since we know all our keys are titled strings, we can # bypass HeaderMap.update and get a big speed boost. dict.update(self.headers, { "Content-Type": 'text/html', "Server": "CherryPy/" + cherrypy.__version__, "Date": httputil.HTTPDate(self.time), }) self.cookie = SimpleCookie() def collapse_body(self): """Collapse self.body to a single string; replace it and return it.""" if isinstance(self.body, basestring): return self.body newbody = [] for chunk in self.body: if py3k and not isinstance(chunk, bytes): raise TypeError("Chunk %s is not of type 'bytes'." % repr(chunk)) newbody.append(chunk) newbody = ntob('').join(newbody) self.body = newbody return newbody def finalize(self): """Transform headers (and cookies) into self.header_list. (Core)""" try: code, reason, _ = httputil.valid_status(self.status) except ValueError: raise cherrypy.HTTPError(500, sys.exc_info()[1].args[0]) headers = self.headers self.status = "%s %s" % (code, reason) self.output_status = ntob(str(code), 'ascii') + \ ntob(" ") + headers.encode(reason) if self.stream: # The upshot: wsgiserver will chunk the response if # you pop Content-Length (or set it explicitly to None). # Note that lib.static sets C-L to the file's st_size. if dict.get(headers, 'Content-Length') is None: dict.pop(headers, 'Content-Length', None) elif code < 200 or code in (204, 205, 304): # "All 1xx (informational), 204 (no content), # and 304 (not modified) responses MUST NOT # include a message-body." 
dict.pop(headers, 'Content-Length', None) self.body = ntob("") else: # Responses which are not streamed should have a Content-Length, # but allow user code to set Content-Length if desired. if dict.get(headers, 'Content-Length') is None: content = self.collapse_body() dict.__setitem__(headers, 'Content-Length', len(content)) # Transform our header dict into a list of tuples. self.header_list = h = headers.output() cookie = self.cookie.output() if cookie: for line in cookie.split("\n"): if line.endswith("\r"): # Python 2.4 emits cookies joined by LF but 2.5+ by CRLF. line = line[:-1] name, value = line.split(": ", 1) if isinstance(name, unicodestr): name = name.encode("ISO-8859-1") if isinstance(value, unicodestr): value = headers.encode(value) h.append((name, value)) def check_timeout(self): """If now > self.time + self.timeout, set self.timed_out. This purposefully sets a flag, rather than raising an error, so that a monitor thread can interrupt the Response thread. """ if time.time() > self.time + self.timeout: self.timed_out = True
37,180
Python
.py
798
37.073935
79
0.621013
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,877
_cplogging.py
evilhero_mylar/lib/cherrypy/_cplogging.py
""" Simple config ============= Although CherryPy uses the :mod:`Python logging module <logging>`, it does so behind the scenes so that simple logging is simple, but complicated logging is still possible. "Simple" logging means that you can log to the screen (i.e. console/stdout) or to a file, and that you can easily have separate error and access log files. Here are the simplified logging settings. You use these by adding lines to your config file or dict. You should set these at either the global level or per application (see next), but generally not both. * ``log.screen``: Set this to True to have both "error" and "access" messages printed to stdout. * ``log.access_file``: Set this to an absolute filename where you want "access" messages written. * ``log.error_file``: Set this to an absolute filename where you want "error" messages written. Many events are automatically logged; to log your own application events, call :func:`cherrypy.log`. Architecture ============ Separate scopes --------------- CherryPy provides log managers at both the global and application layers. This means you can have one set of logging rules for your entire site, and another set of rules specific to each application. The global log manager is found at :func:`cherrypy.log`, and the log manager for each application is found at :attr:`app.log<cherrypy._cptree.Application.log>`. If you're inside a request, the latter is reachable from ``cherrypy.request.app.log``; if you're outside a request, you'll have to obtain a reference to the ``app``: either the return value of :func:`tree.mount()<cherrypy._cptree.Tree.mount>` or, if you used :func:`quickstart()<cherrypy.quickstart>` instead, via ``cherrypy.tree.apps['/']``. By default, the global logs are named "cherrypy.error" and "cherrypy.access", and the application logs are named "cherrypy.error.2378745" and "cherrypy.access.2378745" (the number is the id of the Application object). 
This means that the application logs "bubble up" to the site logs, so if your application has no log handlers, the site-level handlers will still log the messages. Errors vs. Access ----------------- Each log manager handles both "access" messages (one per HTTP request) and "error" messages (everything else). Note that the "error" log is not just for errors! The format of access messages is highly formalized, but the error log isn't--it receives messages from a variety of sources (including full error tracebacks, if enabled). If you are logging the access log and error log to the same source, then there is a possibility that a specially crafted error message may replicate an access log message as described in CWE-117. In this case it is the application developer's responsibility to manually escape data before using CherryPy's log() functionality, or they may create an application that is vulnerable to CWE-117. This would be achieved by using a custom handler escape any special characters, and attached as described below. Custom Handlers =============== The simple settings above work by manipulating Python's standard :mod:`logging` module. So when you need something more complex, the full power of the standard module is yours to exploit. You can borrow or create custom handlers, formats, filters, and much more. Here's an example that skips the standard FileHandler and uses a RotatingFileHandler instead: :: #python log = app.log # Remove the default FileHandlers if present. log.error_file = "" log.access_file = "" maxBytes = getattr(log, "rot_maxBytes", 10000000) backupCount = getattr(log, "rot_backupCount", 1000) # Make a new RotatingFileHandler for the error log. fname = getattr(log, "rot_error_file", "error.log") h = handlers.RotatingFileHandler(fname, 'a', maxBytes, backupCount) h.setLevel(DEBUG) h.setFormatter(_cplogging.logfmt) log.error_log.addHandler(h) # Make a new RotatingFileHandler for the access log. 
fname = getattr(log, "rot_access_file", "access.log") h = handlers.RotatingFileHandler(fname, 'a', maxBytes, backupCount) h.setLevel(DEBUG) h.setFormatter(_cplogging.logfmt) log.access_log.addHandler(h) The ``rot_*`` attributes are pulled straight from the application log object. Since "log.*" config entries simply set attributes on the log object, you can add custom attributes to your heart's content. Note that these handlers are used ''instead'' of the default, simple handlers outlined above (so don't set the "log.error_file" config entry, for example). """ import datetime import logging # Silence the no-handlers "warning" (stderr write!) in stdlib logging logging.Logger.manager.emittedNoHandlerWarning = 1 logfmt = logging.Formatter("%(message)s") import os import sys import cherrypy from cherrypy import _cperror from cherrypy._cpcompat import ntob, py3k class NullHandler(logging.Handler): """A no-op logging handler to silence the logging.lastResort handler.""" def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None class LogManager(object): """An object to assist both simple and advanced logging. ``cherrypy.log`` is an instance of this class. """ appid = None """The id() of the Application object which owns this log manager. If this is a global log manager, appid is None.""" error_log = None """The actual :class:`logging.Logger` instance for error messages.""" access_log = None """The actual :class:`logging.Logger` instance for access messages.""" if py3k: access_log_format = \ '{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}"' else: access_log_format = \ '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' logger_root = None """The "top-level" logger name. This string will be used as the first segment in the Logger names. 
The default is "cherrypy", for example, in which case the Logger names will be of the form:: cherrypy.error.<appid> cherrypy.access.<appid> """ def __init__(self, appid=None, logger_root="cherrypy"): self.logger_root = logger_root self.appid = appid if appid is None: self.error_log = logging.getLogger("%s.error" % logger_root) self.access_log = logging.getLogger("%s.access" % logger_root) else: self.error_log = logging.getLogger( "%s.error.%s" % (logger_root, appid)) self.access_log = logging.getLogger( "%s.access.%s" % (logger_root, appid)) self.error_log.setLevel(logging.INFO) self.access_log.setLevel(logging.INFO) # Silence the no-handlers "warning" (stderr write!) in stdlib logging self.error_log.addHandler(NullHandler()) self.access_log.addHandler(NullHandler()) cherrypy.engine.subscribe('graceful', self.reopen_files) def reopen_files(self): """Close and reopen all file handlers.""" for log in (self.error_log, self.access_log): for h in log.handlers: if isinstance(h, logging.FileHandler): h.acquire() h.stream.close() h.stream = open(h.baseFilename, h.mode) h.release() def error(self, msg='', context='', severity=logging.INFO, traceback=False): """Write the given ``msg`` to the error log. This is not just for errors! Applications may call this at any time to log application-specific information. If ``traceback`` is True, the traceback of the current exception (if any) will be appended to ``msg``. """ if traceback: msg += _cperror.format_exc() self.error_log.log(severity, ' '.join((self.time(), context, msg))) def __call__(self, *args, **kwargs): """An alias for ``error``.""" return self.error(*args, **kwargs) def access(self): """Write to the access log (in Apache/NCSA Combined Log format). See the `apache documentation <http://httpd.apache.org/docs/current/logs.html#combined>`_ for format details. CherryPy calls this automatically for you. Note there are no arguments; it collects the data itself from :class:`cherrypy.request<cherrypy._cprequest.Request>`. 
Like Apache started doing in 2.0.46, non-printable and other special characters in %r (and we expand that to all parts) are escaped using \\xhh sequences, where hh stands for the hexadecimal representation of the raw byte. Exceptions from this rule are " and \\, which are escaped by prepending a backslash, and all whitespace characters, which are written in their C-style notation (\\n, \\t, etc). """ request = cherrypy.serving.request remote = request.remote response = cherrypy.serving.response outheaders = response.headers inheaders = request.headers if response.output_status is None: status = "-" else: status = response.output_status.split(ntob(" "), 1)[0] if py3k: status = status.decode('ISO-8859-1') atoms = {'h': remote.name or remote.ip, 'l': '-', 'u': getattr(request, "login", None) or "-", 't': self.time(), 'r': request.request_line, 's': status, 'b': dict.get(outheaders, 'Content-Length', '') or "-", 'f': dict.get(inheaders, 'Referer', ''), 'a': dict.get(inheaders, 'User-Agent', ''), 'o': dict.get(inheaders, 'Host', '-'), } if py3k: for k, v in atoms.items(): if not isinstance(v, str): v = str(v) v = v.replace('"', '\\"').encode('utf8') # Fortunately, repr(str) escapes unprintable chars, \n, \t, etc # and backslash for us. All we have to do is strip the quotes. v = repr(v)[2:-1] # in python 3.0 the repr of bytes (as returned by encode) # uses double \'s. But then the logger escapes them yet, again # resulting in quadruple slashes. Remove the extra one here. v = v.replace('\\\\', '\\') # Escape double-quote. atoms[k] = v try: self.access_log.log( logging.INFO, self.access_log_format.format(**atoms)) except: self(traceback=True) else: for k, v in atoms.items(): if isinstance(v, unicode): v = v.encode('utf8') elif not isinstance(v, str): v = str(v) # Fortunately, repr(str) escapes unprintable chars, \n, \t, etc # and backslash for us. All we have to do is strip the quotes. v = repr(v)[1:-1] # Escape double-quote. 
atoms[k] = v.replace('"', '\\"') try: self.access_log.log( logging.INFO, self.access_log_format % atoms) except: self(traceback=True) def time(self): """Return now() in Apache Common Log Format (no timezone).""" now = datetime.datetime.now() monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] month = monthnames[now.month - 1].capitalize() return ('[%02d/%s/%04d:%02d:%02d:%02d]' % (now.day, month, now.year, now.hour, now.minute, now.second)) def _get_builtin_handler(self, log, key): for h in log.handlers: if getattr(h, "_cpbuiltin", None) == key: return h # ------------------------- Screen handlers ------------------------- # def _set_screen_handler(self, log, enable, stream=None): h = self._get_builtin_handler(log, "screen") if enable: if not h: if stream is None: stream = sys.stderr h = logging.StreamHandler(stream) h.setFormatter(logfmt) h._cpbuiltin = "screen" log.addHandler(h) elif h: log.handlers.remove(h) def _get_screen(self): h = self._get_builtin_handler has_h = h(self.error_log, "screen") or h(self.access_log, "screen") return bool(has_h) def _set_screen(self, newvalue): self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr) self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout) screen = property(_get_screen, _set_screen, doc="""Turn stderr/stdout logging on or off. If you set this to True, it'll add the appropriate StreamHandler for you. If you set it to False, it will remove the handler. 
""") # -------------------------- File handlers -------------------------- # def _add_builtin_file_handler(self, log, fname): h = logging.FileHandler(fname) h.setFormatter(logfmt) h._cpbuiltin = "file" log.addHandler(h) def _set_file_handler(self, log, filename): h = self._get_builtin_handler(log, "file") if filename: if h: if h.baseFilename != os.path.abspath(filename): h.close() log.handlers.remove(h) self._add_builtin_file_handler(log, filename) else: self._add_builtin_file_handler(log, filename) else: if h: h.close() log.handlers.remove(h) def _get_error_file(self): h = self._get_builtin_handler(self.error_log, "file") if h: return h.baseFilename return '' def _set_error_file(self, newvalue): self._set_file_handler(self.error_log, newvalue) error_file = property(_get_error_file, _set_error_file, doc="""The filename for self.error_log. If you set this to a string, it'll add the appropriate FileHandler for you. If you set it to ``None`` or ``''``, it will remove the handler. """) def _get_access_file(self): h = self._get_builtin_handler(self.access_log, "file") if h: return h.baseFilename return '' def _set_access_file(self, newvalue): self._set_file_handler(self.access_log, newvalue) access_file = property(_get_access_file, _set_access_file, doc="""The filename for self.access_log. If you set this to a string, it'll add the appropriate FileHandler for you. If you set it to ``None`` or ``''``, it will remove the handler. """) # ------------------------- WSGI handlers ------------------------- # def _set_wsgi_handler(self, log, enable): h = self._get_builtin_handler(log, "wsgi") if enable: if not h: h = WSGIErrorHandler() h.setFormatter(logfmt) h._cpbuiltin = "wsgi" log.addHandler(h) elif h: log.handlers.remove(h) def _get_wsgi(self): return bool(self._get_builtin_handler(self.error_log, "wsgi")) def _set_wsgi(self, newvalue): self._set_wsgi_handler(self.error_log, newvalue) wsgi = property(_get_wsgi, _set_wsgi, doc="""Write errors to wsgi.errors. 
If you set this to True, it'll add the appropriate :class:`WSGIErrorHandler<cherrypy._cplogging.WSGIErrorHandler>` for you (which writes errors to ``wsgi.errors``). If you set it to False, it will remove the handler. """) class WSGIErrorHandler(logging.Handler): "A handler class which writes logging records to environ['wsgi.errors']." def flush(self): """Flushes the stream.""" try: stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors') except (AttributeError, KeyError): pass else: stream.flush() def emit(self, record): """Emit a record.""" try: stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors') except (AttributeError, KeyError): pass else: try: msg = self.format(record) fs = "%s\n" import types # if no unicode support... if not hasattr(types, "UnicodeType"): stream.write(fs % msg) else: try: stream.write(fs % msg) except UnicodeError: stream.write(fs % msg.encode("UTF-8")) self.flush() except: self.handleError(record)
17,181
Python
.py
377
36.331565
89
0.607822
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,878
_cpwsgi_server.py
evilhero_mylar/lib/cherrypy/_cpwsgi_server.py
"""WSGI server interface (see PEP 333). This adds some CP-specific bits to the framework-agnostic wsgiserver package. """ import sys import cherrypy from cherrypy import wsgiserver class CPWSGIServer(wsgiserver.CherryPyWSGIServer): """Wrapper for wsgiserver.CherryPyWSGIServer. wsgiserver has been designed to not reference CherryPy in any way, so that it can be used in other frameworks and applications. Therefore, we wrap it here, so we can set our own mount points from cherrypy.tree and apply some attributes from config -> cherrypy.server -> wsgiserver. """ def __init__(self, server_adapter=cherrypy.server): self.server_adapter = server_adapter self.max_request_header_size = ( self.server_adapter.max_request_header_size or 0 ) self.max_request_body_size = ( self.server_adapter.max_request_body_size or 0 ) server_name = (self.server_adapter.socket_host or self.server_adapter.socket_file or None) self.wsgi_version = self.server_adapter.wsgi_version s = wsgiserver.CherryPyWSGIServer s.__init__(self, server_adapter.bind_addr, cherrypy.tree, self.server_adapter.thread_pool, server_name, max=self.server_adapter.thread_pool_max, request_queue_size=self.server_adapter.socket_queue_size, timeout=self.server_adapter.socket_timeout, shutdown_timeout=self.server_adapter.shutdown_timeout, accepted_queue_size=self.server_adapter.accepted_queue_size, accepted_queue_timeout=self.server_adapter.accepted_queue_timeout, ) self.protocol = self.server_adapter.protocol_version self.nodelay = self.server_adapter.nodelay if sys.version_info >= (3, 0): ssl_module = self.server_adapter.ssl_module or 'builtin' else: ssl_module = self.server_adapter.ssl_module or 'pyopenssl' if self.server_adapter.ssl_context: adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module) self.ssl_adapter = adapter_class( self.server_adapter.ssl_certificate, self.server_adapter.ssl_private_key, self.server_adapter.ssl_certificate_chain) self.ssl_adapter.context = self.server_adapter.ssl_context elif 
self.server_adapter.ssl_certificate: adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module) self.ssl_adapter = adapter_class( self.server_adapter.ssl_certificate, self.server_adapter.ssl_private_key, self.server_adapter.ssl_certificate_chain) self.stats['Enabled'] = getattr( self.server_adapter, 'statistics', False) def error_log(self, msg="", level=20, traceback=False): cherrypy.engine.log(msg, level, traceback)
3,023
Python
.py
59
39.864407
85
0.648154
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,879
_cpnative_server.py
evilhero_mylar/lib/cherrypy/_cpnative_server.py
"""Native adapter for serving CherryPy via its builtin server.""" import logging import sys import cherrypy from cherrypy._cpcompat import BytesIO from cherrypy._cperror import format_exc, bare_error from cherrypy.lib import httputil from cherrypy import wsgiserver class NativeGateway(wsgiserver.Gateway): recursive = False def respond(self): req = self.req try: # Obtain a Request object from CherryPy local = req.server.bind_addr local = httputil.Host(local[0], local[1], "") remote = req.conn.remote_addr, req.conn.remote_port remote = httputil.Host(remote[0], remote[1], "") scheme = req.scheme sn = cherrypy.tree.script_name(req.uri or "/") if sn is None: self.send_response('404 Not Found', [], ['']) else: app = cherrypy.tree.apps[sn] method = req.method path = req.path qs = req.qs or "" headers = req.inheaders.items() rfile = req.rfile prev = None try: redirections = [] while True: request, response = app.get_serving( local, remote, scheme, "HTTP/1.1") request.multithread = True request.multiprocess = False request.app = app request.prev = prev # Run the CherryPy Request object and obtain the # response try: request.run(method, path, qs, req.request_protocol, headers, rfile) break except cherrypy.InternalRedirect: ir = sys.exc_info()[1] app.release_serving() prev = request if not self.recursive: if ir.path in redirections: raise RuntimeError( "InternalRedirector visited the same " "URL twice: %r" % ir.path) else: # Add the *previous* path_info + qs to # redirections. if qs: qs = "?" + qs redirections.append(sn + path + qs) # Munge environment and try again. 
method = "GET" path = ir.path qs = ir.query_string rfile = BytesIO() self.send_response( response.output_status, response.header_list, response.body) finally: app.release_serving() except: tb = format_exc() # print tb cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR) s, h, b = bare_error() self.send_response(s, h, b) def send_response(self, status, headers, body): req = self.req # Set response status req.status = str(status or "500 Server Error") # Set response headers for header, value in headers: req.outheaders.append((header, value)) if (req.ready and not req.sent_headers): req.sent_headers = True req.send_headers() # Set response body for seg in body: req.write(seg) class CPHTTPServer(wsgiserver.HTTPServer): """Wrapper for wsgiserver.HTTPServer. wsgiserver has been designed to not reference CherryPy in any way, so that it can be used in other frameworks and applications. Therefore, we wrap it here, so we can apply some attributes from config -> cherrypy.server -> HTTPServer. 
""" def __init__(self, server_adapter=cherrypy.server): self.server_adapter = server_adapter server_name = (self.server_adapter.socket_host or self.server_adapter.socket_file or None) wsgiserver.HTTPServer.__init__( self, server_adapter.bind_addr, NativeGateway, minthreads=server_adapter.thread_pool, maxthreads=server_adapter.thread_pool_max, server_name=server_name) self.max_request_header_size = ( self.server_adapter.max_request_header_size or 0) self.max_request_body_size = ( self.server_adapter.max_request_body_size or 0) self.request_queue_size = self.server_adapter.socket_queue_size self.timeout = self.server_adapter.socket_timeout self.shutdown_timeout = self.server_adapter.shutdown_timeout self.protocol = self.server_adapter.protocol_version self.nodelay = self.server_adapter.nodelay ssl_module = self.server_adapter.ssl_module or 'pyopenssl' if self.server_adapter.ssl_context: adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module) self.ssl_adapter = adapter_class( self.server_adapter.ssl_certificate, self.server_adapter.ssl_private_key, self.server_adapter.ssl_certificate_chain) self.ssl_adapter.context = self.server_adapter.ssl_context elif self.server_adapter.ssl_certificate: adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module) self.ssl_adapter = adapter_class( self.server_adapter.ssl_certificate, self.server_adapter.ssl_private_key, self.server_adapter.ssl_certificate_chain)
6,000
Python
.py
129
30.077519
78
0.525145
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,880
_cpwsgi.py
evilhero_mylar/lib/cherrypy/_cpwsgi.py
"""WSGI interface (see PEP 333 and 3333). Note that WSGI environ keys and values are 'native strings'; that is, whatever the type of "" is. For Python 2, that's a byte string; for Python 3, it's a unicode string. But PEP 3333 says: "even if Python's str type is actually Unicode "under the hood", the content of native strings must still be translatable to bytes via the Latin-1 encoding!" """ import sys as _sys import cherrypy as _cherrypy from cherrypy._cpcompat import BytesIO, bytestr, ntob, ntou, py3k, unicodestr from cherrypy import _cperror from cherrypy.lib import httputil from cherrypy.lib import is_closable_iterator def downgrade_wsgi_ux_to_1x(environ): """Return a new environ dict for WSGI 1.x from the given WSGI u.x environ. """ env1x = {} url_encoding = environ[ntou('wsgi.url_encoding')] for k, v in list(environ.items()): if k in [ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING')]: v = v.encode(url_encoding) elif isinstance(v, unicodestr): v = v.encode('ISO-8859-1') env1x[k.encode('ISO-8859-1')] = v return env1x class VirtualHost(object): """Select a different WSGI application based on the Host header. This can be useful when running multiple sites within one CP server. It allows several domains to point to different applications. For example:: root = Root() RootApp = cherrypy.Application(root) Domain2App = cherrypy.Application(root) SecureApp = cherrypy.Application(Secure()) vhost = cherrypy._cpwsgi.VirtualHost(RootApp, domains={'www.domain2.example': Domain2App, 'www.domain2.example:443': SecureApp, }) cherrypy.tree.graft(vhost) """ default = None """Required. The default WSGI application.""" use_x_forwarded_host = True """If True (the default), any "X-Forwarded-Host" request header will be used instead of the "Host" header. This is commonly added by HTTP servers (such as Apache) when proxying.""" domains = {} """A dict of {host header value: application} pairs. 
The incoming "Host" request header is looked up in this dict, and, if a match is found, the corresponding WSGI application will be called instead of the default. Note that you often need separate entries for "example.com" and "www.example.com". In addition, "Host" headers may contain the port number. """ def __init__(self, default, domains=None, use_x_forwarded_host=True): self.default = default self.domains = domains or {} self.use_x_forwarded_host = use_x_forwarded_host def __call__(self, environ, start_response): domain = environ.get('HTTP_HOST', '') if self.use_x_forwarded_host: domain = environ.get("HTTP_X_FORWARDED_HOST", domain) nextapp = self.domains.get(domain) if nextapp is None: nextapp = self.default return nextapp(environ, start_response) class InternalRedirector(object): """WSGI middleware that handles raised cherrypy.InternalRedirect.""" def __init__(self, nextapp, recursive=False): self.nextapp = nextapp self.recursive = recursive def __call__(self, environ, start_response): redirections = [] while True: environ = environ.copy() try: return self.nextapp(environ, start_response) except _cherrypy.InternalRedirect: ir = _sys.exc_info()[1] sn = environ.get('SCRIPT_NAME', '') path = environ.get('PATH_INFO', '') qs = environ.get('QUERY_STRING', '') # Add the *previous* path_info + qs to redirections. old_uri = sn + path if qs: old_uri += "?" + qs redirections.append(old_uri) if not self.recursive: # Check to see if the new URI has been redirected to # already new_uri = sn + ir.path if ir.query_string: new_uri += "?" + ir.query_string if new_uri in redirections: ir.request.close() raise RuntimeError("InternalRedirector visited the " "same URL twice: %r" % new_uri) # Munge the environment and try again. 
environ['REQUEST_METHOD'] = "GET" environ['PATH_INFO'] = ir.path environ['QUERY_STRING'] = ir.query_string environ['wsgi.input'] = BytesIO() environ['CONTENT_LENGTH'] = "0" environ['cherrypy.previous_request'] = ir.request class ExceptionTrapper(object): """WSGI middleware that traps exceptions.""" def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)): self.nextapp = nextapp self.throws = throws def __call__(self, environ, start_response): return _TrappedResponse( self.nextapp, environ, start_response, self.throws ) class _TrappedResponse(object): response = iter([]) def __init__(self, nextapp, environ, start_response, throws): self.nextapp = nextapp self.environ = environ self.start_response = start_response self.throws = throws self.started_response = False self.response = self.trap( self.nextapp, self.environ, self.start_response) self.iter_response = iter(self.response) def __iter__(self): self.started_response = True return self if py3k: def __next__(self): return self.trap(next, self.iter_response) else: def next(self): return self.trap(self.iter_response.next) def close(self): if hasattr(self.response, 'close'): self.response.close() def trap(self, func, *args, **kwargs): try: return func(*args, **kwargs) except self.throws: raise except StopIteration: raise except: tb = _cperror.format_exc() #print('trapped (started %s):' % self.started_response, tb) _cherrypy.log(tb, severity=40) if not _cherrypy.request.show_tracebacks: tb = "" s, h, b = _cperror.bare_error(tb) if py3k: # What fun. s = s.decode('ISO-8859-1') h = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1')) for k, v in h] if self.started_response: # Empty our iterable (so future calls raise StopIteration) self.iter_response = iter([]) else: self.iter_response = iter(b) try: self.start_response(s, h, _sys.exc_info()) except: # "The application must not trap any exceptions raised by # start_response, if it called start_response with exc_info. 
# Instead, it should allow such exceptions to propagate # back to the server or gateway." # But we still log and call close() to clean up ourselves. _cherrypy.log(traceback=True, severity=40) raise if self.started_response: return ntob("").join(b) else: return b # WSGI-to-CP Adapter # class AppResponse(object): """WSGI response iterable for CherryPy applications.""" def __init__(self, environ, start_response, cpapp): self.cpapp = cpapp try: if not py3k: if environ.get(ntou('wsgi.version')) == (ntou('u'), 0): environ = downgrade_wsgi_ux_to_1x(environ) self.environ = environ self.run() r = _cherrypy.serving.response outstatus = r.output_status if not isinstance(outstatus, bytestr): raise TypeError("response.output_status is not a byte string.") outheaders = [] for k, v in r.header_list: if not isinstance(k, bytestr): raise TypeError( "response.header_list key %r is not a byte string." % k) if not isinstance(v, bytestr): raise TypeError( "response.header_list value %r is not a byte string." % v) outheaders.append((k, v)) if py3k: # According to PEP 3333, when using Python 3, the response # status and headers must be bytes masquerading as unicode; # that is, they must be of type "str" but are restricted to # code points in the "latin-1" set. outstatus = outstatus.decode('ISO-8859-1') outheaders = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1')) for k, v in outheaders] self.iter_response = iter(r.body) self.write = start_response(outstatus, outheaders) except: self.close() raise def __iter__(self): return self if py3k: def __next__(self): return next(self.iter_response) else: def next(self): return self.iter_response.next() def close(self): """Close and de-reference the current request and response. 
(Core)""" streaming = _cherrypy.serving.response.stream self.cpapp.release_serving() # We avoid the expense of examining the iterator to see if it's # closable unless we are streaming the response, as that's the # only situation where we are going to have an iterator which # may not have been exhausted yet. if streaming and is_closable_iterator(self.iter_response): iter_close = self.iter_response.close try: iter_close() except Exception: _cherrypy.log(traceback=True, severity=40) def run(self): """Create a Request object using environ.""" env = self.environ.get local = httputil.Host('', int(env('SERVER_PORT', 80)), env('SERVER_NAME', '')) remote = httputil.Host(env('REMOTE_ADDR', ''), int(env('REMOTE_PORT', -1) or -1), env('REMOTE_HOST', '')) scheme = env('wsgi.url_scheme') sproto = env('ACTUAL_SERVER_PROTOCOL', "HTTP/1.1") request, resp = self.cpapp.get_serving(local, remote, scheme, sproto) # LOGON_USER is served by IIS, and is the name of the # user after having been mapped to a local account. # Both IIS and Apache set REMOTE_USER, when possible. request.login = env('LOGON_USER') or env('REMOTE_USER') or None request.multithread = self.environ['wsgi.multithread'] request.multiprocess = self.environ['wsgi.multiprocess'] request.wsgi_environ = self.environ request.prev = env('cherrypy.previous_request', None) meth = self.environ['REQUEST_METHOD'] path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''), self.environ.get('PATH_INFO', '')) qs = self.environ.get('QUERY_STRING', '') if py3k: # This isn't perfect; if the given PATH_INFO is in the # wrong encoding, it may fail to match the appropriate config # section URI. But meh. 
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1') new_enc = self.cpapp.find_config(self.environ.get('PATH_INFO', ''), "request.uri_encoding", 'utf-8') if new_enc.lower() != old_enc.lower(): # Even though the path and qs are unicode, the WSGI server # is required by PEP 3333 to coerce them to ISO-8859-1 # masquerading as unicode. So we have to encode back to # bytes and then decode again using the "correct" encoding. try: u_path = path.encode(old_enc).decode(new_enc) u_qs = qs.encode(old_enc).decode(new_enc) except (UnicodeEncodeError, UnicodeDecodeError): # Just pass them through without transcoding and hope. pass else: # Only set transcoded values if they both succeed. path = u_path qs = u_qs rproto = self.environ.get('SERVER_PROTOCOL') headers = self.translate_headers(self.environ) rfile = self.environ['wsgi.input'] request.run(meth, path, qs, rproto, headers, rfile) headerNames = {'HTTP_CGI_AUTHORIZATION': 'Authorization', 'CONTENT_LENGTH': 'Content-Length', 'CONTENT_TYPE': 'Content-Type', 'REMOTE_HOST': 'Remote-Host', 'REMOTE_ADDR': 'Remote-Addr', } def translate_headers(self, environ): """Translate CGI-environ header names to HTTP header names.""" for cgiName in environ: # We assume all incoming header keys are uppercase already. if cgiName in self.headerNames: yield self.headerNames[cgiName], environ[cgiName] elif cgiName[:5] == "HTTP_": # Hackish attempt at recovering original header names. translatedHeader = cgiName[5:].replace("_", "-") yield translatedHeader, environ[cgiName] class CPWSGIApp(object): """A WSGI application object for a CherryPy Application.""" pipeline = [('ExceptionTrapper', ExceptionTrapper), ('InternalRedirector', InternalRedirector), ] """A list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a constructor that takes an initial, positional 'nextapp' argument, plus optional keyword arguments, and returns a WSGI application (that takes environ and start_response arguments). 
The 'name' can be any you choose, and will correspond to keys in self.config.""" head = None """Rather than nest all apps in the pipeline on each call, it's only done the first time, and the result is memoized into self.head. Set this to None again if you change self.pipeline after calling self.""" config = {} """A dict whose keys match names listed in the pipeline. Each value is a further dict which will be passed to the corresponding named WSGI callable (from the pipeline) as keyword arguments.""" response_class = AppResponse """The class to instantiate and return as the next app in the WSGI chain. """ def __init__(self, cpapp, pipeline=None): self.cpapp = cpapp self.pipeline = self.pipeline[:] if pipeline: self.pipeline.extend(pipeline) self.config = self.config.copy() def tail(self, environ, start_response): """WSGI application callable for the actual CherryPy application. You probably shouldn't call this; call self.__call__ instead, so that any WSGI middleware in self.pipeline can run first. """ return self.response_class(environ, start_response, self.cpapp) def __call__(self, environ, start_response): head = self.head if head is None: # Create and nest the WSGI apps in our pipeline (in reverse order). # Then memoize the result in self.head. head = self.tail for name, callable in self.pipeline[::-1]: conf = self.config.get(name, {}) head = callable(head, **conf) self.head = head return head(environ, start_response) def namespace_handler(self, k, v): """Config handler for the 'wsgi' namespace.""" if k == "pipeline": # Note this allows multiple 'wsgi.pipeline' config entries # (but each entry will be processed in a 'random' order). # It should also allow developers to set default middleware # in code (passed to self.__init__) that deployers can add to # (but not remove) via config. self.pipeline.extend(v) elif k == "response_class": self.response_class = v else: name, arg = k.split(".", 1) bucket = self.config.setdefault(name, {}) bucket[arg] = v
16,870
Python
.py
361
34.947368
79
0.580331
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,881
_cperror.py
evilhero_mylar/lib/cherrypy/_cperror.py
"""Exception classes for CherryPy. CherryPy provides (and uses) exceptions for declaring that the HTTP response should be a status other than the default "200 OK". You can ``raise`` them like normal Python exceptions. You can also call them and they will raise themselves; this means you can set an :class:`HTTPError<cherrypy._cperror.HTTPError>` or :class:`HTTPRedirect<cherrypy._cperror.HTTPRedirect>` as the :attr:`request.handler<cherrypy._cprequest.Request.handler>`. .. _redirectingpost: Redirecting POST ================ When you GET a resource and are redirected by the server to another Location, there's generally no problem since GET is both a "safe method" (there should be no side-effects) and an "idempotent method" (multiple calls are no different than a single call). POST, however, is neither safe nor idempotent--if you charge a credit card, you don't want to be charged twice by a redirect! For this reason, *none* of the 3xx responses permit a user-agent (browser) to resubmit a POST on redirection without first confirming the action with the user: ===== ================================= =========== 300 Multiple Choices Confirm with the user 301 Moved Permanently Confirm with the user 302 Found (Object moved temporarily) Confirm with the user 303 See Other GET the new URI--no confirmation 304 Not modified (for conditional GET only--POST should not raise this error) 305 Use Proxy Confirm with the user 307 Temporary Redirect Confirm with the user ===== ================================= =========== However, browsers have historically implemented these restrictions poorly; in particular, many browsers do not force the user to confirm 301, 302 or 307 when redirecting POST. For this reason, CherryPy defaults to 303, which most user-agents appear to have implemented correctly. Therefore, if you raise HTTPRedirect for a POST request, the user-agent will most likely attempt to GET the new URI (without asking for confirmation from the user). 
We realize this is confusing for developers, but it's the safest thing we could do. You are of course free to raise ``HTTPRedirect(uri, status=302)`` or any other 3xx status if you know what you're doing, but given the environment, we couldn't let any of those be the default. Custom Error Handling ===================== .. image:: /refman/cperrors.gif Anticipated HTTP responses -------------------------- The 'error_page' config namespace can be used to provide custom HTML output for expected responses (like 404 Not Found). Supply a filename from which the output will be read. The contents will be interpolated with the values %(status)s, %(message)s, %(traceback)s, and %(version)s using plain old Python `string formatting <http://docs.python.org/2/library/stdtypes.html#string-formatting-operations>`_. :: _cp_config = { 'error_page.404': os.path.join(localDir, "static/index.html") } Beginning in version 3.1, you may also provide a function or other callable as an error_page entry. It will be passed the same status, message, traceback and version arguments that are interpolated into templates:: def error_page_402(status, message, traceback, version): return "Error %s - Well, I'm very sorry but you haven't paid!" % status cherrypy.config.update({'error_page.402': error_page_402}) Also in 3.1, in addition to the numbered error codes, you may also supply "error_page.default" to handle all codes which do not have their own error_page entry. Unanticipated errors -------------------- CherryPy also has a generic error handling mechanism: whenever an unanticipated error occurs in your code, it will call :func:`Request.error_response<cherrypy._cprequest.Request.error_response>` to set the response status, headers, and body. By default, this is the same output as :class:`HTTPError(500) <cherrypy._cperror.HTTPError>`. If you want to provide some other behavior, you generally replace "request.error_response". 
Here is some sample code that shows how to display a custom error message and send an e-mail containing the error:: from cherrypy import _cperror def handle_error(): cherrypy.response.status = 500 cherrypy.response.body = [ "<html><body>Sorry, an error occured</body></html>" ] sendMail('error@domain.com', 'Error in your web app', _cperror.format_exc()) class Root: _cp_config = {'request.error_response': handle_error} Note that you have to explicitly set :attr:`response.body <cherrypy._cprequest.Response.body>` and not simply return an error message as a result. """ from cgi import escape as _escape from sys import exc_info as _exc_info from traceback import format_exception as _format_exception from cherrypy._cpcompat import basestring, bytestr, iteritems, ntob from cherrypy._cpcompat import tonative, urljoin as _urljoin from cherrypy.lib import httputil as _httputil class CherryPyException(Exception): """A base class for CherryPy exceptions.""" pass class TimeoutError(CherryPyException): """Exception raised when Response.timed_out is detected.""" pass class InternalRedirect(CherryPyException): """Exception raised to switch to the handler for a different URL. This exception will redirect processing to another path within the site (without informing the client). Provide the new path as an argument when raising the exception. Provide any params in the querystring for the new URL. """ def __init__(self, path, query_string=""): import cherrypy self.request = cherrypy.serving.request self.query_string = query_string if "?" in path: # Separate any params included in the path path, self.query_string = path.split("?", 1) # Note that urljoin will "do the right thing" whether url is: # 1. a URL relative to root (e.g. "/dummy") # 2. a URL relative to the current path # Note that any query string will be discarded. path = _urljoin(self.request.path_info, path) # Set a 'path' member attribute so that code which traps this # error can have access to it. 
self.path = path CherryPyException.__init__(self, path, self.query_string) class HTTPRedirect(CherryPyException): """Exception raised when the request should be redirected. This exception will force a HTTP redirect to the URL or URL's you give it. The new URL must be passed as the first argument to the Exception, e.g., HTTPRedirect(newUrl). Multiple URLs are allowed in a list. If a URL is absolute, it will be used as-is. If it is relative, it is assumed to be relative to the current cherrypy.request.path_info. If one of the provided URL is a unicode object, it will be encoded using the default encoding or the one passed in parameter. There are multiple types of redirect, from which you can select via the ``status`` argument. If you do not provide a ``status`` arg, it defaults to 303 (or 302 if responding with HTTP/1.0). Examples:: raise cherrypy.HTTPRedirect("") raise cherrypy.HTTPRedirect("/abs/path", 307) raise cherrypy.HTTPRedirect(["path1", "path2?a=1&b=2"], 301) See :ref:`redirectingpost` for additional caveats. """ status = None """The integer HTTP status code to emit.""" urls = None """The list of URL's to emit.""" encoding = 'utf-8' """The encoding when passed urls are not native strings""" def __init__(self, urls, status=None, encoding=None): import cherrypy request = cherrypy.serving.request if isinstance(urls, basestring): urls = [urls] abs_urls = [] for url in urls: url = tonative(url, encoding or self.encoding) # Note that urljoin will "do the right thing" whether url is: # 1. a complete URL with host (e.g. "http://www.example.com/test") # 2. a URL relative to root (e.g. "/dummy") # 3. a URL relative to the current path # Note that any query string in cherrypy.request is discarded. url = _urljoin(cherrypy.url(), url) abs_urls.append(url) self.urls = abs_urls # RFC 2616 indicates a 301 response code fits our goal; however, # browser support for 301 is quite messy. Do 302/303 instead. 
See # http://www.alanflavell.org.uk/www/post-redirect.html if status is None: if request.protocol >= (1, 1): status = 303 else: status = 302 else: status = int(status) if status < 300 or status > 399: raise ValueError("status must be between 300 and 399.") self.status = status CherryPyException.__init__(self, abs_urls, status) def set_response(self): """Modify cherrypy.response status, headers, and body to represent self. CherryPy uses this internally, but you can also use it to create an HTTPRedirect object and set its output without *raising* the exception. """ import cherrypy response = cherrypy.serving.response response.status = status = self.status if status in (300, 301, 302, 303, 307): response.headers['Content-Type'] = "text/html;charset=utf-8" # "The ... URI SHOULD be given by the Location field # in the response." response.headers['Location'] = self.urls[0] # "Unless the request method was HEAD, the entity of the response # SHOULD contain a short hypertext note with a hyperlink to the # new URI(s)." msg = { 300: "This resource can be found at ", 301: "This resource has permanently moved to ", 302: "This resource resides temporarily at ", 303: "This resource can be found at ", 307: "This resource has moved temporarily to ", }[status] msg += '<a href=%s>%s</a>.' from xml.sax import saxutils msgs = [msg % (saxutils.quoteattr(u), u) for u in self.urls] response.body = ntob("<br />\n".join(msgs), 'utf-8') # Previous code may have set C-L, so we have to reset it # (allow finalize to set it). response.headers.pop('Content-Length', None) elif status == 304: # Not Modified. # "The response MUST include the following header fields: # Date, unless its omission is required by section 14.18.1" # The "Date" header should have been set in Response.__init__ # "...the response SHOULD NOT include other entity-headers." 
for key in ('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Location', 'Content-MD5', 'Content-Range', 'Content-Type', 'Expires', 'Last-Modified'): if key in response.headers: del response.headers[key] # "The 304 response MUST NOT contain a message-body." response.body = None # Previous code may have set C-L, so we have to reset it. response.headers.pop('Content-Length', None) elif status == 305: # Use Proxy. # self.urls[0] should be the URI of the proxy. response.headers['Location'] = self.urls[0] response.body = None # Previous code may have set C-L, so we have to reset it. response.headers.pop('Content-Length', None) else: raise ValueError("The %s status code is unknown." % status) def __call__(self): """Use this exception as a request.handler (raise self).""" raise self def clean_headers(status): """Remove any headers which should not apply to an error response.""" import cherrypy response = cherrypy.serving.response # Remove headers which applied to the original content, # but do not apply to the error page. respheaders = response.headers for key in ["Accept-Ranges", "Age", "ETag", "Location", "Retry-After", "Vary", "Content-Encoding", "Content-Length", "Expires", "Content-Location", "Content-MD5", "Last-Modified"]: if key in respheaders: del respheaders[key] if status != 416: # A server sending a response with status code 416 (Requested # range not satisfiable) SHOULD include a Content-Range field # with a byte-range-resp-spec of "*". The instance-length # specifies the current length of the selected resource. # A response with status code 206 (Partial Content) MUST NOT # include a Content-Range field with a byte-range- resp-spec of "*". if "Content-Range" in respheaders: del respheaders["Content-Range"] class HTTPError(CherryPyException): """Exception used to return an HTTP error code (4xx-5xx) to the client. This exception can be used to automatically send a response using a http status code, with an appropriate error page. 
It takes an optional ``status`` argument (which must be between 400 and 599); it defaults to 500 ("Internal Server Error"). It also takes an optional ``message`` argument, which will be returned in the response body. See `RFC2616 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4>`_ for a complete list of available error codes and when to use them. Examples:: raise cherrypy.HTTPError(403) raise cherrypy.HTTPError( "403 Forbidden", "You are not allowed to access this resource.") """ status = None """The HTTP status code. May be of type int or str (with a Reason-Phrase). """ code = None """The integer HTTP status code.""" reason = None """The HTTP Reason-Phrase string.""" def __init__(self, status=500, message=None): self.status = status try: self.code, self.reason, defaultmsg = _httputil.valid_status(status) except ValueError: raise self.__class__(500, _exc_info()[1].args[0]) if self.code < 400 or self.code > 599: raise ValueError("status must be between 400 and 599.") # See http://www.python.org/dev/peps/pep-0352/ # self.message = message self._message = message or defaultmsg CherryPyException.__init__(self, status, message) def set_response(self): """Modify cherrypy.response status, headers, and body to represent self. CherryPy uses this internally, but you can also use it to create an HTTPError object and set its output without *raising* the exception. """ import cherrypy response = cherrypy.serving.response clean_headers(self.code) # In all cases, finalize will be called after this method, # so don't bother cleaning up response values here. 
response.status = self.status tb = None if cherrypy.serving.request.show_tracebacks: tb = format_exc() response.headers.pop('Content-Length', None) content = self.get_error_page(self.status, traceback=tb, message=self._message) response.body = content _be_ie_unfriendly(self.code) def get_error_page(self, *args, **kwargs): return get_error_page(*args, **kwargs) def __call__(self): """Use this exception as a request.handler (raise self).""" raise self class NotFound(HTTPError): """Exception raised when a URL could not be mapped to any handler (404). This is equivalent to raising :class:`HTTPError("404 Not Found") <cherrypy._cperror.HTTPError>`. """ def __init__(self, path=None): if path is None: import cherrypy request = cherrypy.serving.request path = request.script_name + request.path_info self.args = (path,) HTTPError.__init__(self, 404, "The path '%s' was not found." % path) _HTTPErrorTemplate = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta> <title>%(status)s</title> <style type="text/css"> #powered_by { margin-top: 20px; border-top: 2px solid black; font-style: italic; } #traceback { color: red; } </style> </head> <body> <h2>%(status)s</h2> <p>%(message)s</p> <pre id="traceback">%(traceback)s</pre> <div id="powered_by"> <span> Powered by <a href="http://www.cherrypy.org">CherryPy %(version)s</a> </span> </div> </body> </html> ''' def get_error_page(status, **kwargs): """Return an HTML page, containing a pretty error response. status should be an int or a str. kwargs will be interpolated into the page template. """ import cherrypy try: code, reason, message = _httputil.valid_status(status) except ValueError: raise cherrypy.HTTPError(500, _exc_info()[1].args[0]) # We can't use setdefault here, because some # callers send None for kwarg values. 
if kwargs.get('status') is None: kwargs['status'] = "%s %s" % (code, reason) if kwargs.get('message') is None: kwargs['message'] = message if kwargs.get('traceback') is None: kwargs['traceback'] = '' if kwargs.get('version') is None: kwargs['version'] = cherrypy.__version__ for k, v in iteritems(kwargs): if v is None: kwargs[k] = "" else: kwargs[k] = _escape(kwargs[k]) # Use a custom template or callable for the error page? pages = cherrypy.serving.request.error_page error_page = pages.get(code) or pages.get('default') # Default template, can be overridden below. template = _HTTPErrorTemplate if error_page: try: if hasattr(error_page, '__call__'): # The caller function may be setting headers manually, # so we delegate to it completely. We may be returning # an iterator as well as a string here. # # We *must* make sure any content is not unicode. result = error_page(**kwargs) if cherrypy.lib.is_iterator(result): from cherrypy.lib.encoding import UTF8StreamEncoder return UTF8StreamEncoder(result) elif isinstance(result, cherrypy._cpcompat.unicodestr): return result.encode('utf-8') else: if not isinstance(result, cherrypy._cpcompat.bytestr): raise ValueError('error page function did not ' 'return a bytestring, unicodestring or an ' 'iterator - returned object of type %s.' % (type(result).__name__)) return result else: # Load the template from this path. 
template = tonative(open(error_page, 'rb').read()) except: e = _format_exception(*_exc_info())[-1] m = kwargs['message'] if m: m += "<br />" m += "In addition, the custom error page failed:\n<br />%s" % e kwargs['message'] = m response = cherrypy.serving.response response.headers['Content-Type'] = "text/html;charset=utf-8" result = template % kwargs return result.encode('utf-8') _ie_friendly_error_sizes = { 400: 512, 403: 256, 404: 512, 405: 256, 406: 512, 408: 512, 409: 512, 410: 256, 500: 512, 501: 512, 505: 512, } def _be_ie_unfriendly(status): import cherrypy response = cherrypy.serving.response # For some statuses, Internet Explorer 5+ shows "friendly error # messages" instead of our response.body if the body is smaller # than a given size. Fix this by returning a body over that size # (by adding whitespace). # See http://support.microsoft.com/kb/q218155/ s = _ie_friendly_error_sizes.get(status, 0) if s: s += 1 # Since we are issuing an HTTP error status, we assume that # the entity is short, and we should just collapse it. content = response.collapse_body() l = len(content) if l and l < s: # IN ADDITION: the response must be written to IE # in one chunk or it will still get replaced! Bah. content = content + (ntob(" ") * (s - l)) response.body = content response.headers['Content-Length'] = str(len(content)) def format_exc(exc=None): """Return exc (or sys.exc_info if None), formatted.""" try: if exc is None: exc = _exc_info() if exc == (None, None, None): return "" import traceback return "".join(traceback.format_exception(*exc)) finally: del exc def bare_error(extrabody=None): """Produce status, headers, body for a critical error. Returns a triple without calling any other questionable functions, so it should be as error-free as possible. Call it from an HTTP server if you get errors outside of the request. If extrabody is None, a friendly but rather unhelpful error message is set in the body. If extrabody is a string, it will be appended as-is to the body. 
""" # The whole point of this function is to be a last line-of-defense # in handling errors. That is, it must not raise any errors itself; # it cannot be allowed to fail. Therefore, don't add to it! # In particular, don't call any other CP functions. body = ntob("Unrecoverable error in the server.") if extrabody is not None: if not isinstance(extrabody, bytestr): extrabody = extrabody.encode('utf-8') body += ntob("\n") + extrabody return (ntob("500 Internal Server Error"), [(ntob('Content-Type'), ntob('text/plain')), (ntob('Content-Length'), ntob(str(len(body)), 'ISO-8859-1'))], [body])
22,680
Python
.py
481
39.164241
106
0.639663
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,882
__init__.py
evilhero_mylar/lib/cherrypy/__init__.py
"""CherryPy is a pythonic, object-oriented HTTP framework. CherryPy consists of not one, but four separate API layers. The APPLICATION LAYER is the simplest. CherryPy applications are written as a tree of classes and methods, where each branch in the tree corresponds to a branch in the URL path. Each method is a 'page handler', which receives GET and POST params as keyword arguments, and returns or yields the (HTML) body of the response. The special method name 'index' is used for paths that end in a slash, and the special method name 'default' is used to handle multiple paths via a single handler. This layer also includes: * the 'exposed' attribute (and cherrypy.expose) * cherrypy.quickstart() * _cp_config attributes * cherrypy.tools (including cherrypy.session) * cherrypy.url() The ENVIRONMENT LAYER is used by developers at all levels. It provides information about the current request and response, plus the application and server environment, via a (default) set of top-level objects: * cherrypy.request * cherrypy.response * cherrypy.engine * cherrypy.server * cherrypy.tree * cherrypy.config * cherrypy.thread_data * cherrypy.log * cherrypy.HTTPError, NotFound, and HTTPRedirect * cherrypy.lib The EXTENSION LAYER allows advanced users to construct and share their own plugins. It consists of: * Hook API * Tool API * Toolbox API * Dispatch API * Config Namespace API Finally, there is the CORE LAYER, which uses the core API's to construct the default components which are available at higher layers. You can think of the default components as the 'reference implementation' for CherryPy. Megaframeworks (and advanced users) may replace the default components with customized or extended components. The core API's are: * Application API * Engine API * Request API * Server API * WSGI API These API's are described in the `CherryPy specification <https://bitbucket.org/cherrypy/cherrypy/wiki/CherryPySpec>`_. 
""" __version__ = "3.6.0" from cherrypy._cpcompat import urljoin as _urljoin, urlencode as _urlencode from cherrypy._cpcompat import basestring, unicodestr, set from cherrypy._cperror import HTTPError, HTTPRedirect, InternalRedirect from cherrypy._cperror import NotFound, CherryPyException, TimeoutError from cherrypy import _cpdispatch as dispatch from cherrypy import _cptools tools = _cptools.default_toolbox Tool = _cptools.Tool from cherrypy import _cprequest from cherrypy.lib import httputil as _httputil from cherrypy import _cptree tree = _cptree.Tree() from cherrypy._cptree import Application from cherrypy import _cpwsgi as wsgi from cherrypy import process try: from cherrypy.process import win32 engine = win32.Win32Bus() engine.console_control_handler = win32.ConsoleCtrlHandler(engine) del win32 except ImportError: engine = process.bus # Timeout monitor. We add two channels to the engine # to which cherrypy.Application will publish. engine.listeners['before_request'] = set() engine.listeners['after_request'] = set() class _TimeoutMonitor(process.plugins.Monitor): def __init__(self, bus): self.servings = [] process.plugins.Monitor.__init__(self, bus, self.run) def before_request(self): self.servings.append((serving.request, serving.response)) def after_request(self): try: self.servings.remove((serving.request, serving.response)) except ValueError: pass def run(self): """Check timeout on all responses. 
(Internal)""" for req, resp in self.servings: resp.check_timeout() engine.timeout_monitor = _TimeoutMonitor(engine) engine.timeout_monitor.subscribe() engine.autoreload = process.plugins.Autoreloader(engine) engine.autoreload.subscribe() engine.thread_manager = process.plugins.ThreadManager(engine) engine.thread_manager.subscribe() engine.signal_handler = process.plugins.SignalHandler(engine) class _HandleSignalsPlugin(object): """Handle signals from other processes based on the configured platform handlers above.""" def __init__(self, bus): self.bus = bus def subscribe(self): """Add the handlers based on the platform""" if hasattr(self.bus, "signal_handler"): self.bus.signal_handler.subscribe() if hasattr(self.bus, "console_control_handler"): self.bus.console_control_handler.subscribe() engine.signals = _HandleSignalsPlugin(engine) from cherrypy import _cpserver server = _cpserver.Server() server.subscribe() def quickstart(root=None, script_name="", config=None): """Mount the given root, start the builtin server (and engine), then block. root: an instance of a "controller class" (a collection of page handler methods) which represents the root of the application. script_name: a string containing the "mount point" of the application. This should start with a slash, and be the path portion of the URL at which to mount the given root. For example, if root.index() will handle requests to "http://www.example.com:8080/dept/app1/", then the script_name argument would be "/dept/app1". It MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not "/"). config: a file or dict containing application config. If this contains a [global] section, those entries will be used in the global (site-wide) config. 
""" if config: _global_conf_alias.update(config) tree.mount(root, script_name, config) engine.signals.subscribe() engine.start() engine.block() from cherrypy._cpcompat import threadlocal as _local class _Serving(_local): """An interface for registering request and response objects. Rather than have a separate "thread local" object for the request and the response, this class works as a single threadlocal container for both objects (and any others which developers wish to define). In this way, we can easily dump those objects when we stop/start a new HTTP conversation, yet still refer to them as module-level globals in a thread-safe way. """ request = _cprequest.Request(_httputil.Host("127.0.0.1", 80), _httputil.Host("127.0.0.1", 1111)) """ The request object for the current thread. In the main thread, and any threads which are not receiving HTTP requests, this is None.""" response = _cprequest.Response() """ The response object for the current thread. In the main thread, and any threads which are not receiving HTTP requests, this is None.""" def load(self, request, response): self.request = request self.response = response def clear(self): """Remove all attributes of self.""" self.__dict__.clear() serving = _Serving() class _ThreadLocalProxy(object): __slots__ = ['__attrname__', '__dict__'] def __init__(self, attrname): self.__attrname__ = attrname def __getattr__(self, name): child = getattr(serving, self.__attrname__) return getattr(child, name) def __setattr__(self, name, value): if name in ("__attrname__", ): object.__setattr__(self, name, value) else: child = getattr(serving, self.__attrname__) setattr(child, name, value) def __delattr__(self, name): child = getattr(serving, self.__attrname__) delattr(child, name) def _get_dict(self): child = getattr(serving, self.__attrname__) d = child.__class__.__dict__.copy() d.update(child.__dict__) return d __dict__ = property(_get_dict) def __getitem__(self, key): child = getattr(serving, self.__attrname__) return 
child[key] def __setitem__(self, key, value): child = getattr(serving, self.__attrname__) child[key] = value def __delitem__(self, key): child = getattr(serving, self.__attrname__) del child[key] def __contains__(self, key): child = getattr(serving, self.__attrname__) return key in child def __len__(self): child = getattr(serving, self.__attrname__) return len(child) def __nonzero__(self): child = getattr(serving, self.__attrname__) return bool(child) # Python 3 __bool__ = __nonzero__ # Create request and response object (the same objects will be used # throughout the entire life of the webserver, but will redirect # to the "serving" object) request = _ThreadLocalProxy('request') response = _ThreadLocalProxy('response') # Create thread_data object as a thread-specific all-purpose storage class _ThreadData(_local): """A container for thread-specific data.""" thread_data = _ThreadData() # Monkeypatch pydoc to allow help() to go through the threadlocal proxy. # Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve. # The only other way would be to change what is returned from type(request) # and that's not possible in pure Python (you'd have to fake ob_type). def _cherrypy_pydoc_resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, _ThreadLocalProxy): thing = getattr(serving, thing.__attrname__) return _pydoc._builtin_resolve(thing, forceload) try: import pydoc as _pydoc _pydoc._builtin_resolve = _pydoc.resolve _pydoc.resolve = _cherrypy_pydoc_resolve except ImportError: pass from cherrypy import _cplogging class _GlobalLogManager(_cplogging.LogManager): """A site-wide LogManager; routes to app.log or global log as appropriate. This :class:`LogManager<cherrypy._cplogging.LogManager>` implements cherrypy.log() and cherrypy.log.access(). If either function is called during a request, the message will be sent to the logger for the current Application. 
If they are called outside of a request, the message will be sent to the site-wide logger. """ def __call__(self, *args, **kwargs): """Log the given message to the app.log or global log as appropriate. """ # Do NOT use try/except here. See # https://bitbucket.org/cherrypy/cherrypy/issue/945 if hasattr(request, 'app') and hasattr(request.app, 'log'): log = request.app.log else: log = self return log.error(*args, **kwargs) def access(self): """Log an access message to the app.log or global log as appropriate. """ try: return request.app.log.access() except AttributeError: return _cplogging.LogManager.access(self) log = _GlobalLogManager() # Set a default screen handler on the global log. log.screen = True log.error_file = '' # Using an access file makes CP about 10% slower. Leave off by default. log.access_file = '' def _buslog(msg, level): log.error(msg, 'ENGINE', severity=level) engine.subscribe('log', _buslog) # Helper functions for CP apps # def expose(func=None, alias=None): """Expose the function, optionally providing an alias or set of aliases.""" def expose_(func): func.exposed = True if alias is not None: if isinstance(alias, basestring): parents[alias.replace(".", "_")] = func else: for a in alias: parents[a.replace(".", "_")] = func return func import sys import types if isinstance(func, (types.FunctionType, types.MethodType)): if alias is None: # @expose func.exposed = True return func else: # func = expose(func, alias) parents = sys._getframe(1).f_locals return expose_(func) elif func is None: if alias is None: # @expose() parents = sys._getframe(1).f_locals return expose_ else: # @expose(alias="alias") or # @expose(alias=["alias1", "alias2"]) parents = sys._getframe(1).f_locals return expose_ else: # @expose("alias") or # @expose(["alias1", "alias2"]) parents = sys._getframe(1).f_locals alias = func return expose_ def popargs(*args, **kwargs): """A decorator for _cp_dispatch (cherrypy.dispatch.Dispatcher.dispatch_method_name). 
Optional keyword argument: handler=(Object or Function) Provides a _cp_dispatch function that pops off path segments into cherrypy.request.params under the names specified. The dispatch is then forwarded on to the next vpath element. Note that any existing (and exposed) member function of the class that popargs is applied to will override that value of the argument. For instance, if you have a method named "list" on the class decorated with popargs, then accessing "/list" will call that function instead of popping it off as the requested parameter. This restriction applies to all _cp_dispatch functions. The only way around this restriction is to create a "blank class" whose only function is to provide _cp_dispatch. If there are path elements after the arguments, or more arguments are requested than are available in the vpath, then the 'handler' keyword argument specifies the next object to handle the parameterized request. If handler is not specified or is None, then self is used. If handler is a function rather than an instance, then that function will be called with the args specified and the return value from that function used as the next object INSTEAD of adding the parameters to cherrypy.request.args. This decorator may be used in one of two ways: As a class decorator: @cherrypy.popargs('year', 'month', 'day') class Blog: def index(self, year=None, month=None, day=None): #Process the parameters here; any url like #/, /2009, /2009/12, or /2009/12/31 #will fill in the appropriate parameters. def create(self): #This link will still be available at /create. Defined functions #take precedence over arguments. Or as a member of a class: class Blog: _cp_dispatch = cherrypy.popargs('year', 'month', 'day') #... The handler argument may be used to mix arguments with built in functions. 
For instance, the following setup allows different activities at the day, month, and year level: class DayHandler: def index(self, year, month, day): #Do something with this day; probably list entries def delete(self, year, month, day): #Delete all entries for this day @cherrypy.popargs('day', handler=DayHandler()) class MonthHandler: def index(self, year, month): #Do something with this month; probably list entries def delete(self, year, month): #Delete all entries for this month @cherrypy.popargs('month', handler=MonthHandler()) class YearHandler: def index(self, year): #Do something with this year #... @cherrypy.popargs('year', handler=YearHandler()) class Root: def index(self): #... """ # Since keyword arg comes after *args, we have to process it ourselves # for lower versions of python. handler = None handler_call = False for k, v in kwargs.items(): if k == 'handler': handler = v else: raise TypeError( "cherrypy.popargs() got an unexpected keyword argument '{0}'" .format(k) ) import inspect if handler is not None \ and (hasattr(handler, '__call__') or inspect.isclass(handler)): handler_call = True def decorated(cls_or_self=None, vpath=None): if inspect.isclass(cls_or_self): # cherrypy.popargs is a class decorator cls = cls_or_self setattr(cls, dispatch.Dispatcher.dispatch_method_name, decorated) return cls # We're in the actual function self = cls_or_self parms = {} for arg in args: if not vpath: break parms[arg] = vpath.pop(0) if handler is not None: if handler_call: return handler(**parms) else: request.params.update(parms) return handler request.params.update(parms) # If we are the ultimate handler, then to prevent our _cp_dispatch # from being called again, we will resolve remaining elements through # getattr() directly. if vpath: return getattr(self, vpath.pop(0), None) else: return self return decorated def url(path="", qs="", script_name=None, base=None, relative=None): """Create an absolute URL for the given path. 
If 'path' starts with a slash ('/'), this will return (base + script_name + path + qs). If it does not start with a slash, this returns (base + script_name [+ request.path_info] + path + qs). If script_name is None, cherrypy.request will be used to find a script_name, if available. If base is None, cherrypy.request.base will be used (if available). Note that you can use cherrypy.tools.proxy to change this. Finally, note that this function can be used to obtain an absolute URL for the current request path (minus the querystring) by passing no args. If you call url(qs=cherrypy.request.query_string), you should get the original browser URL (assuming no internal redirections). If relative is None or not provided, request.app.relative_urls will be used (if available, else False). If False, the output will be an absolute URL (including the scheme, host, vhost, and script_name). If True, the output will instead be a URL that is relative to the current request path, perhaps including '..' atoms. If relative is the string 'server', the output will instead be a URL that is relative to the server root; i.e., it will start with a slash. """ if isinstance(qs, (tuple, list, dict)): qs = _urlencode(qs) if qs: qs = '?' + qs if request.app: if not path.startswith("/"): # Append/remove trailing slash from path_info as needed # (this is to support mistyped URL's without redirecting; # if you want to redirect, use tools.trailing_slash). pi = request.path_info if request.is_index is True: if not pi.endswith('/'): pi = pi + '/' elif request.is_index is False: if pi.endswith('/') and pi != '/': pi = pi[:-1] if path == "": path = pi else: path = _urljoin(pi, path) if script_name is None: script_name = request.script_name if base is None: base = request.base newurl = base + script_name + path + qs else: # No request.app (we're being called outside a request). # We'll have to guess the base from server.* attributes. 
# This will produce very different results from the above # if you're using vhosts or tools.proxy. if base is None: base = server.base() path = (script_name or "") + path newurl = base + path + qs if './' in newurl: # Normalize the URL by removing ./ and ../ atoms = [] for atom in newurl.split('/'): if atom == '.': pass elif atom == '..': atoms.pop() else: atoms.append(atom) newurl = '/'.join(atoms) # At this point, we should have a fully-qualified absolute URL. if relative is None: relative = getattr(request.app, "relative_urls", False) # See http://www.ietf.org/rfc/rfc2396.txt if relative == 'server': # "A relative reference beginning with a single slash character is # termed an absolute-path reference, as defined by <abs_path>..." # This is also sometimes called "server-relative". newurl = '/' + '/'.join(newurl.split('/', 3)[3:]) elif relative: # "A relative reference that does not begin with a scheme name # or a slash character is termed a relative-path reference." old = url(relative=False).split('/')[:-1] new = newurl.split('/') while old and new: a, b = old[0], new[0] if a != b: break old.pop(0) new.pop(0) new = (['..'] * len(old)) + new newurl = '/'.join(new) return newurl # import _cpconfig last so it can reference other top-level objects from cherrypy import _cpconfig # Use _global_conf_alias so quickstart can use 'config' as an arg # without shadowing cherrypy.config. config = _global_conf_alias = _cpconfig.Config() config.defaults = { 'tools.log_tracebacks.on': True, 'tools.log_headers.on': True, 'tools.trailing_slash.on': True, 'tools.encode.on': True } config.namespaces["log"] = lambda k, v: setattr(log, k, v) config.namespaces["checker"] = lambda k, v: setattr(checker, k, v) # Must reset to get our defaults applied. config.reset() from cherrypy import _cpchecker checker = _cpchecker.Checker() engine.subscribe('start', checker)
21,722
Python
.py
510
35.639216
119
0.661936
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,883
_cpserver.py
evilhero_mylar/lib/cherrypy/_cpserver.py
"""Manage HTTP servers with CherryPy.""" import warnings import cherrypy from cherrypy.lib import attributes from cherrypy._cpcompat import basestring, py3k # We import * because we want to export check_port # et al as attributes of this module. from cherrypy.process.servers import * class Server(ServerAdapter): """An adapter for an HTTP server. You can set attributes (like socket_host and socket_port) on *this* object (which is probably cherrypy.server), and call quickstart. For example:: cherrypy.server.socket_port = 80 cherrypy.quickstart() """ socket_port = 8080 """The TCP port on which to listen for connections.""" _socket_host = '127.0.0.1' def _get_socket_host(self): return self._socket_host def _set_socket_host(self, value): if value == '': raise ValueError("The empty string ('') is not an allowed value. " "Use '0.0.0.0' instead to listen on all active " "interfaces (INADDR_ANY).") self._socket_host = value socket_host = property( _get_socket_host, _set_socket_host, doc="""The hostname or IP address on which to listen for connections. Host values may be any IPv4 or IPv6 address, or any valid hostname. The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). The string '0.0.0.0' is a special IPv4 entry meaning "any active interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for IPv6. The empty string or None are not allowed.""") socket_file = None """If given, the name of the UNIX socket to use instead of TCP/IP. 
When this option is not None, the `socket_host` and `socket_port` options are ignored.""" socket_queue_size = 5 """The 'backlog' argument to socket.listen(); specifies the maximum number of queued connections (default 5).""" socket_timeout = 10 """The timeout in seconds for accepted connections (default 10).""" accepted_queue_size = -1 """The maximum number of requests which will be queued up before the server refuses to accept it (default -1, meaning no limit).""" accepted_queue_timeout = 10 """The timeout in seconds for attempting to add a request to the queue when the queue is full (default 10).""" shutdown_timeout = 5 """The time to wait for HTTP worker threads to clean up.""" protocol_version = 'HTTP/1.1' """The version string to write in the Status-Line of all HTTP responses, for example, "HTTP/1.1" (the default). Depending on the HTTP server used, this should also limit the supported features used in the response.""" thread_pool = 10 """The number of worker threads to start up in the pool.""" thread_pool_max = -1 """The maximum size of the worker-thread pool. Use -1 to indicate no limit. """ max_request_header_size = 500 * 1024 """The maximum number of bytes allowable in the request headers. If exceeded, the HTTP server should return "413 Request Entity Too Large". """ max_request_body_size = 100 * 1024 * 1024 """The maximum number of bytes allowable in the request body. If exceeded, the HTTP server should return "413 Request Entity Too Large".""" instance = None """If not None, this should be an HTTP server instance (such as CPWSGIServer) which cherrypy.server will control. 
Use this when you need more control over object instantiation than is available in the various configuration options.""" ssl_context = None """When using PyOpenSSL, an instance of SSL.Context.""" ssl_certificate = None """The filename of the SSL certificate to use.""" ssl_certificate_chain = None """When using PyOpenSSL, the certificate chain to pass to Context.load_verify_locations.""" ssl_private_key = None """The filename of the private key to use with SSL.""" if py3k: ssl_module = 'builtin' """The name of a registered SSL adaptation module to use with the builtin WSGI server. Builtin options are: 'builtin' (to use the SSL library built into recent versions of Python). You may also register your own classes in the wsgiserver.ssl_adapters dict.""" else: ssl_module = 'pyopenssl' """The name of a registered SSL adaptation module to use with the builtin WSGI server. Builtin options are 'builtin' (to use the SSL library built into recent versions of Python) and 'pyopenssl' (to use the PyOpenSSL project, which you must install separately). You may also register your own classes in the wsgiserver.ssl_adapters dict.""" statistics = False """Turns statistics-gathering on or off for aware HTTP servers.""" nodelay = True """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" wsgi_version = (1, 0) """The WSGI version tuple to use with the builtin WSGI server. The provided options are (1, 0) [which includes support for PEP 3333, which declares it covers WSGI version 1.0.1 but still mandates the wsgi.version (1, 0)] and ('u', 0), an experimental unicode version. 
You may create and register your own experimental versions of the WSGI protocol by adding custom classes to the wsgiserver.wsgi_gateways dict.""" def __init__(self): self.bus = cherrypy.engine self.httpserver = None self.interrupt = None self.running = False def httpserver_from_self(self, httpserver=None): """Return a (httpserver, bind_addr) pair based on self attributes.""" if httpserver is None: httpserver = self.instance if httpserver is None: from cherrypy import _cpwsgi_server httpserver = _cpwsgi_server.CPWSGIServer(self) if isinstance(httpserver, basestring): # Is anyone using this? Can I add an arg? httpserver = attributes(httpserver)(self) return httpserver, self.bind_addr def start(self): """Start the HTTP server.""" if not self.httpserver: self.httpserver, self.bind_addr = self.httpserver_from_self() ServerAdapter.start(self) start.priority = 75 def _get_bind_addr(self): if self.socket_file: return self.socket_file if self.socket_host is None and self.socket_port is None: return None return (self.socket_host, self.socket_port) def _set_bind_addr(self, value): if value is None: self.socket_file = None self.socket_host = None self.socket_port = None elif isinstance(value, basestring): self.socket_file = value self.socket_host = None self.socket_port = None else: try: self.socket_host, self.socket_port = value self.socket_file = None except ValueError: raise ValueError("bind_addr must be a (host, port) tuple " "(for TCP sockets) or a string (for Unix " "domain sockets), not %r" % value) bind_addr = property( _get_bind_addr, _set_bind_addr, doc='A (host, port) tuple for TCP sockets or ' 'a str for Unix domain sockets.') def base(self): """Return the base (scheme://host[:port] or sock file) for this server. """ if self.socket_file: return self.socket_file host = self.socket_host if host in ('0.0.0.0', '::'): # 0.0.0.0 is INADDR_ANY and :: is IN6ADDR_ANY. # Look up the host name, which should be the # safest thing to spit out in a URL. 
import socket host = socket.gethostname() port = self.socket_port if self.ssl_certificate: scheme = "https" if port != 443: host += ":%s" % port else: scheme = "http" if port != 80: host += ":%s" % port return "%s://%s" % (scheme, host)
8,275
Python
.py
182
36.972527
79
0.637607
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,884
_cptree.py
evilhero_mylar/lib/cherrypy/_cptree.py
"""CherryPy Application and Tree objects.""" import os import cherrypy from cherrypy._cpcompat import ntou, py3k from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools from cherrypy.lib import httputil class Application(object): """A CherryPy Application. Servers and gateways should not instantiate Request objects directly. Instead, they should ask an Application object for a request object. An instance of this class may also be used as a WSGI callable (WSGI application object) for itself. """ root = None """The top-most container of page handlers for this app. Handlers should be arranged in a hierarchy of attributes, matching the expected URI hierarchy; the default dispatcher then searches this hierarchy for a matching handler. When using a dispatcher other than the default, this value may be None.""" config = {} """A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict of {key: value} pairs.""" namespaces = _cpconfig.NamespaceSet() toolboxes = {'tools': cherrypy.tools} log = None """A LogManager instance. See _cplogging.""" wsgiapp = None """A CPWSGIApp instance. See _cpwsgi.""" request_class = _cprequest.Request response_class = _cprequest.Response relative_urls = False def __init__(self, root, script_name="", config=None): self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root) self.root = root self.script_name = script_name self.wsgiapp = _cpwsgi.CPWSGIApp(self) self.namespaces = self.namespaces.copy() self.namespaces["log"] = lambda k, v: setattr(self.log, k, v) self.namespaces["wsgi"] = self.wsgiapp.namespace_handler self.config = self.__class__.config.copy() if config: self.merge(config) def __repr__(self): return "%s.%s(%r, %r)" % (self.__module__, self.__class__.__name__, self.root, self.script_name) script_name_doc = """The URI "mount point" for this app. 
A mount point is that portion of the URI which is constant for all URIs that are serviced by this application; it does not include scheme, host, or proxy ("virtual host") portions of the URI. For example, if script_name is "/my/cool/app", then the URL "http://www.example.com/my/cool/app/page1" might be handled by a "page1" method on the root object. The value of script_name MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not "/"). If script_name is explicitly set to None, then the script_name will be provided for each call from request.wsgi_environ['SCRIPT_NAME']. """ def _get_script_name(self): if self._script_name is not None: return self._script_name # A `_script_name` with a value of None signals that the script name # should be pulled from WSGI environ. return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip("/") def _set_script_name(self, value): if value: value = value.rstrip("/") self._script_name = value script_name = property(fget=_get_script_name, fset=_set_script_name, doc=script_name_doc) def merge(self, config): """Merge the given config into self.config.""" _cpconfig.merge(self.config, config) # Handle namespaces specified in config. 
self.namespaces(self.config.get("/", {})) def find_config(self, path, key, default=None): """Return the most-specific value for key along path, or default.""" trail = path or "/" while trail: nodeconf = self.config.get(trail, {}) if key in nodeconf: return nodeconf[key] lastslash = trail.rfind("/") if lastslash == -1: break elif lastslash == 0 and trail != "/": trail = "/" else: trail = trail[:lastslash] return default def get_serving(self, local, remote, scheme, sproto): """Create and return a Request and Response object.""" req = self.request_class(local, remote, scheme, sproto) req.app = self for name, toolbox in self.toolboxes.items(): req.namespaces[name] = toolbox resp = self.response_class() cherrypy.serving.load(req, resp) cherrypy.engine.publish('acquire_thread') cherrypy.engine.publish('before_request') return req, resp def release_serving(self): """Release the current serving (request and response).""" req = cherrypy.serving.request cherrypy.engine.publish('after_request') try: req.close() except: cherrypy.log(traceback=True, severity=40) cherrypy.serving.clear() def __call__(self, environ, start_response): return self.wsgiapp(environ, start_response) class Tree(object): """A registry of CherryPy applications, mounted at diverse points. An instance of this class may also be used as a WSGI callable (WSGI application object), in which case it dispatches to all mounted apps. """ apps = {} """ A dict of the form {script name: application}, where "script name" is a string declaring the URI mount point (no trailing slash), and "application" is an instance of cherrypy.Application (or an arbitrary WSGI callable if you happen to be using a WSGI server).""" def __init__(self): self.apps = {} def mount(self, root, script_name="", config=None): """Mount a new app from a root object, script_name, and config. root An instance of a "controller class" (a collection of page handler methods) which represents the root of the application. 
This may also be an Application instance, or None if using a dispatcher other than the default. script_name A string containing the "mount point" of the application. This should start with a slash, and be the path portion of the URL at which to mount the given root. For example, if root.index() will handle requests to "http://www.example.com:8080/dept/app1/", then the script_name argument would be "/dept/app1". It MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not "/"). config A file or dict containing application config. """ if script_name is None: raise TypeError( "The 'script_name' argument may not be None. Application " "objects may, however, possess a script_name of None (in " "order to inpect the WSGI environ for SCRIPT_NAME upon each " "request). You cannot mount such Applications on this Tree; " "you must pass them to a WSGI server interface directly.") # Next line both 1) strips trailing slash and 2) maps "/" -> "". script_name = script_name.rstrip("/") if isinstance(root, Application): app = root if script_name != "" and script_name != app.script_name: raise ValueError( "Cannot specify a different script name and pass an " "Application instance to cherrypy.mount") script_name = app.script_name else: app = Application(root, script_name) # If mounted at "", add favicon.ico if (script_name == "" and root is not None and not hasattr(root, "favicon_ico")): favicon = os.path.join(os.getcwd(), os.path.dirname(__file__), "favicon.ico") root.favicon_ico = tools.staticfile.handler(favicon) if config: app.merge(config) self.apps[script_name] = app return app def graft(self, wsgi_callable, script_name=""): """Mount a wsgi callable at the given script_name.""" # Next line both 1) strips trailing slash and 2) maps "/" -> "". script_name = script_name.rstrip("/") self.apps[script_name] = wsgi_callable def script_name(self, path=None): """The script_name of the app at the given path, or None. 
If path is None, cherrypy.request is used. """ if path is None: try: request = cherrypy.serving.request path = httputil.urljoin(request.script_name, request.path_info) except AttributeError: return None while True: if path in self.apps: return path if path == "": return None # Move one node up the tree and try again. path = path[:path.rfind("/")] def __call__(self, environ, start_response): # If you're calling this, then you're probably setting SCRIPT_NAME # to '' (some WSGI servers always set SCRIPT_NAME to ''). # Try to look up the app using the full path. env1x = environ if environ.get(ntou('wsgi.version')) == (ntou('u'), 0): env1x = _cpwsgi.downgrade_wsgi_ux_to_1x(environ) path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''), env1x.get('PATH_INFO', '')) sn = self.script_name(path or "/") if sn is None: start_response('404 Not Found', []) return [] app = self.apps[sn] # Correct the SCRIPT_NAME and PATH_INFO environ entries. environ = environ.copy() if not py3k: if environ.get(ntou('wsgi.version')) == (ntou('u'), 0): # Python 2/WSGI u.0: all strings MUST be of type unicode enc = environ[ntou('wsgi.url_encoding')] environ[ntou('SCRIPT_NAME')] = sn.decode(enc) environ[ntou('PATH_INFO')] = path[ len(sn.rstrip("/")):].decode(enc) else: # Python 2/WSGI 1.x: all strings MUST be of type str environ['SCRIPT_NAME'] = sn environ['PATH_INFO'] = path[len(sn.rstrip("/")):] else: if environ.get(ntou('wsgi.version')) == (ntou('u'), 0): # Python 3/WSGI u.0: all strings MUST be full unicode environ['SCRIPT_NAME'] = sn environ['PATH_INFO'] = path[len(sn.rstrip("/")):] else: # Python 3/WSGI 1.x: all strings MUST be ISO-8859-1 str environ['SCRIPT_NAME'] = sn.encode( 'utf-8').decode('ISO-8859-1') environ['PATH_INFO'] = path[ len(sn.rstrip("/")):].encode('utf-8').decode('ISO-8859-1') return app(environ, start_response)
11,085
Python
.py
233
36.95279
79
0.598183
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,885
_cpreqbody.py
evilhero_mylar/lib/cherrypy/_cpreqbody.py
"""Request body processing for CherryPy. .. versionadded:: 3.2 Application authors have complete control over the parsing of HTTP request entities. In short, :attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>` is now always set to an instance of :class:`RequestBody<cherrypy._cpreqbody.RequestBody>`, and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`. When an HTTP request includes an entity body, it is often desirable to provide that information to applications in a form other than the raw bytes. Different content types demand different approaches. Examples: * For a GIF file, we want the raw bytes in a stream. * An HTML form is better parsed into its component fields, and each text field decoded from bytes to unicode. * A JSON body should be deserialized into a Python dict or list. When the request contains a Content-Type header, the media type is used as a key to look up a value in the :attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict. If the full media type is not found, then the major type is tried; for example, if no processor is found for the 'image/jpeg' type, then we look for a processor for the 'image' types altogether. If neither the full type nor the major type has a matching processor, then a default processor is used (:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most types, this means no processing is done, and the body is left unread as a raw byte stream. Processors are configurable in an 'on_start_resource' hook. Some processors, especially those for the 'text' types, attempt to decode bytes to unicode. If the Content-Type request header includes a 'charset' parameter, this is used to decode the entity. Otherwise, one or more default charsets may be attempted, although this decision is up to each processor. 
If a processor successfully decodes an Entity or Part, it should set the :attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute on the Entity or Part to the name of the successful charset, so that applications can easily re-encode or transcode the value if they wish. If the Content-Type of the request entity is of major type 'multipart', then the above parsing process, and possibly a decoding process, is performed for each part. For both the full entity and multipart parts, a Content-Disposition header may be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and :attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the request.body or the Part. .. _custombodyprocessors: Custom Processors ================= You can add your own processors for any specific or major MIME type. Simply add it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a hook/tool that runs at ``on_start_resource`` or ``before_request_body``. Here's the built-in JSON tool for an example:: def json_in(force=True, debug=False): request = cherrypy.serving.request def json_processor(entity): \"""Read application/json data into request.json.\""" if not entity.headers.get("Content-Length", ""): raise cherrypy.HTTPError(411) body = entity.fp.read() try: request.json = json_decode(body) except ValueError: raise cherrypy.HTTPError(400, 'Invalid JSON document') if force: request.body.processors.clear() request.body.default_proc = cherrypy.HTTPError( 415, 'Expected an application/json content type') request.body.processors['application/json'] = json_processor We begin by defining a new ``json_processor`` function to stick in the ``processors`` dictionary. All processor functions take a single argument, the ``Entity`` instance they are to process. It will be called whenever a request is received (for those URI's where the tool is turned on) which has a ``Content-Type`` of "application/json". 
First, it checks for a valid ``Content-Length`` (raising 411 if not valid), then reads the remaining bytes on the socket. The ``fp`` object knows its own length, so it won't hang waiting for data that never arrives. It will return when all data has been read. Then, we decode those bytes using Python's built-in ``json`` module, and stick the decoded result onto ``request.json`` . If it cannot be decoded, we raise 400. If the "force" argument is True (the default), the ``Tool`` clears the ``processors`` dict so that request entities of other ``Content-Types`` aren't parsed at all. Since there's no entry for those invalid MIME types, the ``default_proc`` method of ``cherrypy.request.body`` is called. But this does nothing by default (usually to provide the page handler an opportunity to handle it.) But in our case, we want to raise 415, so we replace ``request.body.default_proc`` with the error (``HTTPError`` instances, when called, raise themselves). If we were defining a custom processor, we can do so without making a ``Tool``. Just add the config entry:: request.body.processors = {'application/json': json_processor} Note that you can only replace the ``processors`` dict wholesale this way, not update the existing one. 
""" try: from io import DEFAULT_BUFFER_SIZE except ImportError: DEFAULT_BUFFER_SIZE = 8192 import re import sys import tempfile try: from urllib import unquote_plus except ImportError: def unquote_plus(bs): """Bytes version of urllib.parse.unquote_plus.""" bs = bs.replace(ntob('+'), ntob(' ')) atoms = bs.split(ntob('%')) for i in range(1, len(atoms)): item = atoms[i] try: pct = int(item[:2], 16) atoms[i] = bytes([pct]) + item[2:] except ValueError: pass return ntob('').join(atoms) import cherrypy from cherrypy._cpcompat import basestring, ntob, ntou from cherrypy.lib import httputil # ------------------------------- Processors -------------------------------- # def process_urlencoded(entity): """Read application/x-www-form-urlencoded data into entity.params.""" qs = entity.fp.read() for charset in entity.attempt_charsets: try: params = {} for aparam in qs.split(ntob('&')): for pair in aparam.split(ntob(';')): if not pair: continue atoms = pair.split(ntob('='), 1) if len(atoms) == 1: atoms.append(ntob('')) key = unquote_plus(atoms[0]).decode(charset) value = unquote_plus(atoms[1]).decode(charset) if key in params: if not isinstance(params[key], list): params[key] = [params[key]] params[key].append(value) else: params[key] = value except UnicodeDecodeError: pass else: entity.charset = charset break else: raise cherrypy.HTTPError( 400, "The request entity could not be decoded. The following " "charsets were attempted: %s" % repr(entity.attempt_charsets)) # Now that all values have been successfully parsed and decoded, # apply them to the entity.params dict. 
for key, value in params.items(): if key in entity.params: if not isinstance(entity.params[key], list): entity.params[key] = [entity.params[key]] entity.params[key].append(value) else: entity.params[key] = value def process_multipart(entity): """Read all multipart parts into entity.parts.""" ib = "" if 'boundary' in entity.content_type.params: # http://tools.ietf.org/html/rfc2046#section-5.1.1 # "The grammar for parameters on the Content-type field is such that it # is often necessary to enclose the boundary parameter values in quotes # on the Content-type line" ib = entity.content_type.params['boundary'].strip('"') if not re.match("^[ -~]{0,200}[!-~]$", ib): raise ValueError('Invalid boundary in multipart form: %r' % (ib,)) ib = ('--' + ib).encode('ascii') # Find the first marker while True: b = entity.readline() if not b: return b = b.strip() if b == ib: break # Read all parts while True: part = entity.part_class.from_fp(entity.fp, ib) entity.parts.append(part) part.process() if part.fp.done: break def process_multipart_form_data(entity): """Read all multipart/form-data parts into entity.parts or entity.params. """ process_multipart(entity) kept_parts = [] for part in entity.parts: if part.name is None: kept_parts.append(part) else: if part.filename is None: # It's a regular field value = part.fullvalue() else: # It's a file upload. Retain the whole part so consumer code # has access to its .file and .filename attributes. value = part if part.name in entity.params: if not isinstance(entity.params[part.name], list): entity.params[part.name] = [entity.params[part.name]] entity.params[part.name].append(value) else: entity.params[part.name] = value entity.parts = kept_parts def _old_process_multipart(entity): """The behavior of 3.2 and lower. 
Deprecated and will be changed in 3.3.""" process_multipart(entity) params = entity.params for part in entity.parts: if part.name is None: key = ntou('parts') else: key = part.name if part.filename is None: # It's a regular field value = part.fullvalue() else: # It's a file upload. Retain the whole part so consumer code # has access to its .file and .filename attributes. value = part if key in params: if not isinstance(params[key], list): params[key] = [params[key]] params[key].append(value) else: params[key] = value # -------------------------------- Entities --------------------------------- # class Entity(object): """An HTTP request body, or MIME multipart body. This class collects information about the HTTP request entity. When a given entity is of MIME type "multipart", each part is parsed into its own Entity instance, and the set of parts stored in :attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`. Between the ``before_request_body`` and ``before_handler`` tools, CherryPy tries to process the request body (if any) by calling :func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`. This uses the ``content_type`` of the Entity to look up a suitable processor in :attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`, a dict. If a matching processor cannot be found for the complete Content-Type, it tries again using the major type. For example, if a request with an entity of type "image/jpeg" arrives, but no processor can be found for that complete type, then one is sought for the major type "image". If a processor is still not found, then the :func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method of the Entity is called (which does nothing by default; you can override this too). CherryPy includes processors for the "application/x-www-form-urlencoded" type, the "multipart/form-data" type, and the "multipart" major type. CherryPy 3.2 processes these types almost exactly as older versions. 
Parts are passed as arguments to the page handler using their ``Content-Disposition.name`` if given, otherwise in a generic "parts" argument. Each such part is either a string, or the :class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this case it will have ``file`` and ``filename`` attributes, or possibly a ``value`` attribute). Each Part is itself a subclass of Entity, and has its own ``process`` method and ``processors`` dict. There is a separate processor for the "multipart" major type which is more flexible, and simply stores all multipart parts in :attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can enable it with:: cherrypy.request.body.processors['multipart'] = _cpreqbody.process_multipart in an ``on_start_resource`` tool. """ # http://tools.ietf.org/html/rfc2046#section-4.1.2: # "The default character set, which must be assumed in the # absence of a charset parameter, is US-ASCII." # However, many browsers send data in utf-8 with no charset. attempt_charsets = ['utf-8'] """A list of strings, each of which should be a known encoding. When the Content-Type of the request body warrants it, each of the given encodings will be tried in order. The first one to successfully decode the entity without raising an error is stored as :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by `HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_), but ``['us-ascii', 'utf-8']`` for multipart parts. """ charset = None """The successful decoding; see "attempt_charsets" above.""" content_type = None """The value of the Content-Type request header. If the Entity is part of a multipart payload, this will be the Content-Type given in the MIME headers for this part. """ default_content_type = 'application/x-www-form-urlencoded' """This defines a default ``Content-Type`` to use if no Content-Type header is given. 
The empty string is used for RequestBody, which results in the request body not being read or parsed at all. This is by design; a missing ``Content-Type`` header in the HTTP request entity is an error at best, and a security hole at worst. For multipart parts, however, the MIME spec declares that a part with no Content-Type defaults to "text/plain" (see :class:`Part<cherrypy._cpreqbody.Part>`). """ filename = None """The ``Content-Disposition.filename`` header, if available.""" fp = None """The readable socket file object.""" headers = None """A dict of request/multipart header names and values. This is a copy of the ``request.headers`` for the ``request.body``; for multipart parts, it is the set of headers for that part. """ length = None """The value of the ``Content-Length`` header, if provided.""" name = None """The "name" parameter of the ``Content-Disposition`` header, if any.""" params = None """ If the request Content-Type is 'application/x-www-form-urlencoded' or multipart, this will be a dict of the params pulled from the entity body; that is, it will be the portion of request.params that come from the message body (sometimes called "POST params", although they can be sent with various HTTP method verbs). This value is set between the 'before_request_body' and 'before_handler' hooks (assuming that process_request_body is True).""" processors = {'application/x-www-form-urlencoded': process_urlencoded, 'multipart/form-data': process_multipart_form_data, 'multipart': process_multipart, } """A dict of Content-Type names to processor methods.""" parts = None """A list of Part instances if ``Content-Type`` is of major type "multipart".""" part_class = None """The class used for multipart parts. You can replace this with custom subclasses to alter the processing of multipart parts. """ def __init__(self, fp, headers, params=None, parts=None): # Make an instance-specific copy of the class processors # so Tools, etc. can replace them per-request. 
self.processors = self.processors.copy() self.fp = fp self.headers = headers if params is None: params = {} self.params = params if parts is None: parts = [] self.parts = parts # Content-Type self.content_type = headers.elements('Content-Type') if self.content_type: self.content_type = self.content_type[0] else: self.content_type = httputil.HeaderElement.from_str( self.default_content_type) # Copy the class 'attempt_charsets', prepending any Content-Type # charset dec = self.content_type.params.get("charset", None) if dec: self.attempt_charsets = [dec] + [c for c in self.attempt_charsets if c != dec] else: self.attempt_charsets = self.attempt_charsets[:] # Length self.length = None clen = headers.get('Content-Length', None) # If Transfer-Encoding is 'chunked', ignore any Content-Length. if ( clen is not None and 'chunked' not in headers.get('Transfer-Encoding', '') ): try: self.length = int(clen) except ValueError: pass # Content-Disposition self.name = None self.filename = None disp = headers.elements('Content-Disposition') if disp: disp = disp[0] if 'name' in disp.params: self.name = disp.params['name'] if self.name.startswith('"') and self.name.endswith('"'): self.name = self.name[1:-1] if 'filename' in disp.params: self.filename = disp.params['filename'] if ( self.filename.startswith('"') and self.filename.endswith('"') ): self.filename = self.filename[1:-1] # The 'type' attribute is deprecated in 3.2; remove it in 3.3. type = property( lambda self: self.content_type, doc="A deprecated alias for " ":attr:`content_type<cherrypy._cpreqbody.Entity.content_type>`." 
) def read(self, size=None, fp_out=None): return self.fp.read(size, fp_out) def readline(self, size=None): return self.fp.readline(size) def readlines(self, sizehint=None): return self.fp.readlines(sizehint) def __iter__(self): return self def __next__(self): line = self.readline() if not line: raise StopIteration return line def next(self): return self.__next__() def read_into_file(self, fp_out=None): """Read the request body into fp_out (or make_file() if None). Return fp_out. """ if fp_out is None: fp_out = self.make_file() self.read(fp_out=fp_out) return fp_out def make_file(self): """Return a file-like object into which the request body will be read. By default, this will return a TemporaryFile. Override as needed. See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`.""" return tempfile.TemporaryFile() def fullvalue(self): """Return this entity as a string, whether stored in a file or not.""" if self.file: # It was stored in a tempfile. Read it. self.file.seek(0) value = self.file.read() self.file.seek(0) else: value = self.value return value def process(self): """Execute the best-match processor for the given media type.""" proc = None ct = self.content_type.value try: proc = self.processors[ct] except KeyError: toptype = ct.split('/', 1)[0] try: proc = self.processors[toptype] except KeyError: pass if proc is None: self.default_proc() else: proc(self) def default_proc(self): """Called if a more-specific processor is not found for the ``Content-Type``. """ # Leave the fp alone for someone else to read. This works fine # for request.body, but the Part subclasses need to override this # so they can move on to the next part. pass class Part(Entity): """A MIME part entity, part of a multipart entity.""" # "The default character set, which must be assumed in the absence of a # charset parameter, is US-ASCII." attempt_charsets = ['us-ascii', 'utf-8'] """A list of strings, each of which should be a known encoding. 
When the Content-Type of the request body warrants it, each of the given encodings will be tried in order. The first one to successfully decode the entity without raising an error is stored as :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by `HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_), but ``['us-ascii', 'utf-8']`` for multipart parts. """ boundary = None """The MIME multipart boundary.""" default_content_type = 'text/plain' """This defines a default ``Content-Type`` to use if no Content-Type header is given. The empty string is used for RequestBody, which results in the request body not being read or parsed at all. This is by design; a missing ``Content-Type`` header in the HTTP request entity is an error at best, and a security hole at worst. For multipart parts, however (this class), the MIME spec declares that a part with no Content-Type defaults to "text/plain". """ # This is the default in stdlib cgi. We may want to increase it. maxrambytes = 1000 """The threshold of bytes after which point the ``Part`` will store its data in a file (generated by :func:`make_file<cherrypy._cprequest.Entity.make_file>`) instead of a string. Defaults to 1000, just like the :mod:`cgi` module in Python's standard library. 
""" def __init__(self, fp, headers, boundary): Entity.__init__(self, fp, headers) self.boundary = boundary self.file = None self.value = None def from_fp(cls, fp, boundary): headers = cls.read_headers(fp) return cls(fp, headers, boundary) from_fp = classmethod(from_fp) def read_headers(cls, fp): headers = httputil.HeaderMap() while True: line = fp.readline() if not line: # No more data--illegal end of headers raise EOFError("Illegal end of headers.") if line == ntob('\r\n'): # Normal end of headers break if not line.endswith(ntob('\r\n')): raise ValueError("MIME requires CRLF terminators: %r" % line) if line[0] in ntob(' \t'): # It's a continuation line. v = line.strip().decode('ISO-8859-1') else: k, v = line.split(ntob(":"), 1) k = k.strip().decode('ISO-8859-1') v = v.strip().decode('ISO-8859-1') existing = headers.get(k) if existing: v = ", ".join((existing, v)) headers[k] = v return headers read_headers = classmethod(read_headers) def read_lines_to_boundary(self, fp_out=None): """Read bytes from self.fp and return or write them to a file. If the 'fp_out' argument is None (the default), all bytes read are returned in a single byte string. If the 'fp_out' argument is not None, it must be a file-like object that supports the 'write' method; all bytes read will be written to the fp, and that fp is returned. 
""" endmarker = self.boundary + ntob("--") delim = ntob("") prev_lf = True lines = [] seen = 0 while True: line = self.fp.readline(1 << 16) if not line: raise EOFError("Illegal end of multipart body.") if line.startswith(ntob("--")) and prev_lf: strippedline = line.strip() if strippedline == self.boundary: break if strippedline == endmarker: self.fp.finish() break line = delim + line if line.endswith(ntob("\r\n")): delim = ntob("\r\n") line = line[:-2] prev_lf = True elif line.endswith(ntob("\n")): delim = ntob("\n") line = line[:-1] prev_lf = True else: delim = ntob("") prev_lf = False if fp_out is None: lines.append(line) seen += len(line) if seen > self.maxrambytes: fp_out = self.make_file() for line in lines: fp_out.write(line) else: fp_out.write(line) if fp_out is None: result = ntob('').join(lines) for charset in self.attempt_charsets: try: result = result.decode(charset) except UnicodeDecodeError: pass else: self.charset = charset return result else: raise cherrypy.HTTPError( 400, "The request entity could not be decoded. The following " "charsets were attempted: %s" % repr(self.attempt_charsets) ) else: fp_out.seek(0) return fp_out def default_proc(self): """Called if a more-specific processor is not found for the ``Content-Type``. """ if self.filename: # Always read into a file if a .filename was given. self.file = self.read_into_file() else: result = self.read_lines_to_boundary() if isinstance(result, basestring): self.value = result else: self.file = result def read_into_file(self, fp_out=None): """Read the request body into fp_out (or make_file() if None). Return fp_out. 
""" if fp_out is None: fp_out = self.make_file() self.read_lines_to_boundary(fp_out=fp_out) return fp_out Entity.part_class = Part try: inf = float('inf') except ValueError: # Python 2.4 and lower class Infinity(object): def __cmp__(self, other): return 1 def __sub__(self, other): return self inf = Infinity() comma_separated_headers = [ 'Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection', 'Content-Encoding', 'Content-Language', 'Expect', 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'Te', 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'Www-Authenticate' ] class SizedReader: def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE, has_trailers=False): # Wrap our fp in a buffer so peek() works self.fp = fp self.length = length self.maxbytes = maxbytes self.buffer = ntob('') self.bufsize = bufsize self.bytes_read = 0 self.done = False self.has_trailers = has_trailers def read(self, size=None, fp_out=None): """Read bytes from the request body and return or write them to a file. A number of bytes less than or equal to the 'size' argument are read off the socket. The actual number of bytes read are tracked in self.bytes_read. The number may be smaller than 'size' when 1) the client sends fewer bytes, 2) the 'Content-Length' request header specifies fewer bytes than requested, or 3) the number of bytes read exceeds self.maxbytes (in which case, 413 is raised). If the 'fp_out' argument is None (the default), all bytes read are returned in a single byte string. If the 'fp_out' argument is not None, it must be a file-like object that supports the 'write' method; all bytes read will be written to the fp, and None is returned. 
""" if self.length is None: if size is None: remaining = inf else: remaining = size else: remaining = self.length - self.bytes_read if size and size < remaining: remaining = size if remaining == 0: self.finish() if fp_out is None: return ntob('') else: return None chunks = [] # Read bytes from the buffer. if self.buffer: if remaining is inf: data = self.buffer self.buffer = ntob('') else: data = self.buffer[:remaining] self.buffer = self.buffer[remaining:] datalen = len(data) remaining -= datalen # Check lengths. self.bytes_read += datalen if self.maxbytes and self.bytes_read > self.maxbytes: raise cherrypy.HTTPError(413) # Store the data. if fp_out is None: chunks.append(data) else: fp_out.write(data) # Read bytes from the socket. while remaining > 0: chunksize = min(remaining, self.bufsize) try: data = self.fp.read(chunksize) except Exception: e = sys.exc_info()[1] if e.__class__.__name__ == 'MaxSizeExceeded': # Post data is too big raise cherrypy.HTTPError( 413, "Maximum request length: %r" % e.args[1]) else: raise if not data: self.finish() break datalen = len(data) remaining -= datalen # Check lengths. self.bytes_read += datalen if self.maxbytes and self.bytes_read > self.maxbytes: raise cherrypy.HTTPError(413) # Store the data. 
if fp_out is None: chunks.append(data) else: fp_out.write(data) if fp_out is None: return ntob('').join(chunks) def readline(self, size=None): """Read a line from the request body and return it.""" chunks = [] while size is None or size > 0: chunksize = self.bufsize if size is not None and size < self.bufsize: chunksize = size data = self.read(chunksize) if not data: break pos = data.find(ntob('\n')) + 1 if pos: chunks.append(data[:pos]) remainder = data[pos:] self.buffer += remainder self.bytes_read -= len(remainder) break else: chunks.append(data) return ntob('').join(chunks) def readlines(self, sizehint=None): """Read lines from the request body and return them.""" if self.length is not None: if sizehint is None: sizehint = self.length - self.bytes_read else: sizehint = min(sizehint, self.length - self.bytes_read) lines = [] seen = 0 while True: line = self.readline() if not line: break lines.append(line) seen += len(line) if seen >= sizehint: break return lines def finish(self): self.done = True if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'): self.trailers = {} try: for line in self.fp.read_trailer_lines(): if line[0] in ntob(' \t'): # It's a continuation line. v = line.strip() else: try: k, v = line.split(ntob(":"), 1) except ValueError: raise ValueError("Illegal header line.") k = k.strip().title() v = v.strip() if k in comma_separated_headers: existing = self.trailers.get(envname) if existing: v = ntob(", ").join((existing, v)) self.trailers[k] = v except Exception: e = sys.exc_info()[1] if e.__class__.__name__ == 'MaxSizeExceeded': # Post data is too big raise cherrypy.HTTPError( 413, "Maximum request length: %r" % e.args[1]) else: raise class RequestBody(Entity): """The entity of the HTTP request.""" bufsize = 8 * 1024 """The buffer size used when reading the socket.""" # Don't parse the request body at all if the client didn't provide # a Content-Type header. 
See # https://bitbucket.org/cherrypy/cherrypy/issue/790 default_content_type = '' """This defines a default ``Content-Type`` to use if no Content-Type header is given. The empty string is used for RequestBody, which results in the request body not being read or parsed at all. This is by design; a missing ``Content-Type`` header in the HTTP request entity is an error at best, and a security hole at worst. For multipart parts, however, the MIME spec declares that a part with no Content-Type defaults to "text/plain" (see :class:`Part<cherrypy._cpreqbody.Part>`). """ maxbytes = None """Raise ``MaxSizeExceeded`` if more bytes than this are read from the socket. """ def __init__(self, fp, headers, params=None, request_params=None): Entity.__init__(self, fp, headers, params) # http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1 # When no explicit charset parameter is provided by the # sender, media subtypes of the "text" type are defined # to have a default charset value of "ISO-8859-1" when # received via HTTP. if self.content_type.value.startswith('text/'): for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'): if c in self.attempt_charsets: break else: self.attempt_charsets.append('ISO-8859-1') # Temporary fix while deprecating passing .parts as .params. self.processors['multipart'] = _old_process_multipart if request_params is None: request_params = {} self.request_params = request_params def process(self): """Process the request entity based on its Content-Type.""" # "The presence of a message-body in a request is signaled by the # inclusion of a Content-Length or Transfer-Encoding header field in # the request's message-headers." # It is possible to send a POST request with no body, for example; # however, app developers are responsible in that case to set # cherrypy.request.process_body to False so this method isn't called. 
h = cherrypy.serving.request.headers if 'Content-Length' not in h and 'Transfer-Encoding' not in h: raise cherrypy.HTTPError(411) self.fp = SizedReader(self.fp, self.length, self.maxbytes, bufsize=self.bufsize, has_trailers='Trailer' in h) super(RequestBody, self).process() # Body params should also be a part of the request_params # add them in here. request_params = self.request_params for key, value in self.params.items(): # Python 2 only: keyword arguments must be byte strings (type # 'str'). if sys.version_info < (3, 0): if isinstance(key, unicode): key = key.encode('ISO-8859-1') if key in request_params: if not isinstance(request_params[key], list): request_params[key] = [request_params[key]] request_params[key].append(value) else: request_params[key] = value
37,362
Python
.py
852
33.862676
84
0.601034
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,886
_cpcompat_subprocess.py
evilhero_mylar/lib/cherrypy/_cpcompat_subprocess.py
# subprocess - Subprocesses with accessible I/O streams # # For more information about this module, see PEP 324. # # This module should remain compatible with Python 2.2, see PEP 291. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license for licensing details. r"""subprocess - Subprocesses with accessible I/O streams This module allows you to spawn processes, connect to their input/output/error pipes, and obtain their return codes. This module intends to replace several other, older modules and functions, like: os.system os.spawn* os.popen* popen2.* commands.* Information about how the subprocess module can be used to replace these modules and functions can be found below. Using the subprocess module =========================== This module defines one class called Popen: class Popen(args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): Arguments are: args should be a string, or a sequence of program arguments. The program to execute is normally the first item in the args sequence or string, but can be explicitly set by using the executable argument. On UNIX, with shell=False (default): In this case, the Popen class uses os.execvp() to execute the child program. args should normally be a sequence. A string will be treated as a sequence with the string as the only item (the program to execute). On UNIX, with shell=True: If args is a string, it specifies the command string to execute through the shell. If args is a sequence, the first item specifies the command string, and any additional items will be treated as additional shell arguments. On Windows: the Popen class uses CreateProcess() to execute the child program, which operates on strings. 
If args is a sequence, it will be converted to a string using the list2cmdline method. Please note that not all MS Windows applications interpret the command line the same way: The list2cmdline is designed for applications using the same rules as the MS C runtime. bufsize, if given, has the same meaning as the corresponding argument to the built-in open() function: 0 means unbuffered, 1 means line buffered, any other positive value means use a buffer of (approximately) that size. A negative bufsize means to use the system default, which usually means fully buffered. The default value for bufsize is 0 (unbuffered). stdin, stdout and stderr specify the executed programs' standard input, standard output and standard error file handles, respectively. Valid values are PIPE, an existing file descriptor (a positive integer), an existing file object, and None. PIPE indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be STDOUT, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. If preexec_fn is set to a callable object, this object will be called in the child process just before the child is executed. If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. if shell is true, the specified command will be executed through the shell. If cwd is not None, the current directory will be changed to cwd before the child is executed. If env is not None, it defines the environment variables for the new process. If universal_newlines is true, the file objects stdout and stderr are opened as a text files, but lines may be terminated by any of '\n', the Unix end-of-line convention, '\r', the Macintosh convention or '\r\n', the Windows convention. All of these external representations are seen as '\n' by the Python program. 
Note: This feature is only available if Python is built with universal newline support (the default). Also, the newlines attribute of the file objects stdout, stdin and stderr are not updated by the communicate() method. The startupinfo and creationflags, if given, will be passed to the underlying CreateProcess() function. They can specify things such as appearance of the main window and priority for the new process. (Windows only) This module also defines some shortcut functions: call(*popenargs, **kwargs): Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: retcode = call(["ls", "-l"]) check_call(*popenargs, **kwargs): Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"]) check_output(*popenargs, **kwargs): Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: output = check_output(["ls", "-l", "/dev/null"]) Exceptions ---------- Exceptions raised in the child process, before the new program has started to execute, will be re-raised in the parent. Additionally, the exception object will have one extra attribute called 'child_traceback', which is a string containing traceback information from the childs point of view. The most common exception raised is OSError. This occurs, for example, when trying to execute a non-existent file. Applications should prepare for OSErrors. A ValueError will be raised if Popen is called with invalid arguments. 
check_call() and check_output() will raise CalledProcessError, if the called process returns a non-zero return code. Security -------- Unlike some other popen functions, this implementation will never call /bin/sh implicitly. This means that all characters, including shell metacharacters, can safely be passed to child processes. Popen objects ============= Instances of the Popen class have the following methods: poll() Check if child process has terminated. Returns returncode attribute. wait() Wait for child process to terminate. Returns returncode attribute. communicate(input=None) Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr). Note: The data read is buffered in memory, so do not use this method if the data size is large or unlimited. The following attributes are also available: stdin If the stdin argument is PIPE, this attribute is a file object that provides input to the child process. Otherwise, it is None. stdout If the stdout argument is PIPE, this attribute is a file object that provides output from the child process. Otherwise, it is None. stderr If the stderr argument is PIPE, this attribute is file object that provides error output from the child process. Otherwise, it is None. pid The process ID of the child process. returncode The child return code. A None value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (UNIX only). Replacing older functions with the subprocess module ==================================================== In this section, "a ==> b" means that b can be used as a replacement for a. 
Note: All functions in this section fail (more or less) silently if the executed program cannot be found; this module raises an OSError exception. In the following examples, we assume that the subprocess module is imported with "from subprocess import *". Replacing /bin/sh shell backquote --------------------------------- output=`mycmd myarg` ==> output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0] Replacing shell pipe line ------------------------- output=`dmesg | grep hda` ==> p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) output = p2.communicate()[0] Replacing os.system() --------------------- sts = os.system("mycmd" + " myarg") ==> p = Popen("mycmd" + " myarg", shell=True) pid, sts = os.waitpid(p.pid, 0) Note: * Calling the program through the shell is usually not required. * It's easier to look at the returncode attribute than the exitstatus. A more real-world example would look like this: try: retcode = call("mycmd" + " myarg", shell=True) if retcode < 0: print >>sys.stderr, "Child was terminated by signal", -retcode else: print >>sys.stderr, "Child returned", retcode except OSError, e: print >>sys.stderr, "Execution failed:", e Replacing os.spawn* ------------------- P_NOWAIT example: pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg") ==> pid = Popen(["/bin/mycmd", "myarg"]).pid P_WAIT example: retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg") ==> retcode = call(["/bin/mycmd", "myarg"]) Vector example: os.spawnvp(os.P_NOWAIT, path, args) ==> Popen([path] + args[1:]) Environment example: os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env) ==> Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"}) Replacing os.popen* ------------------- pipe = os.popen("cmd", mode='r', bufsize) ==> pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout pipe = os.popen("cmd", mode='w', bufsize) ==> pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin 
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize) ==> p = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdin, child_stdout) = (p.stdin, p.stdout) (child_stdin, child_stdout, child_stderr) = os.popen3("cmd", mode, bufsize) ==> p = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) (child_stdin, child_stdout, child_stderr) = (p.stdin, p.stdout, p.stderr) (child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode, bufsize) ==> p = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) (child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout) On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as the command to execute, in which case arguments will be passed directly to the program without shell intervention. This usage can be replaced as follows: (child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode, bufsize) ==> p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE) (child_stdin, child_stdout) = (p.stdin, p.stdout) Return code handling translates as follows: pipe = os.popen("cmd", 'w') ... rc = pipe.close() if rc is not None and rc % 256: print "There were some errors" ==> process = Popen("cmd", 'w', shell=True, stdin=PIPE) ... process.stdin.close() if process.wait() != 0: print "There were some errors" Replacing popen2.* ------------------ (child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode) ==> p = Popen(["somestring"], shell=True, bufsize=bufsize stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdout, child_stdin) = (p.stdout, p.stdin) On Unix, popen2 also accepts a sequence as the command to execute, in which case arguments will be passed directly to the program without shell intervention. 
This usage can be replaced as follows: (child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode) ==> p = Popen(["mycmd", "myarg"], bufsize=bufsize, stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdout, child_stdin) = (p.stdout, p.stdin) The popen2.Popen3 and popen2.Popen4 basically works as subprocess.Popen, except that: * subprocess.Popen raises an exception if the execution fails * the capturestderr argument is replaced with the stderr argument. * stdin=PIPE and stdout=PIPE must be specified. * popen2 closes all filedescriptors by default, but you have to specify close_fds=True with subprocess.Popen. """ import sys mswindows = (sys.platform == "win32") import os import types import traceback import gc import signal import errno try: set except NameError: from sets import Set as set # Exception classes used by this module. class CalledProcessError(Exception): """This exception is raised when a process run by check_call() or check_output() returns a non-zero exit status. The exit status will be stored in the returncode attribute; check_output() will also store the output in the output attribute. """ def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % ( self.cmd, self.returncode) if mswindows: import threading import msvcrt import _subprocess class STARTUPINFO: dwFlags = 0 hStdInput = None hStdOutput = None hStdError = None wShowWindow = 0 class pywintypes: error = IOError else: import select _has_poll = hasattr(select, 'poll') import fcntl import pickle # When select or poll has indicated that the file is writable, # we can write up to _PIPE_BUF bytes without risk of blocking. # POSIX defines PIPE_BUF as >= 512. 
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512) __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "check_output", "CalledProcessError"] if mswindows: from _subprocess import CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, \ STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, \ STD_ERROR_HANDLE, SW_HIDE, \ STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", "STD_ERROR_HANDLE", "SW_HIDE", "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"]) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 _active = [] def _cleanup(): for inst in _active[:]: res = inst._internal_poll(_deadstate=sys.maxint) if res is not None: try: _active.remove(inst) except ValueError: # This can happen if two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. pass PIPE = -1 STDOUT = -2 def _eintr_retry_call(func, *args): while True: try: return func(*args) except (OSError, IOError), e: if e.errno == errno.EINTR: continue raise def call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: retcode = call(["ls", "-l"]) """ return Popen(*popenargs, **kwargs).wait() def check_call(*popenargs, **kwargs): """Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise CalledProcessError(retcode, cmd) return 0 def check_output(*popenargs, **kwargs): r"""Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. 
The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> check_output(["ls", "-l", "/dev/null"]) 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... stderr=STDOUT) 'ls: non_existent_file: No such file or directory\n' """ if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = Popen(stdout=PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise CalledProcessError(retcode, cmd, output=output) return output def list2cmdline(seq): """ Translate a sequence of arguments into a command line string, using the same rules as the MS C runtime: 1) Arguments are delimited by white space, which is either a space or a tab. 2) A string surrounded by double quotation marks is interpreted as a single argument, regardless of white space contained within. A quoted string can be embedded in an argument. 3) A double quotation mark preceded by a backslash is interpreted as a literal double quotation mark. 4) Backslashes are interpreted literally, unless they immediately precede a double quotation mark. 5) If backslashes immediately precede a double quotation mark, every pair of backslashes is interpreted as a literal backslash. If the number of backslashes is odd, the last backslash escapes the next double quotation mark as described in rule 3. 
""" # See # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx # or search http://msdn.microsoft.com for # "Parsing C++ Command-Line Arguments" result = [] needquote = False for arg in seq: bs_buf = [] # Add a space to separate this argument from the others if result: result.append(' ') needquote = (" " in arg) or ("\t" in arg) or not arg if needquote: result.append('"') for c in arg: if c == '\\': # Don't know if we need to double yet. bs_buf.append(c) elif c == '"': # Double backslashes. result.append('\\' * len(bs_buf) * 2) bs_buf = [] result.append('\\"') else: # Normal char if bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) # Add remaining backslashes, if any. if bs_buf: result.extend(bs_buf) if needquote: result.extend(bs_buf) result.append('"') return ''.join(result) class Popen(object): def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): """Create new Popen instance.""" _cleanup() self._child_created = False if not isinstance(bufsize, (int, long)): raise TypeError("bufsize must be an integer") if mswindows: if preexec_fn is not None: raise ValueError("preexec_fn is not supported on Windows " "platforms") if close_fds and (stdin is not None or stdout is not None or stderr is not None): raise ValueError("close_fds is not supported on Windows " "platforms if you redirect " "stdin/stdout/stderr") else: # POSIX if startupinfo is not None: raise ValueError("startupinfo is only supported on Windows " "platforms") if creationflags != 0: raise ValueError("creationflags is only supported on Windows " "platforms") self.stdin = None self.stdout = None self.stderr = None self.pid = None self.returncode = None self.universal_newlines = universal_newlines # Input and output objects. 
The general principle is like # this: # # Parent Child # ------ ----- # p2cwrite ---stdin---> p2cread # c2pread <--stdout--- c2pwrite # errread <--stderr--- errwrite # # On POSIX, the child objects are file descriptors. On # Windows, these are Windows file handles. The parent objects # are file descriptors on both platforms. The parent objects # are None when not using PIPEs. The child objects are None # when not redirecting. (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) self._execute_child(args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) if mswindows: if p2cwrite is not None: p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) if c2pread is not None: c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) if errread is not None: errread = msvcrt.open_osfhandle(errread.Detach(), 0) if p2cwrite is not None: self.stdin = os.fdopen(p2cwrite, 'wb', bufsize) if c2pread is not None: if universal_newlines: self.stdout = os.fdopen(c2pread, 'rU', bufsize) else: self.stdout = os.fdopen(c2pread, 'rb', bufsize) if errread is not None: if universal_newlines: self.stderr = os.fdopen(errread, 'rU', bufsize) else: self.stderr = os.fdopen(errread, 'rb', bufsize) def _translate_newlines(self, data): data = data.replace("\r\n", "\n") data = data.replace("\r", "\n") return data def __del__(self, _maxint=sys.maxint, _active=_active): # If __init__ hasn't had a chance to execute (e.g. if it # was passed an undeclared keyword argument), we don't # have a _child_created attribute at all. if not getattr(self, '_child_created', False): # We didn't get to successfully create a child process. return # In case the child hasn't been waited on, check if it's done. self._internal_poll(_deadstate=_maxint) if self.returncode is None and _active is not None: # Child is still running, keep us alive until we can wait on it. 
_active.append(self) def communicate(self, input=None): """Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr).""" # Optimization: If we are only using one pipe, or no pipe at # all, using select() or threads is unnecessary. if [self.stdin, self.stdout, self.stderr].count(None) >= 2: stdout = None stderr = None if self.stdin: if input: try: self.stdin.write(input) except IOError, e: if e.errno != errno.EPIPE and e.errno != errno.EINVAL: raise self.stdin.close() elif self.stdout: stdout = _eintr_retry_call(self.stdout.read) self.stdout.close() elif self.stderr: stderr = _eintr_retry_call(self.stderr.read) self.stderr.close() self.wait() return (stdout, stderr) return self._communicate(input) def poll(self): return self._internal_poll() if mswindows: # # Windows methods # def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ if stdin is None and stdout is None and stderr is None: return (None, None, None, None, None, None) p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: p2cread = _subprocess.GetStdHandle( _subprocess.STD_INPUT_HANDLE) if p2cread is None: p2cread, _ = _subprocess.CreatePipe(None, 0) elif stdin == PIPE: p2cread, p2cwrite = _subprocess.CreatePipe(None, 0) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) else: # Assuming file-like object p2cread = msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) if stdout is None: c2pwrite = _subprocess.GetStdHandle( _subprocess.STD_OUTPUT_HANDLE) if c2pwrite is None: _, c2pwrite = _subprocess.CreatePipe(None, 0) elif stdout == PIPE: c2pread, c2pwrite = 
_subprocess.CreatePipe(None, 0) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) else: # Assuming file-like object c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) if stderr is None: errwrite = _subprocess.GetStdHandle( _subprocess.STD_ERROR_HANDLE) if errwrite is None: _, errwrite = _subprocess.CreatePipe(None, 0) elif stderr == PIPE: errread, errwrite = _subprocess.CreatePipe(None, 0) elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) else: # Assuming file-like object errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): """Return a duplicate of handle, which is inheritable""" return _subprocess.DuplicateHandle( _subprocess.GetCurrentProcess(), handle, _subprocess.GetCurrentProcess(), 0, 1, _subprocess.DUPLICATE_SAME_ACCESS ) def _find_w9xpopen(self): """Find and return absolut path to w9xpopen.exe""" w9xpopen = os.path.join( os.path.dirname(_subprocess.GetModuleFileName(0)), "w9xpopen.exe") if not os.path.exists(w9xpopen): # Eeek - file-not-found - possibly an embedding # situation - see if we can locate it in sys.exec_prefix w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe") if not os.path.exists(w9xpopen): raise RuntimeError("Cannot locate w9xpopen.exe, which is " "needed for Popen to work with your " "shell or platform.") return w9xpopen def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): """Execute program (MS Windows version)""" if not isinstance(args, types.StringTypes): args = list2cmdline(args) # Process startup details if startupinfo is None: startupinfo = STARTUPINFO() if None not in (p2cread, c2pwrite, errwrite): 
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite startupinfo.hStdError = errwrite if shell: startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = _subprocess.SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = '{} /c "{}"'.format(comspec, args) if (_subprocess.GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com"): # Win9x, or using command.com on NT. We need to # use the w9xpopen intermediate program. For more # information, see KB Q150956 # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp) w9xpopen = self._find_w9xpopen() args = '"%s" %s' % (w9xpopen, args) # Not passing CREATE_NEW_CONSOLE has been known to # cause random failures on win9x. Specifically a # dialog: "Your program accessed mem currently in # use at xxx" and a hopeful warning about the # stability of your system. Cost is Ctrl+C wont # kill children. creationflags |= _subprocess.CREATE_NEW_CONSOLE # Start the process try: try: hp, ht, pid, tid = _subprocess.CreateProcess( executable, args, # no special # security None, None, int(not close_fds), creationflags, env, cwd, startupinfo) except pywintypes.error, e: # Translate pywintypes.error to WindowsError, which is # a subclass of OSError. FIXME: We should really # translate errno using _sys_errlist (or similar), but # how can this be done from Python? raise WindowsError(*e.args) finally: # Child is launched. Close the parent's copy of those pipe # handles that only the child should have open. You need # to make sure that no handles to the write end of the # output pipe are maintained in this process or else the # pipe will not close when the child process exits and the # ReadFile will hang. 
if p2cread is not None: p2cread.Close() if c2pwrite is not None: c2pwrite.Close() if errwrite is not None: errwrite.Close() # Retain the process handle, but close the thread handle self._child_created = True self._handle = hp self.pid = pid ht.Close() def _internal_poll( self, _deadstate=None, _WaitForSingleObject=_subprocess.WaitForSingleObject, _WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0, _GetExitCodeProcess=_subprocess.GetExitCodeProcess ): """Check if child process has terminated. Returns returncode attribute. This method is called by __del__, so it can only refer to objects in its local scope. """ if self.returncode is None: if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: self.returncode = _GetExitCodeProcess(self._handle) return self.returncode def wait(self): """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is None: _subprocess.WaitForSingleObject(self._handle, _subprocess.INFINITE) self.returncode = _subprocess.GetExitCodeProcess(self._handle) return self.returncode def _readerthread(self, fh, buffer): buffer.append(fh.read()) def _communicate(self, input): stdout = None # Return stderr = None # Return if self.stdout: stdout = [] stdout_thread = threading.Thread(target=self._readerthread, args=(self.stdout, stdout)) stdout_thread.setDaemon(True) stdout_thread.start() if self.stderr: stderr = [] stderr_thread = threading.Thread(target=self._readerthread, args=(self.stderr, stderr)) stderr_thread.setDaemon(True) stderr_thread.start() if self.stdin: if input is not None: try: self.stdin.write(input) except IOError, e: if e.errno != errno.EPIPE: raise self.stdin.close() if self.stdout: stdout_thread.join() if self.stderr: stderr_thread.join() # All data exchanged. Translate lists into strings. if stdout is not None: stdout = stdout[0] if stderr is not None: stderr = stderr[0] # Translate newlines, if requested. 
We cannot let the file # object do the translation: It is based on stdio, which is # impossible to combine with select (unless forcing no # buffering). if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ if sig == signal.SIGTERM: self.terminate() elif sig == signal.CTRL_C_EVENT: os.kill(self.pid, signal.CTRL_C_EVENT) elif sig == signal.CTRL_BREAK_EVENT: os.kill(self.pid, signal.CTRL_BREAK_EVENT) else: raise ValueError("Unsupported signal: {}".format(sig)) def terminate(self): """Terminates the process """ _subprocess.TerminateProcess(self._handle, 1) kill = terminate else: # # POSIX methods # def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: pass elif stdin == PIPE: p2cread, p2cwrite = self.pipe_cloexec() elif isinstance(stdin, int): p2cread = stdin else: # Assuming file-like object p2cread = stdin.fileno() if stdout is None: pass elif stdout == PIPE: c2pread, c2pwrite = self.pipe_cloexec() elif isinstance(stdout, int): c2pwrite = stdout else: # Assuming file-like object c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == PIPE: errread, errwrite = self.pipe_cloexec() elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = stderr else: # Assuming file-like object errwrite = stderr.fileno() return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _set_cloexec_flag(self, fd, cloexec=True): try: cloexec_flag = fcntl.FD_CLOEXEC except AttributeError: cloexec_flag = 1 old = fcntl.fcntl(fd, fcntl.F_GETFD) if cloexec: fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag) else: fcntl.fcntl(fd, 
fcntl.F_SETFD, old & ~cloexec_flag) def pipe_cloexec(self): """Create a pipe with FDs set CLOEXEC.""" # Pipes' FDs are set CLOEXEC by default because we don't want them # to be inherited by other subprocesses: the CLOEXEC flag is # removed from the child's FDs by _dup2(), between fork() and # exec(). # This is not atomic: we would need the pipe2() syscall for that. r, w = os.pipe() self._set_cloexec_flag(r) self._set_cloexec_flag(w) return r, w def _close_fds(self, but): if hasattr(os, 'closerange'): os.closerange(3, but) os.closerange(but + 1, MAXFD) else: for i in xrange(3, MAXFD): if i == but: continue try: os.close(i) except: pass def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): """Execute program (POSIX version)""" if isinstance(args, types.StringTypes): args = [args] else: args = list(args) if shell: args = ["/bin/sh", "-c"] + args if executable: args[0] = executable if executable is None: executable = args[0] # For transferring possible exec failure from child to parent # The first char specifies the exception type: 0 means # OSError, 1 means some other error. errpipe_read, errpipe_write = self.pipe_cloexec() try: try: gc_was_enabled = gc.isenabled() # Disable gc to avoid bug where gc -> file_dealloc -> # write to stderr -> hang. # http://bugs.python.org/issue1336 gc.disable() try: self.pid = os.fork() except: if gc_was_enabled: gc.enable() raise self._child_created = True if self.pid == 0: # Child try: # Close parent's pipe ends if p2cwrite is not None: os.close(p2cwrite) if c2pread is not None: os.close(c2pread) if errread is not None: os.close(errread) os.close(errpipe_read) # When duping fds, if there arises a situation # where one of the fds is either 0, 1 or 2, it # is possible that it is overwritten (#12607). 
if c2pwrite == 0: c2pwrite = os.dup(c2pwrite) if errwrite == 0 or errwrite == 1: errwrite = os.dup(errwrite) # Dup fds for child def _dup2(a, b): # dup2() removes the CLOEXEC flag but # we must do it ourselves if dup2() # would be a no-op (issue #10806). if a == b: self._set_cloexec_flag(a, False) elif a is not None: os.dup2(a, b) _dup2(p2cread, 0) _dup2(c2pwrite, 1) _dup2(errwrite, 2) # Close pipe fds. Make sure we don't close the # same fd more than once, or standard fds. closed = set([None]) for fd in [p2cread, c2pwrite, errwrite]: if fd not in closed and fd > 2: os.close(fd) closed.add(fd) # Close all other fds, if asked for if close_fds: self._close_fds(but=errpipe_write) if cwd is not None: os.chdir(cwd) if preexec_fn: preexec_fn() if env is None: os.execvp(executable, args) else: os.execvpe(executable, args, env) except: exc_type, exc_value, tb = sys.exc_info() # Save the traceback and attach it to the exception # object exc_lines = traceback.format_exception(exc_type, exc_value, tb) exc_value.child_traceback = ''.join(exc_lines) os.write(errpipe_write, pickle.dumps(exc_value)) # This exitcode won't be reported to applications, # so it really doesn't matter what we return. 
os._exit(255) # Parent if gc_was_enabled: gc.enable() finally: # be sure the FD is closed no matter what os.close(errpipe_write) if p2cread is not None and p2cwrite is not None: os.close(p2cread) if c2pwrite is not None and c2pread is not None: os.close(c2pwrite) if errwrite is not None and errread is not None: os.close(errwrite) # Wait for exec to fail or succeed; possibly raising exception # Exception limited to 1M data = _eintr_retry_call(os.read, errpipe_read, 1048576) finally: # be sure the FD is closed no matter what os.close(errpipe_read) if data != "": try: _eintr_retry_call(os.waitpid, self.pid, 0) except OSError, e: if e.errno != errno.ECHILD: raise child_exception = pickle.loads(data) for fd in (p2cwrite, c2pread, errread): if fd is not None: os.close(fd) raise child_exception def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED, _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED, _WEXITSTATUS=os.WEXITSTATUS): # This method is called (indirectly) by __del__, so it cannot # refer to anything outside of its local scope.""" if _WIFSIGNALED(sts): self.returncode = -_WTERMSIG(sts) elif _WIFEXITED(sts): self.returncode = _WEXITSTATUS(sts) else: # Should never happen raise RuntimeError("Unknown child exit status!") def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid, _WNOHANG=os.WNOHANG, _os_error=os.error): """Check if child process has terminated. Returns returncode attribute. This method is called by __del__, so it cannot reference anything outside of the local scope (nor can any methods it calls). """ if self.returncode is None: try: pid, sts = _waitpid(self.pid, _WNOHANG) if pid == self.pid: self._handle_exitstatus(sts) except _os_error: if _deadstate is not None: self.returncode = _deadstate return self.returncode def wait(self): """Wait for child process to terminate. 
Returns returncode attribute.""" if self.returncode is None: try: pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0) except OSError, e: if e.errno != errno.ECHILD: raise # This happens if SIGCLD is set to be ignored or waiting # for child processes has otherwise been disabled for our # process. This child is dead, we can't get the status. sts = 0 self._handle_exitstatus(sts) return self.returncode def _communicate(self, input): if self.stdin: # Flush stdio buffer. This might block, if the user has # been writing to .stdin in an uncontrolled fashion. self.stdin.flush() if not input: self.stdin.close() if _has_poll: stdout, stderr = self._communicate_with_poll(input) else: stdout, stderr = self._communicate_with_select(input) # All data exchanged. Translate lists into strings. if stdout is not None: stdout = ''.join(stdout) if stderr is not None: stderr = ''.join(stderr) # Translate newlines, if requested. We cannot let the file # object do the translation: It is based on stdio, which is # impossible to combine with select (unless forcing no # buffering). 
if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr) def _communicate_with_poll(self, input): stdout = None # Return stderr = None # Return fd2file = {} fd2output = {} poller = select.poll() def register_and_append(file_obj, eventmask): poller.register(file_obj.fileno(), eventmask) fd2file[file_obj.fileno()] = file_obj def close_unregister_and_remove(fd): poller.unregister(fd) fd2file[fd].close() fd2file.pop(fd) if self.stdin and input: register_and_append(self.stdin, select.POLLOUT) select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI if self.stdout: register_and_append(self.stdout, select_POLLIN_POLLPRI) fd2output[self.stdout.fileno()] = stdout = [] if self.stderr: register_and_append(self.stderr, select_POLLIN_POLLPRI) fd2output[self.stderr.fileno()] = stderr = [] input_offset = 0 while fd2file: try: ready = poller.poll() except select.error, e: if e.args[0] == errno.EINTR: continue raise for fd, mode in ready: if mode & select.POLLOUT: chunk = input[input_offset: input_offset + _PIPE_BUF] try: input_offset += os.write(fd, chunk) except OSError, e: if e.errno == errno.EPIPE: close_unregister_and_remove(fd) else: raise else: if input_offset >= len(input): close_unregister_and_remove(fd) elif mode & select_POLLIN_POLLPRI: data = os.read(fd, 4096) if not data: close_unregister_and_remove(fd) fd2output[fd].append(data) else: # Ignore hang up or errors. 
close_unregister_and_remove(fd) return (stdout, stderr) def _communicate_with_select(self, input): read_set = [] write_set = [] stdout = None # Return stderr = None # Return if self.stdin and input: write_set.append(self.stdin) if self.stdout: read_set.append(self.stdout) stdout = [] if self.stderr: read_set.append(self.stderr) stderr = [] input_offset = 0 while read_set or write_set: try: rlist, wlist, xlist = select.select( read_set, write_set, []) except select.error, e: if e.args[0] == errno.EINTR: continue raise if self.stdin in wlist: chunk = input[input_offset: input_offset + _PIPE_BUF] try: bytes_written = os.write(self.stdin.fileno(), chunk) except OSError, e: if e.errno == errno.EPIPE: self.stdin.close() write_set.remove(self.stdin) else: raise else: input_offset += bytes_written if input_offset >= len(input): self.stdin.close() write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) if data == "": self.stdout.close() read_set.remove(self.stdout) stdout.append(data) if self.stderr in rlist: data = os.read(self.stderr.fileno(), 1024) if data == "": self.stderr.close() read_set.remove(self.stderr) stderr.append(data) return (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process with SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process with SIGKILL """ self.send_signal(signal.SIGKILL) def _demo_posix(): # # Example 1: Simple redirection: Get process list # plist = Popen(["ps"], stdout=PIPE).communicate()[0] print "Process list:" print plist # # Example 2: Change uid before executing child # if os.getuid() == 0: p = Popen(["id"], preexec_fn=lambda: os.setuid(100)) p.wait() # # Example 3: Connecting several subprocesses # print "Looking for 'hda'..." 
p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) print repr(p2.communicate()[0]) # # Example 4: Catch execution error # print print "Trying a weird file..." try: print Popen(["/this/path/does/not/exist"]).communicate() except OSError, e: if e.errno == errno.ENOENT: print "The file didn't exist. I thought so..." print "Child traceback:" print e.child_traceback else: print "Error", e.errno else: print >>sys.stderr, "Gosh. No error." def _demo_windows(): # # Example 1: Connecting several subprocesses # print "Looking for 'PROMPT' in set output..." p1 = Popen("set", stdout=PIPE, shell=True) p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE) print repr(p2.communicate()[0]) # # Example 2: Simple execution of program # print "Executing calc..." p = Popen("calc") p.wait() if __name__ == "__main__": if mswindows: _demo_windows() else: _demo_posix()
55,872
Python
.py
1,277
31.21065
128
0.560448
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,887
_cptools.py
evilhero_mylar/lib/cherrypy/_cptools.py
"""CherryPy tools. A "tool" is any helper, adapted to CP. Tools are usually designed to be used in a variety of ways (although some may only offer one if they choose): Library calls All tools are callables that can be used wherever needed. The arguments are straightforward and should be detailed within the docstring. Function decorators All tools, when called, may be used as decorators which configure individual CherryPy page handlers (methods on the CherryPy tree). That is, "@tools.anytool()" should "turn on" the tool via the decorated function's _cp_config attribute. CherryPy config If a tool exposes a "_setup" callable, it will be called once per Request (if the feature is "turned on" via config). Tools may be implemented as any object with a namespace. The builtins are generally either modules or instances of the tools.Tool class. """ import sys import warnings import cherrypy def _getargs(func): """Return the names of all static arguments to the given function.""" # Use this instead of importing inspect for less mem overhead. import types if sys.version_info >= (3, 0): if isinstance(func, types.MethodType): func = func.__func__ co = func.__code__ else: if isinstance(func, types.MethodType): func = func.im_func co = func.func_code return co.co_varnames[:co.co_argcount] _attr_error = ( "CherryPy Tools cannot be turned on directly. Instead, turn them " "on via config, or use them as decorators on your page handlers." ) class Tool(object): """A registered function for use with CherryPy request-processing hooks. help(tool.callable) should give you more information about this Tool. 
""" namespace = "tools" def __init__(self, point, callable, name=None, priority=50): self._point = point self.callable = callable self._name = name self._priority = priority self.__doc__ = self.callable.__doc__ self._setargs() def _get_on(self): raise AttributeError(_attr_error) def _set_on(self, value): raise AttributeError(_attr_error) on = property(_get_on, _set_on) def _setargs(self): """Copy func parameter names to obj attributes.""" try: for arg in _getargs(self.callable): setattr(self, arg, None) except (TypeError, AttributeError): if hasattr(self.callable, "__call__"): for arg in _getargs(self.callable.__call__): setattr(self, arg, None) # IronPython 1.0 raises NotImplementedError because # inspect.getargspec tries to access Python bytecode # in co_code attribute. except NotImplementedError: pass # IronPython 1B1 may raise IndexError in some cases, # but if we trap it here it doesn't prevent CP from working. except IndexError: pass def _merged_args(self, d=None): """Return a dict of configuration entries for this Tool.""" if d: conf = d.copy() else: conf = {} tm = cherrypy.serving.request.toolmaps[self.namespace] if self._name in tm: conf.update(tm[self._name]) if "on" in conf: del conf["on"] return conf def __call__(self, *args, **kwargs): """Compile-time decorator (turn on the tool in config). For example:: @tools.proxy() def whats_my_base(self): return cherrypy.request.base whats_my_base.exposed = True """ if args: raise TypeError("The %r Tool does not accept positional " "arguments; you must use keyword arguments." % self._name) def tool_decorator(f): if not hasattr(f, "_cp_config"): f._cp_config = {} subspace = self.namespace + "." + self._name + "." f._cp_config[subspace + "on"] = True for k, v in kwargs.items(): f._cp_config[subspace + k] = v return f return tool_decorator def _setup(self): """Hook this tool into cherrypy.request. The standard CherryPy request object will automatically call this method when the tool is "turned on" in config. 
""" conf = self._merged_args() p = conf.pop("priority", None) if p is None: p = getattr(self.callable, "priority", self._priority) cherrypy.serving.request.hooks.attach(self._point, self.callable, priority=p, **conf) class HandlerTool(Tool): """Tool which is called 'before main', that may skip normal handlers. If the tool successfully handles the request (by setting response.body), if should return True. This will cause CherryPy to skip any 'normal' page handler. If the tool did not handle the request, it should return False to tell CherryPy to continue on and call the normal page handler. If the tool is declared AS a page handler (see the 'handler' method), returning False will raise NotFound. """ def __init__(self, callable, name=None): Tool.__init__(self, 'before_handler', callable, name) def handler(self, *args, **kwargs): """Use this tool as a CherryPy page handler. For example:: class Root: nav = tools.staticdir.handler(section="/nav", dir="nav", root=absDir) """ def handle_func(*a, **kw): handled = self.callable(*args, **self._merged_args(kwargs)) if not handled: raise cherrypy.NotFound() return cherrypy.serving.response.body handle_func.exposed = True return handle_func def _wrapper(self, **kwargs): if self.callable(**kwargs): cherrypy.serving.request.handler = None def _setup(self): """Hook this tool into cherrypy.request. The standard CherryPy request object will automatically call this method when the tool is "turned on" in config. """ conf = self._merged_args() p = conf.pop("priority", None) if p is None: p = getattr(self.callable, "priority", self._priority) cherrypy.serving.request.hooks.attach(self._point, self._wrapper, priority=p, **conf) class HandlerWrapperTool(Tool): """Tool which wraps request.handler in a provided wrapper function. The 'newhandler' arg must be a handler wrapper function that takes a 'next_handler' argument, plus ``*args`` and ``**kwargs``. 
Like all page handler functions, it must return an iterable for use as cherrypy.response.body. For example, to allow your 'inner' page handlers to return dicts which then get interpolated into a template:: def interpolator(next_handler, *args, **kwargs): filename = cherrypy.request.config.get('template') cherrypy.response.template = env.get_template(filename) response_dict = next_handler(*args, **kwargs) return cherrypy.response.template.render(**response_dict) cherrypy.tools.jinja = HandlerWrapperTool(interpolator) """ def __init__(self, newhandler, point='before_handler', name=None, priority=50): self.newhandler = newhandler self._point = point self._name = name self._priority = priority def callable(self, *args, **kwargs): innerfunc = cherrypy.serving.request.handler def wrap(*args, **kwargs): return self.newhandler(innerfunc, *args, **kwargs) cherrypy.serving.request.handler = wrap class ErrorTool(Tool): """Tool which is used to replace the default request.error_response.""" def __init__(self, callable, name=None): Tool.__init__(self, None, callable, name) def _wrapper(self): self.callable(**self._merged_args()) def _setup(self): """Hook this tool into cherrypy.request. The standard CherryPy request object will automatically call this method when the tool is "turned on" in config. """ cherrypy.serving.request.error_response = self._wrapper # Builtin tools # from cherrypy.lib import cptools, encoding, auth, static, jsontools from cherrypy.lib import sessions as _sessions, xmlrpcutil as _xmlrpc from cherrypy.lib import caching as _caching from cherrypy.lib import auth_basic, auth_digest class SessionTool(Tool): """Session Tool for CherryPy. sessions.locking When 'implicit' (the default), the session will be locked for you, just before running the page handler. When 'early', the session will be locked before reading the request body. 
This is off by default for safety reasons; for example, a large upload would block the session, denying an AJAX progress meter (`issue <https://bitbucket.org/cherrypy/cherrypy/issue/630>`_). When 'explicit' (or any other value), you need to call cherrypy.session.acquire_lock() yourself before using session data. """ def __init__(self): # _sessions.init must be bound after headers are read Tool.__init__(self, 'before_request_body', _sessions.init) def _lock_session(self): cherrypy.serving.session.acquire_lock() def _setup(self): """Hook this tool into cherrypy.request. The standard CherryPy request object will automatically call this method when the tool is "turned on" in config. """ hooks = cherrypy.serving.request.hooks conf = self._merged_args() p = conf.pop("priority", None) if p is None: p = getattr(self.callable, "priority", self._priority) hooks.attach(self._point, self.callable, priority=p, **conf) locking = conf.pop('locking', 'implicit') if locking == 'implicit': hooks.attach('before_handler', self._lock_session) elif locking == 'early': # Lock before the request body (but after _sessions.init runs!) hooks.attach('before_request_body', self._lock_session, priority=60) else: # Don't lock pass hooks.attach('before_finalize', _sessions.save) hooks.attach('on_end_request', _sessions.close) def regenerate(self): """Drop the current session and make a new one (with a new id).""" sess = cherrypy.serving.session sess.regenerate() # Grab cookie-relevant tool args conf = dict([(k, v) for k, v in self._merged_args().items() if k in ('path', 'path_header', 'name', 'timeout', 'domain', 'secure')]) _sessions.set_response_cookie(**conf) class XMLRPCController(object): """A Controller (page handler collection) for XML-RPC. To use it, have your controllers subclass this base class (it will turn on the tool for you). 
You can also supply the following optional config entries:: tools.xmlrpc.encoding: 'utf-8' tools.xmlrpc.allow_none: 0 XML-RPC is a rather discontinuous layer over HTTP; dispatching to the appropriate handler must first be performed according to the URL, and then a second dispatch step must take place according to the RPC method specified in the request body. It also allows a superfluous "/RPC2" prefix in the URL, supplies its own handler args in the body, and requires a 200 OK "Fault" response instead of 404 when the desired method is not found. Therefore, XML-RPC cannot be implemented for CherryPy via a Tool alone. This Controller acts as the dispatch target for the first half (based on the URL); it then reads the RPC method from the request body and does its own second dispatch step based on that method. It also reads body params, and returns a Fault on error. The XMLRPCDispatcher strips any /RPC2 prefix; if you aren't using /RPC2 in your URL's, you can safely skip turning on the XMLRPCDispatcher. Otherwise, you need to use declare it in config:: request.dispatch: cherrypy.dispatch.XMLRPCDispatcher() """ # Note we're hard-coding this into the 'tools' namespace. We could do # a huge amount of work to make it relocatable, but the only reason why # would be if someone actually disabled the default_toolbox. Meh. 
_cp_config = {'tools.xmlrpc.on': True} def default(self, *vpath, **params): rpcparams, rpcmethod = _xmlrpc.process_body() subhandler = self for attr in str(rpcmethod).split('.'): subhandler = getattr(subhandler, attr, None) if subhandler and getattr(subhandler, "exposed", False): body = subhandler(*(vpath + rpcparams), **params) else: # https://bitbucket.org/cherrypy/cherrypy/issue/533 # if a method is not found, an xmlrpclib.Fault should be returned # raising an exception here will do that; see # cherrypy.lib.xmlrpcutil.on_error raise Exception('method "%s" is not supported' % attr) conf = cherrypy.serving.request.toolmaps['tools'].get("xmlrpc", {}) _xmlrpc.respond(body, conf.get('encoding', 'utf-8'), conf.get('allow_none', 0)) return cherrypy.serving.response.body default.exposed = True class SessionAuthTool(HandlerTool): def _setargs(self): for name in dir(cptools.SessionAuth): if not name.startswith("__"): setattr(self, name, None) class CachingTool(Tool): """Caching Tool for CherryPy.""" def _wrapper(self, **kwargs): request = cherrypy.serving.request if _caching.get(**kwargs): request.handler = None else: if request.cacheable: # Note the devious technique here of adding hooks on the fly request.hooks.attach('before_finalize', _caching.tee_output, priority=90) _wrapper.priority = 20 def _setup(self): """Hook caching into cherrypy.request.""" conf = self._merged_args() p = conf.pop("priority", None) cherrypy.serving.request.hooks.attach('before_handler', self._wrapper, priority=p, **conf) class Toolbox(object): """A collection of Tools. This object also functions as a config namespace handler for itself. Custom toolboxes should be added to each Application's toolboxes dict. """ def __init__(self, namespace): self.namespace = namespace def __setattr__(self, name, value): # If the Tool._name is None, supply it from the attribute name. 
if isinstance(value, Tool): if value._name is None: value._name = name value.namespace = self.namespace object.__setattr__(self, name, value) def __enter__(self): """Populate request.toolmaps from tools specified in config.""" cherrypy.serving.request.toolmaps[self.namespace] = map = {} def populate(k, v): toolname, arg = k.split(".", 1) bucket = map.setdefault(toolname, {}) bucket[arg] = v return populate def __exit__(self, exc_type, exc_val, exc_tb): """Run tool._setup() for each tool in our toolmap.""" map = cherrypy.serving.request.toolmaps.get(self.namespace) if map: for name, settings in map.items(): if settings.get("on", False): tool = getattr(self, name) tool._setup() class DeprecatedTool(Tool): _name = None warnmsg = "This Tool is deprecated." def __init__(self, point, warnmsg=None): self.point = point if warnmsg is not None: self.warnmsg = warnmsg def __call__(self, *args, **kwargs): warnings.warn(self.warnmsg) def tool_decorator(f): return f return tool_decorator def _setup(self): warnings.warn(self.warnmsg) default_toolbox = _d = Toolbox("tools") _d.session_auth = SessionAuthTool(cptools.session_auth) _d.allow = Tool('on_start_resource', cptools.allow) _d.proxy = Tool('before_request_body', cptools.proxy, priority=30) _d.response_headers = Tool('on_start_resource', cptools.response_headers) _d.log_tracebacks = Tool('before_error_response', cptools.log_traceback) _d.log_headers = Tool('before_error_response', cptools.log_request_headers) _d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100) _d.err_redirect = ErrorTool(cptools.redirect) _d.etags = Tool('before_finalize', cptools.validate_etags, priority=75) _d.decode = Tool('before_request_body', encoding.decode) # the order of encoding, gzip, caching is important _d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70) _d.gzip = Tool('before_finalize', encoding.gzip, priority=80) _d.staticdir = HandlerTool(static.staticdir) _d.staticfile = HandlerTool(static.staticfile) 
_d.sessions = SessionTool() _d.xmlrpc = ErrorTool(_xmlrpc.on_error) _d.caching = CachingTool('before_handler', _caching.get, 'caching') _d.expires = Tool('before_finalize', _caching.expires) _d.tidy = DeprecatedTool( 'before_finalize', "The tidy tool has been removed from the standard distribution of " "CherryPy. The most recent version can be found at " "http://tools.cherrypy.org/browser.") _d.nsgmls = DeprecatedTool( 'before_finalize', "The nsgmls tool has been removed from the standard distribution of " "CherryPy. The most recent version can be found at " "http://tools.cherrypy.org/browser.") _d.ignore_headers = Tool('before_request_body', cptools.ignore_headers) _d.referer = Tool('before_request_body', cptools.referer) _d.basic_auth = Tool('on_start_resource', auth.basic_auth) _d.digest_auth = Tool('on_start_resource', auth.digest_auth) _d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60) _d.flatten = Tool('before_finalize', cptools.flatten) _d.accept = Tool('on_start_resource', cptools.accept) _d.redirect = Tool('on_start_resource', cptools.redirect) _d.autovary = Tool('on_start_resource', cptools.autovary, priority=0) _d.json_in = Tool('before_request_body', jsontools.json_in, priority=30) _d.json_out = Tool('before_handler', jsontools.json_out, priority=30) _d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1) _d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1) del _d, cptools, encoding, auth, static
19,093
Python
.py
408
38.161765
79
0.641457
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,888
_cpconfig.py
evilhero_mylar/lib/cherrypy/_cpconfig.py
""" Configuration system for CherryPy. Configuration in CherryPy is implemented via dictionaries. Keys are strings which name the mapped value, which may be of any type. Architecture ------------ CherryPy Requests are part of an Application, which runs in a global context, and configuration data may apply to any of those three scopes: Global Configuration entries which apply everywhere are stored in cherrypy.config. Application Entries which apply to each mounted application are stored on the Application object itself, as 'app.config'. This is a two-level dict where each key is a path, or "relative URL" (for example, "/" or "/path/to/my/page"), and each value is a config dict. Usually, this data is provided in the call to tree.mount(root(), config=conf), although you may also use app.merge(conf). Request Each Request object possesses a single 'Request.config' dict. Early in the request process, this dict is populated by merging global config entries, Application entries (whose path equals or is a parent of Request.path_info), and any config acquired while looking up the page handler (see next). Declaration ----------- Configuration data may be supplied as a Python dictionary, as a filename, or as an open file object. When you supply a filename or file, CherryPy uses Python's builtin ConfigParser; you declare Application config by writing each path as a section header:: [/path/to/my/page] request.stream = True To declare global configuration entries, place them in a [global] section. You may also declare config entries directly on the classes and methods (page handlers) that make up your CherryPy application via the ``_cp_config`` attribute. For example:: class Demo: _cp_config = {'tools.gzip.on': True} def index(self): return "Hello world" index.exposed = True index._cp_config = {'request.show_tracebacks': False} .. note:: This behavior is only guaranteed for the default dispatcher. 
Other dispatchers may have different restrictions on where you can attach _cp_config attributes. Namespaces ---------- Configuration keys are separated into namespaces by the first "." in the key. Current namespaces: engine Controls the 'application engine', including autoreload. These can only be declared in the global config. tree Grafts cherrypy.Application objects onto cherrypy.tree. These can only be declared in the global config. hooks Declares additional request-processing functions. log Configures the logging for each application. These can only be declared in the global or / config. request Adds attributes to each Request. response Adds attributes to each Response. server Controls the default HTTP server via cherrypy.server. These can only be declared in the global config. tools Runs and configures additional request-processing packages. wsgi Adds WSGI middleware to an Application's "pipeline". These can only be declared in the app's root config ("/"). checker Controls the 'checker', which looks for common errors in app state (including config) when the engine starts. Global config only. The only key that does not exist in a namespace is the "environment" entry. This special entry 'imports' other config entries from a template stored in cherrypy._cpconfig.environments[environment]. It only applies to the global config, and only when you use cherrypy.config.update. You can define your own namespaces to be called at the Global, Application, or Request level, by adding a named handler to cherrypy.config.namespaces, app.namespaces, or app.request_class.namespaces. The name can be any string, and the handler must be either a callable or a (Python 2.5 style) context manager. """ import cherrypy from cherrypy._cpcompat import set, basestring from cherrypy.lib import reprconf # Deprecated in CherryPy 3.2--remove in 3.3 NamespaceSet = reprconf.NamespaceSet def merge(base, other): """Merge one app config (from a dict, file, or filename) into another. 
If the given config is a filename, it will be appended to the list of files to monitor for "autoreload" changes. """ if isinstance(other, basestring): cherrypy.engine.autoreload.files.add(other) # Load other into base for section, value_map in reprconf.as_dict(other).items(): if not isinstance(value_map, dict): raise ValueError( "Application config must include section headers, but the " "config you tried to merge doesn't have any sections. " "Wrap your config in another dict with paths as section " "headers, for example: {'/': config}.") base.setdefault(section, {}).update(value_map) class Config(reprconf.Config): """The 'global' configuration data for the entire CherryPy process.""" def update(self, config): """Update self from a dict, file or filename.""" if isinstance(config, basestring): # Filename cherrypy.engine.autoreload.files.add(config) reprconf.Config.update(self, config) def _apply(self, config): """Update self from a dict.""" if isinstance(config.get("global"), dict): if len(config) > 1: cherrypy.checker.global_config_contained_paths = True config = config["global"] if 'tools.staticdir.dir' in config: config['tools.staticdir.section'] = "global" reprconf.Config._apply(self, config) def __call__(self, *args, **kwargs): """Decorator for page handlers to set _cp_config.""" if args: raise TypeError( "The cherrypy.config decorator does not accept positional " "arguments; you must use keyword arguments.") def tool_decorator(f): if not hasattr(f, "_cp_config"): f._cp_config = {} for k, v in kwargs.items(): f._cp_config[k] = v return f return tool_decorator # Sphinx begin config.environments Config.environments = environments = { "staging": { 'engine.autoreload.on': False, 'checker.on': False, 'tools.log_headers.on': False, 'request.show_tracebacks': False, 'request.show_mismatched_params': False, }, "production": { 'engine.autoreload.on': False, 'checker.on': False, 'tools.log_headers.on': False, 'request.show_tracebacks': False, 
'request.show_mismatched_params': False, 'log.screen': False, }, "embedded": { # For use with CherryPy embedded in another deployment stack. 'engine.autoreload.on': False, 'checker.on': False, 'tools.log_headers.on': False, 'request.show_tracebacks': False, 'request.show_mismatched_params': False, 'log.screen': False, 'engine.SIGHUP': None, 'engine.SIGTERM': None, }, "test_suite": { 'engine.autoreload.on': False, 'checker.on': False, 'tools.log_headers.on': False, 'request.show_tracebacks': True, 'request.show_mismatched_params': True, 'log.screen': False, }, } # Sphinx end config.environments def _server_namespace_handler(k, v): """Config handler for the "server" namespace.""" atoms = k.split(".", 1) if len(atoms) > 1: # Special-case config keys of the form 'server.servername.socket_port' # to configure additional HTTP servers. if not hasattr(cherrypy, "servers"): cherrypy.servers = {} servername, k = atoms if servername not in cherrypy.servers: from cherrypy import _cpserver cherrypy.servers[servername] = _cpserver.Server() # On by default, but 'on = False' can unsubscribe it (see below). cherrypy.servers[servername].subscribe() if k == 'on': if v: cherrypy.servers[servername].subscribe() else: cherrypy.servers[servername].unsubscribe() else: setattr(cherrypy.servers[servername], k, v) else: setattr(cherrypy.server, k, v) Config.namespaces["server"] = _server_namespace_handler def _engine_namespace_handler(k, v): """Backward compatibility handler for the "engine" namespace.""" engine = cherrypy.engine deprecated = { 'autoreload_on': 'autoreload.on', 'autoreload_frequency': 'autoreload.frequency', 'autoreload_match': 'autoreload.match', 'reload_files': 'autoreload.files', 'deadlock_poll_freq': 'timeout_monitor.frequency' } if k in deprecated: engine.log( 'WARNING: Use of engine.%s is deprecated and will be removed in a ' 'future version. Use engine.%s instead.' 
% (k, deprecated[k])) if k == 'autoreload_on': if v: engine.autoreload.subscribe() else: engine.autoreload.unsubscribe() elif k == 'autoreload_frequency': engine.autoreload.frequency = v elif k == 'autoreload_match': engine.autoreload.match = v elif k == 'reload_files': engine.autoreload.files = set(v) elif k == 'deadlock_poll_freq': engine.timeout_monitor.frequency = v elif k == 'SIGHUP': engine.listeners['SIGHUP'] = set([v]) elif k == 'SIGTERM': engine.listeners['SIGTERM'] = set([v]) elif "." in k: plugin, attrname = k.split(".", 1) plugin = getattr(engine, plugin) if attrname == 'on': if v and hasattr(getattr(plugin, 'subscribe', None), '__call__'): plugin.subscribe() return elif ( (not v) and hasattr(getattr(plugin, 'unsubscribe', None), '__call__') ): plugin.unsubscribe() return setattr(plugin, attrname, v) else: setattr(engine, k, v) Config.namespaces["engine"] = _engine_namespace_handler def _tree_namespace_handler(k, v): """Namespace handler for the 'tree' config namespace.""" if isinstance(v, dict): for script_name, app in v.items(): cherrypy.tree.graft(app, script_name) cherrypy.engine.log("Mounted: %s on %s" % (app, script_name or "/")) else: cherrypy.tree.graft(v, v.script_name) cherrypy.engine.log("Mounted: %s on %s" % (v, v.script_name or "/")) Config.namespaces["tree"] = _tree_namespace_handler
10,692
Python
.py
259
34.127413
79
0.660048
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,889
_cpdispatch.py
evilhero_mylar/lib/cherrypy/_cpdispatch.py
"""CherryPy dispatchers. A 'dispatcher' is the object which looks up the 'page handler' callable and collects config for the current request based on the path_info, other request attributes, and the application architecture. The core calls the dispatcher as early as possible, passing it a 'path_info' argument. The default dispatcher discovers the page handler by matching path_info to a hierarchical arrangement of objects, starting at request.app.root. """ import string import sys import types try: classtype = (type, types.ClassType) except AttributeError: classtype = type import cherrypy from cherrypy._cpcompat import set class PageHandler(object): """Callable which sets response.body.""" def __init__(self, callable, *args, **kwargs): self.callable = callable self.args = args self.kwargs = kwargs def get_args(self): return cherrypy.serving.request.args def set_args(self, args): cherrypy.serving.request.args = args return cherrypy.serving.request.args args = property( get_args, set_args, doc="The ordered args should be accessible from post dispatch hooks" ) def get_kwargs(self): return cherrypy.serving.request.kwargs def set_kwargs(self, kwargs): cherrypy.serving.request.kwargs = kwargs return cherrypy.serving.request.kwargs kwargs = property( get_kwargs, set_kwargs, doc="The named kwargs should be accessible from post dispatch hooks" ) def __call__(self): try: return self.callable(*self.args, **self.kwargs) except TypeError: x = sys.exc_info()[1] try: test_callable_spec(self.callable, self.args, self.kwargs) except cherrypy.HTTPError: raise sys.exc_info()[1] except: raise x raise def test_callable_spec(callable, callable_args, callable_kwargs): """ Inspect callable and test to see if the given args are suitable for it. When an error occurs during the handler's invoking stage there are 2 erroneous cases: 1. Too many parameters passed to a function which doesn't define one of *args or **kwargs. 2. Too little parameters are passed to the function. 
There are 3 sources of parameters to a cherrypy handler. 1. query string parameters are passed as keyword parameters to the handler. 2. body parameters are also passed as keyword parameters. 3. when partial matching occurs, the final path atoms are passed as positional args. Both the query string and path atoms are part of the URI. If they are incorrect, then a 404 Not Found should be raised. Conversely the body parameters are part of the request; if they are invalid a 400 Bad Request. """ show_mismatched_params = getattr( cherrypy.serving.request, 'show_mismatched_params', False) try: (args, varargs, varkw, defaults) = getargspec(callable) except TypeError: if isinstance(callable, object) and hasattr(callable, '__call__'): (args, varargs, varkw, defaults) = getargspec(callable.__call__) else: # If it wasn't one of our own types, re-raise # the original error raise if args and args[0] == 'self': args = args[1:] arg_usage = dict([(arg, 0,) for arg in args]) vararg_usage = 0 varkw_usage = 0 extra_kwargs = set() for i, value in enumerate(callable_args): try: arg_usage[args[i]] += 1 except IndexError: vararg_usage += 1 for key in callable_kwargs.keys(): try: arg_usage[key] += 1 except KeyError: varkw_usage += 1 extra_kwargs.add(key) # figure out which args have defaults. args_with_defaults = args[-len(defaults or []):] for i, val in enumerate(defaults or []): # Defaults take effect only when the arg hasn't been used yet. if arg_usage[args_with_defaults[i]] == 0: arg_usage[args_with_defaults[i]] += 1 missing_args = [] multiple_args = [] for key, usage in arg_usage.items(): if usage == 0: missing_args.append(key) elif usage > 1: multiple_args.append(key) if missing_args: # In the case where the method allows body arguments # there are 3 potential errors: # 1. not enough query string parameters -> 404 # 2. not enough body parameters -> 400 # 3. 
not enough path parts (partial matches) -> 404 # # We can't actually tell which case it is, # so I'm raising a 404 because that covers 2/3 of the # possibilities # # In the case where the method does not allow body # arguments it's definitely a 404. message = None if show_mismatched_params: message = "Missing parameters: %s" % ",".join(missing_args) raise cherrypy.HTTPError(404, message=message) # the extra positional arguments come from the path - 404 Not Found if not varargs and vararg_usage > 0: raise cherrypy.HTTPError(404) body_params = cherrypy.serving.request.body.params or {} body_params = set(body_params.keys()) qs_params = set(callable_kwargs.keys()) - body_params if multiple_args: if qs_params.intersection(set(multiple_args)): # If any of the multiple parameters came from the query string then # it's a 404 Not Found error = 404 else: # Otherwise it's a 400 Bad Request error = 400 message = None if show_mismatched_params: message = "Multiple values for parameters: "\ "%s" % ",".join(multiple_args) raise cherrypy.HTTPError(error, message=message) if not varkw and varkw_usage > 0: # If there were extra query string parameters, it's a 404 Not Found extra_qs_params = set(qs_params).intersection(extra_kwargs) if extra_qs_params: message = None if show_mismatched_params: message = "Unexpected query string "\ "parameters: %s" % ", ".join(extra_qs_params) raise cherrypy.HTTPError(404, message=message) # If there were any extra body parameters, it's a 400 Not Found extra_body_params = set(body_params).intersection(extra_kwargs) if extra_body_params: message = None if show_mismatched_params: message = "Unexpected body parameters: "\ "%s" % ", ".join(extra_body_params) raise cherrypy.HTTPError(400, message=message) try: import inspect except ImportError: test_callable_spec = lambda callable, args, kwargs: None else: getargspec = inspect.getargspec # Python 3 requires using getfullargspec if keyword-only arguments are present if hasattr(inspect, 'getfullargspec'): 
def getargspec(callable): return inspect.getfullargspec(callable)[:4] class LateParamPageHandler(PageHandler): """When passing cherrypy.request.params to the page handler, we do not want to capture that dict too early; we want to give tools like the decoding tool a chance to modify the params dict in-between the lookup of the handler and the actual calling of the handler. This subclass takes that into account, and allows request.params to be 'bound late' (it's more complicated than that, but that's the effect). """ def _get_kwargs(self): kwargs = cherrypy.serving.request.params.copy() if self._kwargs: kwargs.update(self._kwargs) return kwargs def _set_kwargs(self, kwargs): cherrypy.serving.request.kwargs = kwargs self._kwargs = kwargs kwargs = property(_get_kwargs, _set_kwargs, doc='page handler kwargs (with ' 'cherrypy.request.params copied in)') if sys.version_info < (3, 0): punctuation_to_underscores = string.maketrans( string.punctuation, '_' * len(string.punctuation)) def validate_translator(t): if not isinstance(t, str) or len(t) != 256: raise ValueError( "The translate argument must be a str of len 256.") else: punctuation_to_underscores = str.maketrans( string.punctuation, '_' * len(string.punctuation)) def validate_translator(t): if not isinstance(t, dict): raise ValueError("The translate argument must be a dict.") class Dispatcher(object): """CherryPy Dispatcher which walks a tree of objects to find a handler. The tree is rooted at cherrypy.request.app.root, and each hierarchical component in the path_info argument is matched to a corresponding nested attribute of the root object. Matching handlers must have an 'exposed' attribute which evaluates to True. The special method name "index" matches a URI which ends in a slash ("/"). The special method name "default" may match a portion of the path_info (but only when no longer substring of the path_info matches some other object). This is the default, built-in dispatcher for CherryPy. 
""" dispatch_method_name = '_cp_dispatch' """ The name of the dispatch method that nodes may optionally implement to provide their own dynamic dispatch algorithm. """ def __init__(self, dispatch_method_name=None, translate=punctuation_to_underscores): validate_translator(translate) self.translate = translate if dispatch_method_name: self.dispatch_method_name = dispatch_method_name def __call__(self, path_info): """Set handler and config for the current request.""" request = cherrypy.serving.request func, vpath = self.find_handler(path_info) if func: # Decode any leftover %2F in the virtual_path atoms. vpath = [x.replace("%2F", "/") for x in vpath] request.handler = LateParamPageHandler(func, *vpath) else: request.handler = cherrypy.NotFound() def find_handler(self, path): """Return the appropriate page handler, plus any virtual path. This will return two objects. The first will be a callable, which can be used to generate page output. Any parameters from the query string or request body will be sent to that callable as keyword arguments. The callable is found by traversing the application's tree, starting from cherrypy.request.app.root, and matching path components to successive objects in the tree. For example, the URL "/path/to/handler" might return root.path.to.handler. The second object returned will be a list of names which are 'virtual path' components: parts of the URL which are dynamic, and were not used when looking up the handler. These virtual path components are passed to the handler as positional arguments. """ request = cherrypy.serving.request app = request.app root = app.root dispatch_name = self.dispatch_method_name # Get config for the root object/path. 
fullpath = [x for x in path.strip('/').split('/') if x] + ['index'] fullpath_len = len(fullpath) segleft = fullpath_len nodeconf = {} if hasattr(root, "_cp_config"): nodeconf.update(root._cp_config) if "/" in app.config: nodeconf.update(app.config["/"]) object_trail = [['root', root, nodeconf, segleft]] node = root iternames = fullpath[:] while iternames: name = iternames[0] # map to legal Python identifiers (e.g. replace '.' with '_') objname = name.translate(self.translate) nodeconf = {} subnode = getattr(node, objname, None) pre_len = len(iternames) if subnode is None: dispatch = getattr(node, dispatch_name, None) if dispatch and hasattr(dispatch, '__call__') and not \ getattr(dispatch, 'exposed', False) and \ pre_len > 1: # Don't expose the hidden 'index' token to _cp_dispatch # We skip this if pre_len == 1 since it makes no sense # to call a dispatcher when we have no tokens left. index_name = iternames.pop() subnode = dispatch(vpath=iternames) iternames.append(index_name) else: # We didn't find a path, but keep processing in case there # is a default() handler. iternames.pop(0) else: # We found the path, remove the vpath entry iternames.pop(0) segleft = len(iternames) if segleft > pre_len: # No path segment was removed. Raise an error. raise cherrypy.CherryPyException( "A vpath segment was added. Custom dispatchers may only " + "remove elements. While trying to process " + "{0} in {1}".format(name, fullpath) ) elif segleft == pre_len: # Assume that the handler used the current path segment, but # did not pop it. This allows things like # return getattr(self, vpath[0], None) iternames.pop(0) segleft -= 1 node = subnode if node is not None: # Get _cp_config attached to this node. if hasattr(node, "_cp_config"): nodeconf.update(node._cp_config) # Mix in values from app.config for this path. 
existing_len = fullpath_len - pre_len if existing_len != 0: curpath = '/' + '/'.join(fullpath[0:existing_len]) else: curpath = '' new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft] for seg in new_segs: curpath += '/' + seg if curpath in app.config: nodeconf.update(app.config[curpath]) object_trail.append([name, node, nodeconf, segleft]) def set_conf(): """Collapse all object_trail config into cherrypy.request.config. """ base = cherrypy.config.copy() # Note that we merge the config from each node # even if that node was None. for name, obj, conf, segleft in object_trail: base.update(conf) if 'tools.staticdir.dir' in conf: base['tools.staticdir.section'] = '/' + \ '/'.join(fullpath[0:fullpath_len - segleft]) return base # Try successive objects (reverse order) num_candidates = len(object_trail) - 1 for i in range(num_candidates, -1, -1): name, candidate, nodeconf, segleft = object_trail[i] if candidate is None: continue # Try a "default" method on the current leaf. if hasattr(candidate, "default"): defhandler = candidate.default if getattr(defhandler, 'exposed', False): # Insert any extra _cp_config from the default handler. conf = getattr(defhandler, "_cp_config", {}) object_trail.insert( i + 1, ["default", defhandler, conf, segleft]) request.config = set_conf() # See https://bitbucket.org/cherrypy/cherrypy/issue/613 request.is_index = path.endswith("/") return defhandler, fullpath[fullpath_len - segleft:-1] # Uncomment the next line to restrict positional params to # "default". # if i < num_candidates - 2: continue # Try the current leaf. if getattr(candidate, 'exposed', False): request.config = set_conf() if i == num_candidates: # We found the extra ".index". Mark request so tools # can redirect if path_info has no trailing slash. request.is_index = True else: # We're not at an 'index' handler. Mark request so tools # can redirect if path_info has NO trailing slash. 
# Note that this also includes handlers which take # positional parameters (virtual paths). request.is_index = False return candidate, fullpath[fullpath_len - segleft:-1] # We didn't find anything request.config = set_conf() return None, [] class MethodDispatcher(Dispatcher): """Additional dispatch based on cherrypy.request.method.upper(). Methods named GET, POST, etc will be called on an exposed class. The method names must be all caps; the appropriate Allow header will be output showing all capitalized method names as allowable HTTP verbs. Note that the containing class must be exposed, not the methods. """ def __call__(self, path_info): """Set handler and config for the current request.""" request = cherrypy.serving.request resource, vpath = self.find_handler(path_info) if resource: # Set Allow header avail = [m for m in dir(resource) if m.isupper()] if "GET" in avail and "HEAD" not in avail: avail.append("HEAD") avail.sort() cherrypy.serving.response.headers['Allow'] = ", ".join(avail) # Find the subhandler meth = request.method.upper() func = getattr(resource, meth, None) if func is None and meth == "HEAD": func = getattr(resource, "GET", None) if func: # Grab any _cp_config on the subhandler. if hasattr(func, "_cp_config"): request.config.update(func._cp_config) # Decode any leftover %2F in the virtual_path atoms. vpath = [x.replace("%2F", "/") for x in vpath] request.handler = LateParamPageHandler(func, *vpath) else: request.handler = cherrypy.HTTPError(405) else: request.handler = cherrypy.NotFound() class RoutesDispatcher(object): """A Routes based dispatcher for CherryPy.""" def __init__(self, full_result=False, **mapper_options): """ Routes dispatcher Set full_result to True if you wish the controller and the action to be passed on to the page handler parameters. By default they won't be. 
""" import routes self.full_result = full_result self.controllers = {} self.mapper = routes.Mapper(**mapper_options) self.mapper.controller_scan = self.controllers.keys def connect(self, name, route, controller, **kwargs): self.controllers[name] = controller self.mapper.connect(name, route, controller=name, **kwargs) def redirect(self, url): raise cherrypy.HTTPRedirect(url) def __call__(self, path_info): """Set handler and config for the current request.""" func = self.find_handler(path_info) if func: cherrypy.serving.request.handler = LateParamPageHandler(func) else: cherrypy.serving.request.handler = cherrypy.NotFound() def find_handler(self, path_info): """Find the right page handler, and set request.config.""" import routes request = cherrypy.serving.request config = routes.request_config() config.mapper = self.mapper if hasattr(request, 'wsgi_environ'): config.environ = request.wsgi_environ config.host = request.headers.get('Host', None) config.protocol = request.scheme config.redirect = self.redirect result = self.mapper.match(path_info) config.mapper_dict = result params = {} if result: params = result.copy() if not self.full_result: params.pop('controller', None) params.pop('action', None) request.params.update(params) # Get config for the root object/path. request.config = base = cherrypy.config.copy() curpath = "" def merge(nodeconf): if 'tools.staticdir.dir' in nodeconf: nodeconf['tools.staticdir.section'] = curpath or "/" base.update(nodeconf) app = request.app root = app.root if hasattr(root, "_cp_config"): merge(root._cp_config) if "/" in app.config: merge(app.config["/"]) # Mix in values from app.config. 
atoms = [x for x in path_info.split("/") if x] if atoms: last = atoms.pop() else: last = None for atom in atoms: curpath = "/".join((curpath, atom)) if curpath in app.config: merge(app.config[curpath]) handler = None if result: controller = result.get('controller') controller = self.controllers.get(controller, controller) if controller: if isinstance(controller, classtype): controller = controller() # Get config from the controller. if hasattr(controller, "_cp_config"): merge(controller._cp_config) action = result.get('action') if action is not None: handler = getattr(controller, action, None) # Get config from the handler if hasattr(handler, "_cp_config"): merge(handler._cp_config) else: handler = controller # Do the last path atom here so it can # override the controller's _cp_config. if last: curpath = "/".join((curpath, last)) if curpath in app.config: merge(app.config[curpath]) return handler def XMLRPCDispatcher(next_dispatcher=Dispatcher()): from cherrypy.lib import xmlrpcutil def xmlrpc_dispatch(path_info): path_info = xmlrpcutil.patched_path(path_info) return next_dispatcher(path_info) return xmlrpc_dispatch def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains): """ Select a different handler based on the Host header. This can be useful when running multiple sites within one CP server. It allows several domains to point to different parts of a single website structure. For example:: http://www.domain.example -> root http://www.domain2.example -> root/domain2/ http://www.domain2.example:443 -> root/secure can be accomplished via the following config:: [/] request.dispatch = cherrypy.dispatch.VirtualHost( **{'www.domain2.example': '/domain2', 'www.domain2.example:443': '/secure', }) next_dispatcher The next dispatcher object in the dispatch chain. The VirtualHost dispatcher adds a prefix to the URL and calls another dispatcher. Defaults to cherrypy.dispatch.Dispatcher(). 
use_x_forwarded_host If True (the default), any "X-Forwarded-Host" request header will be used instead of the "Host" header. This is commonly added by HTTP servers (such as Apache) when proxying. ``**domains`` A dict of {host header value: virtual prefix} pairs. The incoming "Host" request header is looked up in this dict, and, if a match is found, the corresponding "virtual prefix" value will be prepended to the URL path before calling the next dispatcher. Note that you often need separate entries for "example.com" and "www.example.com". In addition, "Host" headers may contain the port number. """ from cherrypy.lib import httputil def vhost_dispatch(path_info): request = cherrypy.serving.request header = request.headers.get domain = header('Host', '') if use_x_forwarded_host: domain = header("X-Forwarded-Host", domain) prefix = domains.get(domain, "") if prefix: path_info = httputil.urljoin(prefix, path_info) result = next_dispatcher(path_info) # Touch up staticdir config. See # https://bitbucket.org/cherrypy/cherrypy/issue/614. section = request.config.get('tools.staticdir.section') if section: section = section[len(prefix):] request.config['tools.staticdir.section'] = section return result return vhost_dispatch
25,342
Python
.py
567
34.153439
82
0.608371
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,890
test_auth_basic.py
evilhero_mylar/lib/cherrypy/test/test_auth_basic.py
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8

import cherrypy
from cherrypy._cpcompat import md5, ntob
from cherrypy.lib import auth_basic
from cherrypy.test import helper


class BasicAuthTest(helper.CPWebCase):
    """Functional tests for the ``auth_basic`` tool.

    Mounts three handlers: a public root, ``/basic`` protected by a
    plain user/password dict, and ``/basic2`` protected by a custom
    checkpassword callable that compares md5 hashes.
    """

    def setup_server():
        # Runs once per test server; builds the app tree and config.
        class Root:
            def index(self):
                return "This is public."
            index.exposed = True

        class BasicProtected:
            def index(self):
                # cherrypy.request.login is set by the auth_basic tool
                # after successful authentication.
                return "Hello %s, you've been authorized." % cherrypy.request.login
            index.exposed = True

        class BasicProtected2:
            def index(self):
                return "Hello %s, you've been authorized." % cherrypy.request.login
            index.exposed = True

        # Plaintext credentials for checkpassword_dict, and the same
        # user with an md5-hashed password for the custom checker.
        userpassdict = {'xuser' : 'xpassword'}
        userhashdict = {'xuser' : md5(ntob('xpassword')).hexdigest()}

        def checkpasshash(realm, user, password):
            # Custom checkpassword callable: hash the submitted password
            # and compare against the stored digest.  Returns False for
            # unknown users or mismatched hashes.
            p = userhashdict.get(user)
            return p and p == md5(ntob(password)).hexdigest() or False

        conf = {'/basic': {'tools.auth_basic.on': True,
                           'tools.auth_basic.realm': 'wonderland',
                           'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(userpassdict)},
                '/basic2': {'tools.auth_basic.on': True,
                            'tools.auth_basic.realm': 'wonderland',
                            'tools.auth_basic.checkpassword': checkpasshash},
               }

        root = Root()
        root.basic = BasicProtected()
        root.basic2 = BasicProtected2()
        cherrypy.tree.mount(root, config=conf)
    # CPWebCase expects setup_server as a staticmethod; the original
    # CherryPy 2.x-era style applies the wrapper after the def.
    setup_server = staticmethod(setup_server)

    def testPublic(self):
        # The unprotected root must be reachable without credentials.
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('This is public.')

    def testBasic(self):
        # No credentials -> 401 with the configured realm challenge.
        self.getPage("/basic/")
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate',
                          'Basic realm="wonderland"')

        # Credentials with a corrupted final base64 character
        # (wrong password) must be rejected.
        self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)

        # base64("xuser:xpassword") -> authorized.
        self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
        self.assertStatus('200 OK')
        self.assertBody("Hello xuser, you've been authorized.")

    def testBasic2(self):
        # Same scenario as testBasic, but against the handler whose
        # passwords are verified through the custom hash checker.
        self.getPage("/basic2/")
        self.assertStatus(401)
        self.assertHeader('WWW-Authenticate',
                          'Basic realm="wonderland"')

        self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
        self.assertStatus(401)

        self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
        self.assertStatus('200 OK')
        self.assertBody("Hello xuser, you've been authorized.")
2,853
Python
.py
61
36.409836
106
0.613915
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,891
test_request_obj.py
evilhero_mylar/lib/cherrypy/test/test_request_obj.py
"""Basic tests for the cherrypy.Request object.""" import os localDir = os.path.dirname(__file__) import sys import types from cherrypy._cpcompat import IncompleteRead, ntob, unicodestr import cherrypy from cherrypy import _cptools, tools from cherrypy.lib import httputil defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "PROPFIND") # Client-side code # from cherrypy.test import helper class RequestObjectTests(helper.CPWebCase): def setup_server(): class Root: def index(self): return "hello" index.exposed = True def scheme(self): return cherrypy.request.scheme scheme.exposed = True root = Root() class TestType(type): """Metaclass which automatically exposes all functions in each subclass, and adds an instance of the subclass as an attribute of root. """ def __init__(cls, name, bases, dct): type.__init__(cls, name, bases, dct) for value in dct.values(): if isinstance(value, types.FunctionType): value.exposed = True setattr(root, name.lower(), cls()) class Test(object): __metaclass__ = TestType class Params(Test): def index(self, thing): return repr(thing) def ismap(self, x, y): return "Coordinates: %s, %s" % (x, y) def default(self, *args, **kwargs): return "args: %s kwargs: %s" % (args, kwargs) default._cp_config = {'request.query_string_encoding': 'latin1'} class ParamErrorsCallable(object): exposed = True def __call__(self): return "data" class ParamErrors(Test): def one_positional(self, param1): return "data" one_positional.exposed = True def one_positional_args(self, param1, *args): return "data" one_positional_args.exposed = True def one_positional_args_kwargs(self, param1, *args, **kwargs): return "data" one_positional_args_kwargs.exposed = True def one_positional_kwargs(self, param1, **kwargs): return "data" one_positional_kwargs.exposed = True def no_positional(self): return "data" no_positional.exposed = True def no_positional_args(self, *args): return "data" no_positional_args.exposed = True def 
no_positional_args_kwargs(self, *args, **kwargs): return "data" no_positional_args_kwargs.exposed = True def no_positional_kwargs(self, **kwargs): return "data" no_positional_kwargs.exposed = True callable_object = ParamErrorsCallable() def raise_type_error(self, **kwargs): raise TypeError("Client Error") raise_type_error.exposed = True def raise_type_error_with_default_param(self, x, y=None): return '%d' % 'a' # throw an exception raise_type_error_with_default_param.exposed = True def callable_error_page(status, **kwargs): return "Error %s - Well, I'm very sorry but you haven't paid!" % status class Error(Test): _cp_config = {'tools.log_tracebacks.on': True, } def reason_phrase(self): raise cherrypy.HTTPError("410 Gone fishin'") def custom(self, err='404'): raise cherrypy.HTTPError(int(err), "No, <b>really</b>, not found!") custom._cp_config = {'error_page.404': os.path.join(localDir, "static/index.html"), 'error_page.401': callable_error_page, } def custom_default(self): return 1 + 'a' # raise an unexpected error custom_default._cp_config = {'error_page.default': callable_error_page} def noexist(self): raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!") noexist._cp_config = {'error_page.404': "nonexistent.html"} def page_method(self): raise ValueError() def page_yield(self): yield "howdy" raise ValueError() def page_streamed(self): yield "word up" raise ValueError() yield "very oops" page_streamed._cp_config = {"response.stream": True} def cause_err_in_finalize(self): # Since status must start with an int, this should error. 
cherrypy.response.status = "ZOO OK" cause_err_in_finalize._cp_config = {'request.show_tracebacks': False} def rethrow(self): """Test that an error raised here will be thrown out to the server.""" raise ValueError() rethrow._cp_config = {'request.throw_errors': True} class Expect(Test): def expectation_failed(self): expect = cherrypy.request.headers.elements("Expect") if expect and expect[0].value != '100-continue': raise cherrypy.HTTPError(400) raise cherrypy.HTTPError(417, 'Expectation Failed') class Headers(Test): def default(self, headername): """Spit back out the value for the requested header.""" return cherrypy.request.headers[headername] def doubledheaders(self): # From http://www.cherrypy.org/ticket/165: # "header field names should not be case sensitive sayes the rfc. # if i set a headerfield in complete lowercase i end up with two # header fields, one in lowercase, the other in mixed-case." # Set the most common headers hMap = cherrypy.response.headers hMap['content-type'] = "text/html" hMap['content-length'] = 18 hMap['server'] = 'CherryPy headertest' hMap['location'] = ('%s://%s:%s/headers/' % (cherrypy.request.local.ip, cherrypy.request.local.port, cherrypy.request.scheme)) # Set a rare header for fun hMap['Expires'] = 'Thu, 01 Dec 2194 16:00:00 GMT' return "double header test" def ifmatch(self): val = cherrypy.request.headers['If-Match'] assert isinstance(val, unicodestr) cherrypy.response.headers['ETag'] = val return val class HeaderElements(Test): def get_elements(self, headername): e = cherrypy.request.headers.elements(headername) return "\n".join([unicodestr(x) for x in e]) class Method(Test): def index(self): m = cherrypy.request.method if m in defined_http_methods or m == "CONNECT": return m if m == "LINK": raise cherrypy.HTTPError(405) else: raise cherrypy.HTTPError(501) def parameterized(self, data): return data def request_body(self): # This should be a file object (temp file), # which CP will just pipe back out if we tell it to. 
return cherrypy.request.body def reachable(self): return "success" class Divorce: """HTTP Method handlers shouldn't collide with normal method names. For example, a GET-handler shouldn't collide with a method named 'get'. If you build HTTP method dispatching into CherryPy, rewrite this class to use your new dispatch mechanism and make sure that: "GET /divorce HTTP/1.1" maps to divorce.index() and "GET /divorce/get?ID=13 HTTP/1.1" maps to divorce.get() """ documents = {} def index(self): yield "<h1>Choose your document</h1>\n" yield "<ul>\n" for id, contents in self.documents.items(): yield (" <li><a href='/divorce/get?ID=%s'>%s</a>: %s</li>\n" % (id, id, contents)) yield "</ul>" index.exposed = True def get(self, ID): return ("Divorce document %s: %s" % (ID, self.documents.get(ID, "empty"))) get.exposed = True root.divorce = Divorce() class ThreadLocal(Test): def index(self): existing = repr(getattr(cherrypy.request, "asdf", None)) cherrypy.request.asdf = "rassfrassin" return existing appconf = { '/method': {'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")}, } cherrypy.tree.mount(root, config=appconf) setup_server = staticmethod(setup_server) def test_scheme(self): self.getPage("/scheme") self.assertBody(self.scheme) def testParams(self): self.getPage("/params/?thing=a") self.assertBody("u'a'") self.getPage("/params/?thing=a&thing=b&thing=c") self.assertBody("[u'a', u'b', u'c']") # Test friendly error message when given params are not accepted. 
cherrypy.config.update({"request.show_mismatched_params": True}) self.getPage("/params/?notathing=meeting") self.assertInBody("Missing parameters: thing") self.getPage("/params/?thing=meeting&notathing=meeting") self.assertInBody("Unexpected query string parameters: notathing") # Test ability to turn off friendly error messages cherrypy.config.update({"request.show_mismatched_params": False}) self.getPage("/params/?notathing=meeting") self.assertInBody("Not Found") self.getPage("/params/?thing=meeting&notathing=meeting") self.assertInBody("Not Found") # Test "% HEX HEX"-encoded URL, param keys, and values self.getPage("/params/%d4%20%e3/cheese?Gruy%E8re=Bulgn%e9ville") self.assertBody(r"args: ('\xd4 \xe3', 'cheese') " r"kwargs: {'Gruy\xe8re': u'Bulgn\xe9ville'}") # Make sure that encoded = and & get parsed correctly self.getPage("/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2") self.assertBody(r"args: ('code',) " r"kwargs: {'url': u'http://cherrypy.org/index?a=1&b=2'}") # Test coordinates sent by <img ismap> self.getPage("/params/ismap?223,114") self.assertBody("Coordinates: 223, 114") # Test "name[key]" dict-like params self.getPage("/params/dictlike?a[1]=1&a[2]=2&b=foo&b[bar]=baz") self.assertBody( "args: ('dictlike',) " "kwargs: {'a[1]': u'1', 'b[bar]': u'baz', 'b': u'foo', 'a[2]': u'2'}") def testParamErrors(self): # test that all of the handlers work when given # the correct parameters in order to ensure that the # errors below aren't coming from some other source. 
for uri in ( '/paramerrors/one_positional?param1=foo', '/paramerrors/one_positional_args?param1=foo', '/paramerrors/one_positional_args/foo', '/paramerrors/one_positional_args/foo/bar/baz', '/paramerrors/one_positional_args_kwargs?param1=foo&param2=bar', '/paramerrors/one_positional_args_kwargs/foo?param2=bar&param3=baz', '/paramerrors/one_positional_args_kwargs/foo/bar/baz?param2=bar&param3=baz', '/paramerrors/one_positional_kwargs?param1=foo&param2=bar&param3=baz', '/paramerrors/one_positional_kwargs/foo?param4=foo&param2=bar&param3=baz', '/paramerrors/no_positional', '/paramerrors/no_positional_args/foo', '/paramerrors/no_positional_args/foo/bar/baz', '/paramerrors/no_positional_args_kwargs?param1=foo&param2=bar', '/paramerrors/no_positional_args_kwargs/foo?param2=bar', '/paramerrors/no_positional_args_kwargs/foo/bar/baz?param2=bar&param3=baz', '/paramerrors/no_positional_kwargs?param1=foo&param2=bar', '/paramerrors/callable_object', ): self.getPage(uri) self.assertStatus(200) # query string parameters are part of the URI, so if they are wrong # for a particular handler, the status MUST be a 404. 
error_msgs = [ 'Missing parameters', 'Nothing matches the given URI', 'Multiple values for parameters', 'Unexpected query string parameters', 'Unexpected body parameters', ] for uri, msg in ( ('/paramerrors/one_positional', error_msgs[0]), ('/paramerrors/one_positional?foo=foo', error_msgs[0]), ('/paramerrors/one_positional/foo/bar/baz', error_msgs[1]), ('/paramerrors/one_positional/foo?param1=foo', error_msgs[2]), ('/paramerrors/one_positional/foo?param1=foo&param2=foo', error_msgs[2]), ('/paramerrors/one_positional_args/foo?param1=foo&param2=foo', error_msgs[2]), ('/paramerrors/one_positional_args/foo/bar/baz?param2=foo', error_msgs[3]), ('/paramerrors/one_positional_args_kwargs/foo/bar/baz?param1=bar&param3=baz', error_msgs[2]), ('/paramerrors/one_positional_kwargs/foo?param1=foo&param2=bar&param3=baz', error_msgs[2]), ('/paramerrors/no_positional/boo', error_msgs[1]), ('/paramerrors/no_positional?param1=foo', error_msgs[3]), ('/paramerrors/no_positional_args/boo?param1=foo', error_msgs[3]), ('/paramerrors/no_positional_kwargs/boo?param1=foo', error_msgs[1]), ('/paramerrors/callable_object?param1=foo', error_msgs[3]), ('/paramerrors/callable_object/boo', error_msgs[1]), ): for show_mismatched_params in (True, False): cherrypy.config.update({'request.show_mismatched_params': show_mismatched_params}) self.getPage(uri) self.assertStatus(404) if show_mismatched_params: self.assertInBody(msg) else: self.assertInBody("Not Found") # if body parameters are wrong, a 400 must be returned. 
for uri, body, msg in ( ('/paramerrors/one_positional/foo', 'param1=foo', error_msgs[2]), ('/paramerrors/one_positional/foo', 'param1=foo&param2=foo', error_msgs[2]), ('/paramerrors/one_positional_args/foo', 'param1=foo&param2=foo', error_msgs[2]), ('/paramerrors/one_positional_args/foo/bar/baz', 'param2=foo', error_msgs[4]), ('/paramerrors/one_positional_args_kwargs/foo/bar/baz', 'param1=bar&param3=baz', error_msgs[2]), ('/paramerrors/one_positional_kwargs/foo', 'param1=foo&param2=bar&param3=baz', error_msgs[2]), ('/paramerrors/no_positional', 'param1=foo', error_msgs[4]), ('/paramerrors/no_positional_args/boo', 'param1=foo', error_msgs[4]), ('/paramerrors/callable_object', 'param1=foo', error_msgs[4]), ): for show_mismatched_params in (True, False): cherrypy.config.update({'request.show_mismatched_params': show_mismatched_params}) self.getPage(uri, method='POST', body=body) self.assertStatus(400) if show_mismatched_params: self.assertInBody(msg) else: self.assertInBody("Bad Request") # even if body parameters are wrong, if we get the uri wrong, then # it's a 404 for uri, body, msg in ( ('/paramerrors/one_positional?param2=foo', 'param1=foo', error_msgs[3]), ('/paramerrors/one_positional/foo/bar', 'param2=foo', error_msgs[1]), ('/paramerrors/one_positional_args/foo/bar?param2=foo', 'param3=foo', error_msgs[3]), ('/paramerrors/one_positional_kwargs/foo/bar', 'param2=bar&param3=baz', error_msgs[1]), ('/paramerrors/no_positional?param1=foo', 'param2=foo', error_msgs[3]), ('/paramerrors/no_positional_args/boo?param2=foo', 'param1=foo', error_msgs[3]), ('/paramerrors/callable_object?param2=bar', 'param1=foo', error_msgs[3]), ): for show_mismatched_params in (True, False): cherrypy.config.update({'request.show_mismatched_params': show_mismatched_params}) self.getPage(uri, method='POST', body=body) self.assertStatus(404) if show_mismatched_params: self.assertInBody(msg) else: self.assertInBody("Not Found") # In the case that a handler raises a TypeError we should # let 
that type error through. for uri in ( '/paramerrors/raise_type_error', '/paramerrors/raise_type_error_with_default_param?x=0', '/paramerrors/raise_type_error_with_default_param?x=0&y=0', ): self.getPage(uri, method='GET') self.assertStatus(500) self.assertTrue('Client Error', self.body) def testErrorHandling(self): self.getPage("/error/missing") self.assertStatus(404) self.assertErrorPage(404, "The path '/error/missing' was not found.") ignore = helper.webtest.ignored_exceptions ignore.append(ValueError) try: valerr = '\n raise ValueError()\nValueError' self.getPage("/error/page_method") self.assertErrorPage(500, pattern=valerr) self.getPage("/error/page_yield") self.assertErrorPage(500, pattern=valerr) if (cherrypy.server.protocol_version == "HTTP/1.0" or getattr(cherrypy.server, "using_apache", False)): self.getPage("/error/page_streamed") # Because this error is raised after the response body has # started, the status should not change to an error status. self.assertStatus(200) self.assertBody("word up") else: # Under HTTP/1.1, the chunked transfer-coding is used. # The HTTP client will choke when the output is incomplete. self.assertRaises((ValueError, IncompleteRead), self.getPage, "/error/page_streamed") # No traceback should be present self.getPage("/error/cause_err_in_finalize") msg = "Illegal response status from server ('ZOO' is non-numeric)." self.assertErrorPage(500, msg, None) finally: ignore.pop() # Test HTTPError with a reason-phrase in the status arg. self.getPage('/error/reason_phrase') self.assertStatus("410 Gone fishin'") # Test custom error page for a specific error. self.getPage("/error/custom") self.assertStatus(404) self.assertBody("Hello, world\r\n" + (" " * 499)) # Test custom error page for a specific error. self.getPage("/error/custom?err=401") self.assertStatus(401) self.assertBody("Error 401 Unauthorized - Well, I'm very sorry but you haven't paid!") # Test default custom error page. 
self.getPage("/error/custom_default") self.assertStatus(500) self.assertBody("Error 500 Internal Server Error - Well, I'm very sorry but you haven't paid!".ljust(513)) # Test error in custom error page (ticket #305). # Note that the message is escaped for HTML (ticket #310). self.getPage("/error/noexist") self.assertStatus(404) msg = ("No, &lt;b&gt;really&lt;/b&gt;, not found!<br />" "In addition, the custom error page failed:\n<br />" "IOError: [Errno 2] No such file or directory: 'nonexistent.html'") self.assertInBody(msg) if getattr(cherrypy.server, "using_apache", False): pass else: # Test throw_errors (ticket #186). self.getPage("/error/rethrow") self.assertInBody("raise ValueError()") def testExpect(self): e = ('Expect', '100-continue') self.getPage("/headerelements/get_elements?headername=Expect", [e]) self.assertBody('100-continue') self.getPage("/expect/expectation_failed", [e]) self.assertStatus(417) def testHeaderElements(self): # Accept-* header elements should be sorted, with most preferred first. h = [('Accept', 'audio/*; q=0.2, audio/basic')] self.getPage("/headerelements/get_elements?headername=Accept", h) self.assertStatus(200) self.assertBody("audio/basic\n" "audio/*;q=0.2") h = [('Accept', 'text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c')] self.getPage("/headerelements/get_elements?headername=Accept", h) self.assertStatus(200) self.assertBody("text/x-c\n" "text/html\n" "text/x-dvi;q=0.8\n" "text/plain;q=0.5") # Test that more specific media ranges get priority. 
h = [('Accept', 'text/*, text/html, text/html;level=1, */*')] self.getPage("/headerelements/get_elements?headername=Accept", h) self.assertStatus(200) self.assertBody("text/html;level=1\n" "text/html\n" "text/*\n" "*/*") # Test Accept-Charset h = [('Accept-Charset', 'iso-8859-5, unicode-1-1;q=0.8')] self.getPage("/headerelements/get_elements?headername=Accept-Charset", h) self.assertStatus("200 OK") self.assertBody("iso-8859-5\n" "unicode-1-1;q=0.8") # Test Accept-Encoding h = [('Accept-Encoding', 'gzip;q=1.0, identity; q=0.5, *;q=0')] self.getPage("/headerelements/get_elements?headername=Accept-Encoding", h) self.assertStatus("200 OK") self.assertBody("gzip;q=1.0\n" "identity;q=0.5\n" "*;q=0") # Test Accept-Language h = [('Accept-Language', 'da, en-gb;q=0.8, en;q=0.7')] self.getPage("/headerelements/get_elements?headername=Accept-Language", h) self.assertStatus("200 OK") self.assertBody("da\n" "en-gb;q=0.8\n" "en;q=0.7") # Test malformed header parsing. See http://www.cherrypy.org/ticket/763. self.getPage("/headerelements/get_elements?headername=Content-Type", # Note the illegal trailing ";" headers=[('Content-Type', 'text/html; charset=utf-8;')]) self.assertStatus(200) self.assertBody("text/html;charset=utf-8") def test_repeated_headers(self): # Test that two request headers are collapsed into one. # See http://www.cherrypy.org/ticket/542. self.getPage("/headers/Accept-Charset", headers=[("Accept-Charset", "iso-8859-5"), ("Accept-Charset", "unicode-1-1;q=0.8")]) self.assertBody("iso-8859-5, unicode-1-1;q=0.8") # Tests that each header only appears once, regardless of case. self.getPage("/headers/doubledheaders") self.assertBody("double header test") hnames = [name.title() for name, val in self.headers] for key in ['Content-Length', 'Content-Type', 'Date', 'Expires', 'Location', 'Server']: self.assertEqual(hnames.count(key), 1, self.headers) def test_encoded_headers(self): # First, make sure the innards work like expected. 
self.assertEqual(httputil.decode_TEXT(u"=?utf-8?q?f=C3=BCr?="), u"f\xfcr") if cherrypy.server.protocol_version == "HTTP/1.1": # Test RFC-2047-encoded request and response header values u = u'\u212bngstr\xf6m' c = u"=E2=84=ABngstr=C3=B6m" self.getPage("/headers/ifmatch", [('If-Match', u'=?utf-8?q?%s?=' % c)]) # The body should be utf-8 encoded. self.assertBody("\xe2\x84\xabngstr\xc3\xb6m") # But the Etag header should be RFC-2047 encoded (binary) self.assertHeader("ETag", u'=?utf-8?b?4oSrbmdzdHLDtm0=?=') # Test a *LONG* RFC-2047-encoded request and response header value self.getPage("/headers/ifmatch", [('If-Match', u'=?utf-8?q?%s?=' % (c * 10))]) self.assertBody("\xe2\x84\xabngstr\xc3\xb6m" * 10) # Note: this is different output for Python3, but it decodes fine. etag = self.assertHeader("ETag", '=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt' '4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt' '4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt' '4oSrbmdzdHLDtm0=?=') self.assertEqual(httputil.decode_TEXT(etag), u * 10) def test_header_presence(self): # If we don't pass a Content-Type header, it should not be present # in cherrypy.request.headers self.getPage("/headers/Content-Type", headers=[]) self.assertStatus(500) # If Content-Type is present in the request, it should be present in # cherrypy.request.headers self.getPage("/headers/Content-Type", headers=[("Content-type", "application/json")]) self.assertBody("application/json") def test_basic_HTTPMethods(self): helper.webtest.methods_with_bodies = ("POST", "PUT", "PROPFIND") # Test that all defined HTTP methods work. for m in defined_http_methods: self.getPage("/method/", method=m) # HEAD requests should not return any body. 
if m == "HEAD": self.assertBody("") elif m == "TRACE": # Some HTTP servers (like modpy) have their own TRACE support self.assertEqual(self.body[:5], ntob("TRACE")) else: self.assertBody(m) # Request a PUT method with a form-urlencoded body self.getPage("/method/parameterized", method="PUT", body="data=on+top+of+other+things") self.assertBody("on top of other things") # Request a PUT method with a file body b = "one thing on top of another" h = [("Content-Type", "text/plain"), ("Content-Length", str(len(b)))] self.getPage("/method/request_body", headers=h, method="PUT", body=b) self.assertStatus(200) self.assertBody(b) # Request a PUT method with a file body but no Content-Type. # See http://www.cherrypy.org/ticket/790. b = ntob("one thing on top of another") self.persistent = True try: conn = self.HTTP_CONN conn.putrequest("PUT", "/method/request_body", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader('Content-Length', str(len(b))) conn.endheaders() conn.send(b) response = conn.response_class(conn.sock, method="PUT") response.begin() self.assertEqual(response.status, 200) self.body = response.read() self.assertBody(b) finally: self.persistent = False # Request a PUT method with no body whatsoever (not an empty one). # See http://www.cherrypy.org/ticket/650. # Provide a C-T or webtest will provide one (and a C-L) for us. 
h = [("Content-Type", "text/plain")] self.getPage("/method/reachable", headers=h, method="PUT") self.assertStatus(411) # Request a custom method with a request body b = ('<?xml version="1.0" encoding="utf-8" ?>\n\n' '<propfind xmlns="DAV:"><prop><getlastmodified/>' '</prop></propfind>') h = [('Content-Type', 'text/xml'), ('Content-Length', str(len(b)))] self.getPage("/method/request_body", headers=h, method="PROPFIND", body=b) self.assertStatus(200) self.assertBody(b) # Request a disallowed method self.getPage("/method/", method="LINK") self.assertStatus(405) # Request an unknown method self.getPage("/method/", method="SEARCH") self.assertStatus(501) # For method dispatchers: make sure that an HTTP method doesn't # collide with a virtual path atom. If you build HTTP-method # dispatching into the core, rewrite these handlers to use # your dispatch idioms. self.getPage("/divorce/get?ID=13") self.assertBody('Divorce document 13: empty') self.assertStatus(200) self.getPage("/divorce/", method="GET") self.assertBody('<h1>Choose your document</h1>\n<ul>\n</ul>') self.assertStatus(200) def test_CONNECT_method(self): if getattr(cherrypy.server, "using_apache", False): return self.skip("skipped due to known Apache differences... ") self.getPage("/method/", method="CONNECT") self.assertBody("CONNECT") def testEmptyThreadlocals(self): results = [] for x in range(20): self.getPage("/threadlocal/") results.append(self.body) self.assertEqual(results, [ntob("None")] * 20)
31,327
Python
.py
589
38.320883
114
0.571765
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,892
_test_decorators.py
evilhero_mylar/lib/cherrypy/test/_test_decorators.py
"""Test module for the @-decorator syntax, which is version-specific""" from cherrypy import expose, tools from cherrypy._cpcompat import ntob class ExposeExamples(object): @expose def no_call(self): return "Mr E. R. Bradshaw" @expose() def call_empty(self): return "Mrs. B.J. Smegma" @expose("call_alias") def nesbitt(self): return "Mr Nesbitt" @expose(["alias1", "alias2"]) def andrews(self): return "Mr Ken Andrews" @expose(alias="alias3") def watson(self): return "Mr. and Mrs. Watson" class ToolExamples(object): @expose @tools.response_headers(headers=[('Content-Type', 'application/data')]) def blah(self): yield ntob("blah") # This is here to demonstrate that _cp_config = {...} overwrites # the _cp_config attribute added by the Tool decorator. You have # to write _cp_config[k] = v or _cp_config.update(...) instead. blah._cp_config['response.stream'] = True
1,022
Python
.py
28
30.035714
75
0.656217
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,893
test_config_server.py
evilhero_mylar/lib/cherrypy/test/test_config_server.py
"""Tests for the CherryPy configuration system.""" import os, sys localDir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import socket import time import cherrypy # Client-side code # from cherrypy.test import helper class ServerConfigTests(helper.CPWebCase): def setup_server(): class Root: def index(self): return cherrypy.request.wsgi_environ['SERVER_PORT'] index.exposed = True def upload(self, file): return "Size: %s" % len(file.file.read()) upload.exposed = True def tinyupload(self): return cherrypy.request.body.read() tinyupload.exposed = True tinyupload._cp_config = {'request.body.maxbytes': 100} cherrypy.tree.mount(Root()) cherrypy.config.update({ 'server.socket_host': '0.0.0.0', 'server.socket_port': 9876, 'server.max_request_body_size': 200, 'server.max_request_header_size': 500, 'server.socket_timeout': 0.5, # Test explicit server.instance 'server.2.instance': 'cherrypy._cpwsgi_server.CPWSGIServer', 'server.2.socket_port': 9877, # Test non-numeric <servername> # Also test default server.instance = builtin server 'server.yetanother.socket_port': 9878, }) setup_server = staticmethod(setup_server) PORT = 9876 def testBasicConfig(self): self.getPage("/") self.assertBody(str(self.PORT)) def testAdditionalServers(self): if self.scheme == 'https': return self.skip("not available under ssl") self.PORT = 9877 self.getPage("/") self.assertBody(str(self.PORT)) self.PORT = 9878 self.getPage("/") self.assertBody(str(self.PORT)) def testMaxRequestSizePerHandler(self): if getattr(cherrypy.server, "using_apache", False): return self.skip("skipped due to known Apache differences... 
") self.getPage('/tinyupload', method="POST", headers=[('Content-Type', 'text/plain'), ('Content-Length', '100')], body="x" * 100) self.assertStatus(200) self.assertBody("x" * 100) self.getPage('/tinyupload', method="POST", headers=[('Content-Type', 'text/plain'), ('Content-Length', '101')], body="x" * 101) self.assertStatus(413) def testMaxRequestSize(self): if getattr(cherrypy.server, "using_apache", False): return self.skip("skipped due to known Apache differences... ") for size in (500, 5000, 50000): self.getPage("/", headers=[('From', "x" * 500)]) self.assertStatus(413) # Test for http://www.cherrypy.org/ticket/421 # (Incorrect border condition in readline of SizeCheckWrapper). # This hangs in rev 891 and earlier. lines256 = "x" * 248 self.getPage("/", headers=[('Host', '%s:%s' % (self.HOST, self.PORT)), ('From', lines256)]) # Test upload body = '\r\n'.join([ '--x', 'Content-Disposition: form-data; name="file"; filename="hello.txt"', 'Content-Type: text/plain', '', '%s', '--x--']) partlen = 200 - len(body) b = body % ("x" * partlen) h = [("Content-type", "multipart/form-data; boundary=x"), ("Content-Length", "%s" % len(b))] self.getPage('/upload', h, "POST", b) self.assertBody('Size: %d' % partlen) b = body % ("x" * 200) h = [("Content-type", "multipart/form-data; boundary=x"), ("Content-Length", "%s" % len(b))] self.getPage('/upload', h, "POST", b) self.assertStatus(413)
4,177
Python
.py
95
31.273684
80
0.541879
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,894
test_routes.py
evilhero_mylar/lib/cherrypy/test/test_routes.py
import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import cherrypy from cherrypy.test import helper import nose class RoutesDispatchTest(helper.CPWebCase): def setup_server(): try: import routes except ImportError: raise nose.SkipTest('Install routes to test RoutesDispatcher code') class Dummy: def index(self): return "I said good day!" class City: def __init__(self, name): self.name = name self.population = 10000 def index(self, **kwargs): return "Welcome to %s, pop. %s" % (self.name, self.population) index._cp_config = {'tools.response_headers.on': True, 'tools.response_headers.headers': [('Content-Language', 'en-GB')]} def update(self, **kwargs): self.population = kwargs['pop'] return "OK" d = cherrypy.dispatch.RoutesDispatcher() d.connect(action='index', name='hounslow', route='/hounslow', controller=City('Hounslow')) d.connect(name='surbiton', route='/surbiton', controller=City('Surbiton'), action='index', conditions=dict(method=['GET'])) d.mapper.connect('/surbiton', controller='surbiton', action='update', conditions=dict(method=['POST'])) d.connect('main', ':action', controller=Dummy()) conf = {'/': {'request.dispatch': d}} cherrypy.tree.mount(root=None, config=conf) setup_server = staticmethod(setup_server) def test_Routes_Dispatch(self): self.getPage("/hounslow") self.assertStatus("200 OK") self.assertBody("Welcome to Hounslow, pop. 10000") self.getPage("/foo") self.assertStatus("404 Not Found") self.getPage("/surbiton") self.assertStatus("200 OK") self.assertBody("Welcome to Surbiton, pop. 10000") self.getPage("/surbiton", method="POST", body="pop=1327") self.assertStatus("200 OK") self.assertBody("OK") self.getPage("/surbiton") self.assertStatus("200 OK") self.assertHeader("Content-Language", "en-GB") self.assertBody("Welcome to Surbiton, pop. 1327")
2,411
Python
.py
52
33.865385
98
0.590506
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,895
test_wsgi_ns.py
evilhero_mylar/lib/cherrypy/test/test_wsgi_ns.py
import cherrypy from cherrypy.test import helper class WSGI_Namespace_Test(helper.CPWebCase): def setup_server(): class WSGIResponse(object): def __init__(self, appresults): self.appresults = appresults self.iter = iter(appresults) def __iter__(self): return self def next(self): return self.iter.next() def close(self): if hasattr(self.appresults, "close"): self.appresults.close() class ChangeCase(object): def __init__(self, app, to=None): self.app = app self.to = to def __call__(self, environ, start_response): res = self.app(environ, start_response) class CaseResults(WSGIResponse): def next(this): return getattr(this.iter.next(), self.to)() return CaseResults(res) class Replacer(object): def __init__(self, app, map={}): self.app = app self.map = map def __call__(self, environ, start_response): res = self.app(environ, start_response) class ReplaceResults(WSGIResponse): def next(this): line = this.iter.next() for k, v in self.map.iteritems(): line = line.replace(k, v) return line return ReplaceResults(res) class Root(object): def index(self): return "HellO WoRlD!" index.exposed = True root_conf = {'wsgi.pipeline': [('replace', Replacer)], 'wsgi.replace.map': {'L': 'X', 'l': 'r'}, } app = cherrypy.Application(Root()) app.wsgiapp.pipeline.append(('changecase', ChangeCase)) app.wsgiapp.config['changecase'] = {'to': 'upper'} cherrypy.tree.mount(app, config={'/': root_conf}) setup_server = staticmethod(setup_server) def test_pipeline(self): if not cherrypy.server.httpserver: return self.skip() self.getPage("/") # If body is "HEXXO WORXD!", the middleware was applied out of order. self.assertBody("HERRO WORRD!")
2,570
Python
.py
56
28.142857
77
0.512142
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,896
test_tools.py
evilhero_mylar/lib/cherrypy/test/test_tools.py
"""Test the various means of instantiating and invoking tools.""" import gzip import sys from cherrypy._cpcompat import BytesIO, copyitems, itervalues, IncompleteRead, ntob, ntou, xrange import time timeout = 0.2 import types import cherrypy from cherrypy import tools europoundUnicode = ntou('\x80\xa3') # Client-side code # from cherrypy.test import helper class ToolTests(helper.CPWebCase): def setup_server(): # Put check_access in a custom toolbox with its own namespace myauthtools = cherrypy._cptools.Toolbox("myauth") def check_access(default=False): if not getattr(cherrypy.request, "userid", default): raise cherrypy.HTTPError(401) myauthtools.check_access = cherrypy.Tool('before_request_body', check_access) def numerify(): def number_it(body): for chunk in body: for k, v in cherrypy.request.numerify_map: chunk = chunk.replace(k, v) yield chunk cherrypy.response.body = number_it(cherrypy.response.body) class NumTool(cherrypy.Tool): def _setup(self): def makemap(): m = self._merged_args().get("map", {}) cherrypy.request.numerify_map = copyitems(m) cherrypy.request.hooks.attach('on_start_resource', makemap) def critical(): cherrypy.request.error_response = cherrypy.HTTPError(502).set_response critical.failsafe = True cherrypy.request.hooks.attach('on_start_resource', critical) cherrypy.request.hooks.attach(self._point, self.callable) tools.numerify = NumTool('before_finalize', numerify) # It's not mandatory to inherit from cherrypy.Tool. class NadsatTool: def __init__(self): self.ended = {} self._name = "nadsat" def nadsat(self): def nadsat_it_up(body): for chunk in body: chunk = chunk.replace(ntob("good"), ntob("horrorshow")) chunk = chunk.replace(ntob("piece"), ntob("lomtick")) yield chunk cherrypy.response.body = nadsat_it_up(cherrypy.response.body) nadsat.priority = 0 def cleanup(self): # This runs after the request has been completely written out. 
cherrypy.response.body = [ntob("razdrez")] id = cherrypy.request.params.get("id") if id: self.ended[id] = True cleanup.failsafe = True def _setup(self): cherrypy.request.hooks.attach('before_finalize', self.nadsat) cherrypy.request.hooks.attach('on_end_request', self.cleanup) tools.nadsat = NadsatTool() def pipe_body(): cherrypy.request.process_request_body = False clen = int(cherrypy.request.headers['Content-Length']) cherrypy.request.body = cherrypy.request.rfile.read(clen) # Assert that we can use a callable object instead of a function. class Rotator(object): def __call__(self, scale): r = cherrypy.response r.collapse_body() r.body = [chr((ord(x) + scale) % 256) for x in r.body[0]] cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator()) def stream_handler(next_handler, *args, **kwargs): cherrypy.response.output = o = BytesIO() try: response = next_handler(*args, **kwargs) # Ignore the response and return our accumulated output instead. return o.getvalue() finally: o.close() cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(stream_handler) class Root: def index(self): return "Howdy earth!" index.exposed = True def tarfile(self): cherrypy.response.output.write(ntob('I am ')) cherrypy.response.output.write(ntob('a tarfile')) tarfile.exposed = True tarfile._cp_config = {'tools.streamer.on': True} def euro(self): hooks = list(cherrypy.request.hooks['before_finalize']) hooks.sort() cbnames = [x.callback.__name__ for x in hooks] assert cbnames == ['gzip'], cbnames priorities = [x.priority for x in hooks] assert priorities == [80], priorities yield ntou("Hello,") yield ntou("world") yield europoundUnicode euro.exposed = True # Bare hooks def pipe(self): return cherrypy.request.body pipe.exposed = True pipe._cp_config = {'hooks.before_request_body': pipe_body} # Multiple decorators; include kwargs just for fun. # Note that rotator must run before gzip. 
def decorated_euro(self, *vpath): yield ntou("Hello,") yield ntou("world") yield europoundUnicode decorated_euro.exposed = True decorated_euro = tools.gzip(compress_level=6)(decorated_euro) decorated_euro = tools.rotator(scale=3)(decorated_euro) root = Root() class TestType(type): """Metaclass which automatically exposes all functions in each subclass, and adds an instance of the subclass as an attribute of root. """ def __init__(cls, name, bases, dct): type.__init__(cls, name, bases, dct) for value in itervalues(dct): if isinstance(value, types.FunctionType): value.exposed = True setattr(root, name.lower(), cls()) class Test(object): __metaclass__ = TestType # METHOD ONE: # Declare Tools in _cp_config class Demo(Test): _cp_config = {"tools.nadsat.on": True} def index(self, id=None): return "A good piece of cherry pie" def ended(self, id): return repr(tools.nadsat.ended[id]) def err(self, id=None): raise ValueError() def errinstream(self, id=None): yield "nonconfidential" raise ValueError() yield "confidential" # METHOD TWO: decorator using Tool() # We support Python 2.3, but the @-deco syntax would look like this: # @tools.check_access() def restricted(self): return "Welcome!" restricted = myauthtools.check_access()(restricted) userid = restricted def err_in_onstart(self): return "success!" def stream(self, id=None): for x in xrange(100000000): yield str(x) stream._cp_config = {'response.stream': True} conf = { # METHOD THREE: # Declare Tools in detached config '/demo': { 'tools.numerify.on': True, 'tools.numerify.map': {ntob("pie"): ntob("3.14159")}, }, '/demo/restricted': { 'request.show_tracebacks': False, }, '/demo/userid': { 'request.show_tracebacks': False, 'myauth.check_access.default': True, }, '/demo/errinstream': { 'response.stream': True, }, '/demo/err_in_onstart': { # Because this isn't a dict, on_start_resource will error. 
'tools.numerify.map': "pie->3.14159" }, # Combined tools '/euro': { 'tools.gzip.on': True, 'tools.encode.on': True, }, # Priority specified in config '/decorated_euro/subpath': { 'tools.gzip.priority': 10, }, # Handler wrappers '/tarfile': {'tools.streamer.on': True} } app = cherrypy.tree.mount(root, config=conf) app.request_class.namespaces['myauth'] = myauthtools if sys.version_info >= (2, 5): from cherrypy.test import _test_decorators root.tooldecs = _test_decorators.ToolExamples() setup_server = staticmethod(setup_server) def testHookErrors(self): self.getPage("/demo/?id=1") # If body is "razdrez", then on_end_request is being called too early. self.assertBody("A horrorshow lomtick of cherry 3.14159") # If this fails, then on_end_request isn't being called at all. time.sleep(0.1) self.getPage("/demo/ended/1") self.assertBody("True") valerr = '\n raise ValueError()\nValueError' self.getPage("/demo/err?id=3") # If body is "razdrez", then on_end_request is being called too early. self.assertErrorPage(502, pattern=valerr) # If this fails, then on_end_request isn't being called at all. time.sleep(0.1) self.getPage("/demo/ended/3") self.assertBody("True") # If body is "razdrez", then on_end_request is being called too early. if (cherrypy.server.protocol_version == "HTTP/1.0" or getattr(cherrypy.server, "using_apache", False)): self.getPage("/demo/errinstream?id=5") # Because this error is raised after the response body has # started, the status should not change to an error status. self.assertStatus("200 OK") self.assertBody("nonconfidential") else: # Because this error is raised after the response body has # started, and because it's chunked output, an error is raised by # the HTTP client when it encounters incomplete output. self.assertRaises((ValueError, IncompleteRead), self.getPage, "/demo/errinstream?id=5") # If this fails, then on_end_request isn't being called at all. 
time.sleep(0.1) self.getPage("/demo/ended/5") self.assertBody("True") # Test the "__call__" technique (compile-time decorator). self.getPage("/demo/restricted") self.assertErrorPage(401) # Test compile-time decorator with kwargs from config. self.getPage("/demo/userid") self.assertBody("Welcome!") def testEndRequestOnDrop(self): old_timeout = None try: httpserver = cherrypy.server.httpserver old_timeout = httpserver.timeout except (AttributeError, IndexError): return self.skip() try: httpserver.timeout = timeout # Test that on_end_request is called even if the client drops. self.persistent = True try: conn = self.HTTP_CONN conn.putrequest("GET", "/demo/stream?id=9", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() # Skip the rest of the request and close the conn. This will # cause the server's active socket to error, which *should* # result in the request being aborted, and request.close being # called all the way up the stack (including WSGI middleware), # eventually calling our on_end_request hook. finally: self.persistent = False time.sleep(timeout * 2) # Test that the on_end_request hook was called. self.getPage("/demo/ended/9") self.assertBody("True") finally: if old_timeout is not None: httpserver.timeout = old_timeout def testGuaranteedHooks(self): # The 'critical' on_start_resource hook is 'failsafe' (guaranteed # to run even if there are failures in other on_start methods). # This is NOT true of the other hooks. # Here, we have set up a failure in NumerifyTool.numerify_map, # but our 'critical' hook should run and set the error to 502. 
self.getPage("/demo/err_in_onstart") self.assertErrorPage(502) self.assertInBody("AttributeError: 'str' object has no attribute 'items'") def testCombinedTools(self): expectedResult = (ntou("Hello,world") + europoundUnicode).encode('utf-8') zbuf = BytesIO() zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9) zfile.write(expectedResult) zfile.close() self.getPage("/euro", headers=[("Accept-Encoding", "gzip"), ("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7")]) self.assertInBody(zbuf.getvalue()[:3]) zbuf = BytesIO() zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6) zfile.write(expectedResult) zfile.close() self.getPage("/decorated_euro", headers=[("Accept-Encoding", "gzip")]) self.assertInBody(zbuf.getvalue()[:3]) # This returns a different value because gzip's priority was # lowered in conf, allowing the rotator to run after gzip. # Of course, we don't want breakage in production apps, # but it proves the priority was changed. self.getPage("/decorated_euro/subpath", headers=[("Accept-Encoding", "gzip")]) self.assertInBody(''.join([chr((ord(x) + 3) % 256) for x in zbuf.getvalue()])) def testBareHooks(self): content = "bit of a pain in me gulliver" self.getPage("/pipe", headers=[("Content-Length", str(len(content))), ("Content-Type", "text/plain")], method="POST", body=content) self.assertBody(content) def testHandlerWrapperTool(self): self.getPage("/tarfile") self.assertBody("I am a tarfile") def testToolWithConfig(self): if not sys.version_info >= (2, 5): return self.skip("skipped (Python 2.5+ only)") self.getPage('/tooldecs/blah') self.assertHeader('Content-Type', 'application/data') def testWarnToolOn(self): # get try: numon = cherrypy.tools.numerify.on except AttributeError: pass else: raise AssertionError("Tool.on did not error as it should have.") # set try: cherrypy.tools.numerify.on = True except AttributeError: pass else: raise AssertionError("Tool.on did not error as it should have.")
15,529
Python
.py
327
33.165138
97
0.56571
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,897
test_wsgi_vhost.py
evilhero_mylar/lib/cherrypy/test/test_wsgi_vhost.py
import cherrypy from cherrypy.test import helper class WSGI_VirtualHost_Test(helper.CPWebCase): def setup_server(): class ClassOfRoot(object): def __init__(self, name): self.name = name def index(self): return "Welcome to the %s website!" % self.name index.exposed = True default = cherrypy.Application(None) domains = {} for year in range(1997, 2008): app = cherrypy.Application(ClassOfRoot('Class of %s' % year)) domains['www.classof%s.example' % year] = app cherrypy.tree.graft(cherrypy._cpwsgi.VirtualHost(default, domains)) setup_server = staticmethod(setup_server) def test_welcome(self): if not cherrypy.server.using_wsgi: return self.skip("skipped (not using WSGI)... ") for year in range(1997, 2008): self.getPage("/", headers=[('Host', 'www.classof%s.example' % year)]) self.assertBody("Welcome to the Class of %s website!" % year)
1,127
Python
.py
23
35.608696
81
0.612808
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,898
test_virtualhost.py
evilhero_mylar/lib/cherrypy/test/test_virtualhost.py
import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import cherrypy from cherrypy.test import helper class VirtualHostTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return "Hello, world" index.exposed = True def dom4(self): return "Under construction" dom4.exposed = True def method(self, value): return "You sent %s" % repr(value) method.exposed = True class VHost: def __init__(self, sitename): self.sitename = sitename def index(self): return "Welcome to %s" % self.sitename index.exposed = True def vmethod(self, value): return "You sent %s" % repr(value) vmethod.exposed = True def url(self): return cherrypy.url("nextpage") url.exposed = True # Test static as a handler (section must NOT include vhost prefix) static = cherrypy.tools.staticdir.handler(section='/static', dir=curdir) root = Root() root.mydom2 = VHost("Domain 2") root.mydom3 = VHost("Domain 3") hostmap = {'www.mydom2.com': '/mydom2', 'www.mydom3.com': '/mydom3', 'www.mydom4.com': '/dom4', } cherrypy.tree.mount(root, config={ '/': {'request.dispatch': cherrypy.dispatch.VirtualHost(**hostmap)}, # Test static in config (section must include vhost prefix) '/mydom2/static2': {'tools.staticdir.on': True, 'tools.staticdir.root': curdir, 'tools.staticdir.dir': 'static', 'tools.staticdir.index': 'index.html', }, }) setup_server = staticmethod(setup_server) def testVirtualHost(self): self.getPage("/", [('Host', 'www.mydom1.com')]) self.assertBody('Hello, world') self.getPage("/mydom2/", [('Host', 'www.mydom1.com')]) self.assertBody('Welcome to Domain 2') self.getPage("/", [('Host', 'www.mydom2.com')]) self.assertBody('Welcome to Domain 2') self.getPage("/", [('Host', 'www.mydom3.com')]) self.assertBody('Welcome to Domain 3') self.getPage("/", [('Host', 'www.mydom4.com')]) self.assertBody('Under construction') # Test GET, POST, and positional params self.getPage("/method?value=root") self.assertBody("You sent u'root'") self.getPage("/vmethod?value=dom2+GET", [('Host', 
'www.mydom2.com')]) self.assertBody("You sent u'dom2 GET'") self.getPage("/vmethod", [('Host', 'www.mydom3.com')], method="POST", body="value=dom3+POST") self.assertBody("You sent u'dom3 POST'") self.getPage("/vmethod/pos", [('Host', 'www.mydom3.com')]) self.assertBody("You sent 'pos'") # Test that cherrypy.url uses the browser url, not the virtual url self.getPage("/url", [('Host', 'www.mydom2.com')]) self.assertBody("%s://www.mydom2.com/nextpage" % self.scheme) def test_VHost_plus_Static(self): # Test static as a handler self.getPage("/static/style.css", [('Host', 'www.mydom2.com')]) self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/css;charset=utf-8') # Test static in config self.getPage("/static2/dirback.jpg", [('Host', 'www.mydom2.com')]) self.assertStatus('200 OK') self.assertHeader('Content-Type', 'image/jpeg') # Test static config with "index" arg self.getPage("/static2/", [('Host', 'www.mydom2.com')]) self.assertStatus('200 OK') self.assertBody('Hello, world\r\n') # Since tools.trailing_slash is on by default, this should redirect self.getPage("/static2", [('Host', 'www.mydom2.com')]) self.assertStatus(301)
4,219
Python
.py
87
35.195402
84
0.561117
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,899
test_states.py
evilhero_mylar/lib/cherrypy/test/test_states.py
from cherrypy._cpcompat import BadStatusLine, ntob import os import sys import threading import time import cherrypy engine = cherrypy.engine thisdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) class Dependency: def __init__(self, bus): self.bus = bus self.running = False self.startcount = 0 self.gracecount = 0 self.threads = {} def subscribe(self): self.bus.subscribe('start', self.start) self.bus.subscribe('stop', self.stop) self.bus.subscribe('graceful', self.graceful) self.bus.subscribe('start_thread', self.startthread) self.bus.subscribe('stop_thread', self.stopthread) def start(self): self.running = True self.startcount += 1 def stop(self): self.running = False def graceful(self): self.gracecount += 1 def startthread(self, thread_id): self.threads[thread_id] = None def stopthread(self, thread_id): del self.threads[thread_id] db_connection = Dependency(engine) def setup_server(): class Root: def index(self): return "Hello World" index.exposed = True def ctrlc(self): raise KeyboardInterrupt() ctrlc.exposed = True def graceful(self): engine.graceful() return "app was (gracefully) restarted succesfully" graceful.exposed = True def block_explicit(self): while True: if cherrypy.response.timed_out: cherrypy.response.timed_out = False return "broken!" time.sleep(0.01) block_explicit.exposed = True def block_implicit(self): time.sleep(0.5) return "response.timeout = %s" % cherrypy.response.timeout block_implicit.exposed = True cherrypy.tree.mount(Root()) cherrypy.config.update({ 'environment': 'test_suite', 'engine.deadlock_poll_freq': 0.1, }) db_connection.subscribe() # ------------ Enough helpers. Time for real live test cases. 
------------ # from cherrypy.test import helper class ServerStateTests(helper.CPWebCase): setup_server = staticmethod(setup_server) def setUp(self): cherrypy.server.socket_timeout = 0.1 def test_0_NormalStateFlow(self): engine.stop() # Our db_connection should not be running self.assertEqual(db_connection.running, False) self.assertEqual(db_connection.startcount, 1) self.assertEqual(len(db_connection.threads), 0) # Test server start engine.start() self.assertEqual(engine.state, engine.states.STARTED) host = cherrypy.server.socket_host port = cherrypy.server.socket_port self.assertRaises(IOError, cherrypy._cpserver.check_port, host, port) # The db_connection should be running now self.assertEqual(db_connection.running, True) self.assertEqual(db_connection.startcount, 2) self.assertEqual(len(db_connection.threads), 0) self.getPage("/") self.assertBody("Hello World") self.assertEqual(len(db_connection.threads), 1) # Test engine stop. This will also stop the HTTP server. engine.stop() self.assertEqual(engine.state, engine.states.STOPPED) # Verify that our custom stop function was called self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) # Block the main thread now and verify that exit() works. 
def exittest(): self.getPage("/") self.assertBody("Hello World") engine.exit() cherrypy.server.start() engine.start_with_callback(exittest) engine.block() self.assertEqual(engine.state, engine.states.EXITING) def test_1_Restart(self): cherrypy.server.start() engine.start() # The db_connection should be running now self.assertEqual(db_connection.running, True) grace = db_connection.gracecount self.getPage("/") self.assertBody("Hello World") self.assertEqual(len(db_connection.threads), 1) # Test server restart from this thread engine.graceful() self.assertEqual(engine.state, engine.states.STARTED) self.getPage("/") self.assertBody("Hello World") self.assertEqual(db_connection.running, True) self.assertEqual(db_connection.gracecount, grace + 1) self.assertEqual(len(db_connection.threads), 1) # Test server restart from inside a page handler self.getPage("/graceful") self.assertEqual(engine.state, engine.states.STARTED) self.assertBody("app was (gracefully) restarted succesfully") self.assertEqual(db_connection.running, True) self.assertEqual(db_connection.gracecount, grace + 2) # Since we are requesting synchronously, is only one thread used? # Note that the "/graceful" request has been flushed. self.assertEqual(len(db_connection.threads), 0) engine.stop() self.assertEqual(engine.state, engine.states.STOPPED) self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) def test_2_KeyboardInterrupt(self): # Raise a keyboard interrupt in the HTTP server's main thread. # We must start the server in this, the main thread engine.start() cherrypy.server.start() self.persistent = True try: # Make the first request and assert there's no "Connection: close". 
self.getPage("/") self.assertStatus('200 OK') self.assertBody("Hello World") self.assertNoHeader("Connection") cherrypy.server.httpserver.interrupt = KeyboardInterrupt engine.block() self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) self.assertEqual(engine.state, engine.states.EXITING) finally: self.persistent = False # Raise a keyboard interrupt in a page handler; on multithreaded # servers, this should occur in one of the worker threads. # This should raise a BadStatusLine error, since the worker # thread will just die without writing a response. engine.start() cherrypy.server.start() try: self.getPage("/ctrlc") except BadStatusLine: pass else: print(self.body) self.fail("AssertionError: BadStatusLine not raised") engine.block() self.assertEqual(db_connection.running, False) self.assertEqual(len(db_connection.threads), 0) def test_3_Deadlocks(self): cherrypy.config.update({'response.timeout': 0.2}) engine.start() cherrypy.server.start() try: self.assertNotEqual(engine.timeout_monitor.thread, None) # Request a "normal" page. self.assertEqual(engine.timeout_monitor.servings, []) self.getPage("/") self.assertBody("Hello World") # request.close is called async. while engine.timeout_monitor.servings: sys.stdout.write(".") time.sleep(0.01) # Request a page that explicitly checks itself for deadlock. # The deadlock_timeout should be 2 secs. self.getPage("/block_explicit") self.assertBody("broken!") # Request a page that implicitly breaks deadlock. # If we deadlock, we want to touch as little code as possible, # so we won't even call handle_error, just bail ASAP. 
self.getPage("/block_implicit") self.assertStatus(500) self.assertInBody("raise cherrypy.TimeoutError()") finally: engine.exit() def test_4_Autoreload(self): # Start the demo script in a new process p = helper.CPProcess(ssl=(self.scheme.lower()=='https')) p.write_conf( extra='test_case_name: "test_4_Autoreload"') p.start(imports='cherrypy.test._test_states_demo') try: self.getPage("/start") start = float(self.body) # Give the autoreloader time to cache the file time. time.sleep(2) # Touch the file os.utime(os.path.join(thisdir, "_test_states_demo.py"), None) # Give the autoreloader time to re-exec the process time.sleep(2) host = cherrypy.server.socket_host port = cherrypy.server.socket_port cherrypy._cpserver.wait_for_occupied_port(host, port) self.getPage("/start") self.assert_(float(self.body) > start) finally: # Shut down the spawned process self.getPage("/exit") p.join() def test_5_Start_Error(self): # If a process errors during start, it should stop the engine # and exit with a non-zero exit code. p = helper.CPProcess(ssl=(self.scheme.lower()=='https'), wait=True) p.write_conf( extra="""starterror: True test_case_name: "test_5_Start_Error" """ ) p.start(imports='cherrypy.test._test_states_demo') if p.exit_code == 0: self.fail("Process failed to return nonzero exit code.") class PluginTests(helper.CPWebCase): def test_daemonize(self): if os.name not in ['posix']: return self.skip("skipped (not on posix) ") self.HOST = '127.0.0.1' self.PORT = 8081 # Spawn the process and wait, when this returns, the original process # is finished. If it daemonized properly, we should still be able # to access pages. p = helper.CPProcess(ssl=(self.scheme.lower()=='https'), wait=True, daemonize=True, socket_host='127.0.0.1', socket_port=8081) p.write_conf( extra='test_case_name: "test_daemonize"') p.start(imports='cherrypy.test._test_states_demo') try: # Just get the pid of the daemonization process. 
self.getPage("/pid") self.assertStatus(200) page_pid = int(self.body) self.assertEqual(page_pid, p.get_pid()) finally: # Shut down the spawned process self.getPage("/exit") p.join() # Wait until here to test the exit code because we want to ensure # that we wait for the daemon to finish running before we fail. if p.exit_code != 0: self.fail("Daemonized parent process failed to exit cleanly.") class SignalHandlingTests(helper.CPWebCase): def test_SIGHUP_tty(self): # When not daemonized, SIGHUP should shut down the server. try: from signal import SIGHUP except ImportError: return self.skip("skipped (no SIGHUP) ") # Spawn the process. p = helper.CPProcess(ssl=(self.scheme.lower()=='https')) p.write_conf( extra='test_case_name: "test_SIGHUP_tty"') p.start(imports='cherrypy.test._test_states_demo') # Send a SIGHUP os.kill(p.get_pid(), SIGHUP) # This might hang if things aren't working right, but meh. p.join() def test_SIGHUP_daemonized(self): # When daemonized, SIGHUP should restart the server. try: from signal import SIGHUP except ImportError: return self.skip("skipped (no SIGHUP) ") if os.name not in ['posix']: return self.skip("skipped (not on posix) ") # Spawn the process and wait, when this returns, the original process # is finished. If it daemonized properly, we should still be able # to access pages. p = helper.CPProcess(ssl=(self.scheme.lower()=='https'), wait=True, daemonize=True) p.write_conf( extra='test_case_name: "test_SIGHUP_daemonized"') p.start(imports='cherrypy.test._test_states_demo') pid = p.get_pid() try: # Send a SIGHUP os.kill(pid, SIGHUP) # Give the server some time to restart time.sleep(2) self.getPage("/pid") self.assertStatus(200) new_pid = int(self.body) self.assertNotEqual(new_pid, pid) finally: # Shut down the spawned process self.getPage("/exit") p.join() def test_SIGTERM(self): # SIGTERM should shut down the server whether daemonized or not. 
try: from signal import SIGTERM except ImportError: return self.skip("skipped (no SIGTERM) ") try: from os import kill except ImportError: return self.skip("skipped (no os.kill) ") # Spawn a normal, undaemonized process. p = helper.CPProcess(ssl=(self.scheme.lower()=='https')) p.write_conf( extra='test_case_name: "test_SIGTERM"') p.start(imports='cherrypy.test._test_states_demo') # Send a SIGTERM os.kill(p.get_pid(), SIGTERM) # This might hang if things aren't working right, but meh. p.join() if os.name in ['posix']: # Spawn a daemonized process and test again. p = helper.CPProcess(ssl=(self.scheme.lower()=='https'), wait=True, daemonize=True) p.write_conf( extra='test_case_name: "test_SIGTERM_2"') p.start(imports='cherrypy.test._test_states_demo') # Send a SIGTERM os.kill(p.get_pid(), SIGTERM) # This might hang if things aren't working right, but meh. p.join() def test_signal_handler_unsubscribe(self): try: from signal import SIGTERM except ImportError: return self.skip("skipped (no SIGTERM) ") try: from os import kill except ImportError: return self.skip("skipped (no os.kill) ") # Spawn a normal, undaemonized process. p = helper.CPProcess(ssl=(self.scheme.lower()=='https')) p.write_conf( extra="""unsubsig: True test_case_name: "test_signal_handler_unsubscribe" """) p.start(imports='cherrypy.test._test_states_demo') # Send a SIGTERM os.kill(p.get_pid(), SIGTERM) # This might hang if things aren't working right, but meh. p.join() # Assert the old handler ran. target_line = open(p.error_log, 'rb').readlines()[-10] if not ntob("I am an old SIGTERM handler.") in target_line: self.fail("Old SIGTERM handler did not run.\n%r" % target_line)
15,081
Python
.py
360
31.611111
79
0.606897
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)