id
stringlengths
28
33
content
stringlengths
14
265k
max_stars_repo_path
stringlengths
49
55
crossvul-python_data_bad_1775_0
import datetime import decimal import unicodedata from importlib import import_module from django.conf import settings from django.utils import dateformat, datetime_safe, numberformat, six from django.utils.encoding import force_str from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import ( check_for_language, get_language, to_locale, ) # format_cache is a mapping from (format_type, lang) to the format string. # By using the cache, it is possible to avoid running get_format_modules # repeatedly. _format_cache = {} _format_modules_cache = {} ISO_INPUT_FORMATS = { 'DATE_INPUT_FORMATS': ['%Y-%m-%d'], 'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'], 'DATETIME_INPUT_FORMATS': [ '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M', '%Y-%m-%d' ], } def reset_format_cache(): """Clear any cached formats. This method is provided primarily for testing purposes, so that the effects of cached formats can be removed. """ global _format_cache, _format_modules_cache _format_cache = {} _format_modules_cache = {} def iter_format_modules(lang, format_module_path=None): """ Does the heavy lifting of finding format modules. 
""" if not check_for_language(lang): return if format_module_path is None: format_module_path = settings.FORMAT_MODULE_PATH format_locations = [] if format_module_path: if isinstance(format_module_path, six.string_types): format_module_path = [format_module_path] for path in format_module_path: format_locations.append(path + '.%s') format_locations.append('django.conf.locale.%s') locale = to_locale(lang) locales = [locale] if '_' in locale: locales.append(locale.split('_')[0]) for location in format_locations: for loc in locales: try: yield import_module('%s.formats' % (location % loc)) except ImportError: pass def get_format_modules(lang=None, reverse=False): """ Returns a list of the format modules found """ if lang is None: lang = get_language() modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))) if reverse: return list(reversed(modules)) return modules def get_format(format_type, lang=None, use_l10n=None): """ For a specific format type, returns the format for the current language (locale), defaults to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT' If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" format_type = force_str(format_type) if use_l10n or (use_l10n is None and settings.USE_L10N): if lang is None: lang = get_language() cache_key = (format_type, lang) try: cached = _format_cache[cache_key] if cached is not None: return cached else: # Return the general setting by default return getattr(settings, format_type) except KeyError: for module in get_format_modules(lang): try: val = getattr(module, format_type) for iso_input in ISO_INPUT_FORMATS.get(format_type, ()): if iso_input not in val: if isinstance(val, tuple): val = list(val) val.append(iso_input) _format_cache[cache_key] = val return val except AttributeError: pass _format_cache[cache_key] = None return getattr(settings, format_type) get_format_lazy = lazy(get_format, six.text_type, list, tuple) def date_format(value, format=None, use_l10n=None): """ Formats a datetime.date or datetime.datetime object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)) def time_format(value, format=None, use_l10n=None): """ Formats a datetime.time object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)) def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False): """ Formats a numeric value using localization settings If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" if use_l10n or (use_l10n is None and settings.USE_L10N): lang = get_language() else: lang = None return numberformat.format( value, get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n), decimal_pos, get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n), get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n), force_grouping=force_grouping ) def localize(value, use_l10n=None): """ Checks if value is a localizable type (date, number...) and returns it formatted as a string using current locale format. If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ if isinstance(value, six.string_types): # Handle strings first for performance reasons. return value elif isinstance(value, bool): # Make sure booleans don't get treated as numbers return mark_safe(six.text_type(value)) elif isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.datetime): return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n) elif isinstance(value, datetime.date): return date_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.time): return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n) return value def localize_input(value, default=None): """ Checks if an input value is a localizable type and returns it formatted with the appropriate formatting string of the current locale. """ if isinstance(value, six.string_types): # Handle strings first for performance reasons. 
return value elif isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value) elif isinstance(value, datetime.datetime): value = datetime_safe.new_datetime(value) format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.date): value = datetime_safe.new_date(value) format = force_str(default or get_format('DATE_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.time): format = force_str(default or get_format('TIME_INPUT_FORMATS')[0]) return value.strftime(format) return value def sanitize_separators(value): """ Sanitizes a value according to the current decimal and thousand separator setting. Used with form field input. """ if settings.USE_L10N and isinstance(value, six.string_types): parts = [] decimal_separator = get_format('DECIMAL_SEPARATOR') if decimal_separator in value: value, decimals = value.split(decimal_separator, 1) parts.append(decimals) if settings.USE_THOUSAND_SEPARATOR: thousand_sep = get_format('THOUSAND_SEPARATOR') if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3: # Special case where we suspect a dot meant decimal separator (see #22171) pass else: for replacement in { thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}: value = value.replace(replacement, '') parts.append(value) value = '.'.join(reversed(parts)) return value
./CrossVul/dataset_final_sorted/CWE-200/py/bad_1775_0
crossvul-python_data_good_1574_0
""" Gather information about a system and report it using plugins supplied for application-specific information """ # sosreport.py # gather information about a system and report it # Copyright (C) 2006 Steve Conklin <sconklin@redhat.com> # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. import sys import traceback import os import errno import logging from optparse import OptionParser, Option from sos.plugins import import_plugin from sos.utilities import ImporterHelper from stat import ST_UID, ST_GID, ST_MODE, ST_CTIME, ST_ATIME, ST_MTIME, S_IMODE from time import strftime, localtime from collections import deque import tempfile from sos import _sos as _ from sos import __version__ import sos.policies from sos.archive import TarFileArchive, ZipFileArchive from sos.reporting import (Report, Section, Command, CopiedFile, CreatedFile, Alert, Note, PlainTextReport) # PYCOMPAT import six from six.moves import zip, input if six.PY3: from configparser import ConfigParser else: from ConfigParser import ConfigParser from six import print_ # file system errors that should terminate a run fatal_fs_errors = (errno.ENOSPC, errno.EROFS) def _format_list(first_line, items, indent=False): lines = [] line = first_line if indent: newline = len(first_line) * ' ' else: newline = "" for item in items: if len(line) + len(item) + 2 > 72: lines.append(line) line = 
newline line = line + item + ', ' if line[-2:] == ', ': line = line[:-2] lines.append(line) return lines class TempFileUtil(object): def __init__(self, tmp_dir): self.tmp_dir = tmp_dir self.files = [] def new(self): fd, fname = tempfile.mkstemp(dir=self.tmp_dir) fobj = open(fname, 'w') self.files.append((fname, fobj)) return fobj def clean(self): for fname, f in self.files: try: f.flush() f.close() except Exception: pass try: os.unlink(fname) except Exception: pass self.files = [] class OptionParserExtended(OptionParser): """ Show examples """ def print_help(self, out=sys.stdout): """ Prints help content including examples """ OptionParser.print_help(self, out) print_() print_("Some examples:") print_() print_(" enable cluster plugin only and collect dlm lockdumps:") print_(" # sosreport -o cluster -k cluster.lockdump") print_() print_(" disable memory and samba plugins, turn off rpm -Va " "collection:") print_(" # sosreport -n memory,samba -k rpm.rpmva=off") print_() class SosOption(Option): """Allow to specify comma delimited list of plugins""" ACTIONS = Option.ACTIONS + ("extend",) STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) def take_action(self, action, dest, opt, value, values, parser): """ Performs list extension on plugins """ if action == "extend": try: lvalue = value.split(",") except: pass else: values.ensure_value(dest, deque()).extend(lvalue) else: Option.take_action(self, action, dest, opt, value, values, parser) class XmlReport(object): """ Report build class """ def __init__(self): try: import libxml2 except ImportError: self.enabled = False return else: self.enabled = False return self.doc = libxml2.newDoc("1.0") self.root = self.doc.newChild(None, "sos", None) self.commands = self.root.newChild(None, "commands", None) self.files = self.root.newChild(None, "files", None) def add_command(self, cmdline, exitcode, stdout=None, stderr=None, f_stdout=None, f_stderr=None, runtime=None): """ Appends 
command run into report """ if not self.enabled: return cmd = self.commands.newChild(None, "cmd", None) cmd.setNsProp(None, "cmdline", cmdline) cmdchild = cmd.newChild(None, "exitcode", str(exitcode)) if runtime: cmd.newChild(None, "runtime", str(runtime)) if stdout or f_stdout: cmdchild = cmd.newChild(None, "stdout", stdout) if f_stdout: cmdchild.setNsProp(None, "file", f_stdout) if stderr or f_stderr: cmdchild = cmd.newChild(None, "stderr", stderr) if f_stderr: cmdchild.setNsProp(None, "file", f_stderr) def add_file(self, fname, stats): """ Appends file(s) added to report """ if not self.enabled: return cfile = self.files.newChild(None, "file", None) cfile.setNsProp(None, "fname", fname) cchild = cfile.newChild(None, "uid", str(stats[ST_UID])) cchild = cfile.newChild(None, "gid", str(stats[ST_GID])) cfile.newChild(None, "mode", str(oct(S_IMODE(stats[ST_MODE])))) cchild = cfile.newChild(None, "ctime", strftime('%a %b %d %H:%M:%S %Y', localtime(stats[ST_CTIME]))) cchild.setNsProp(None, "tstamp", str(stats[ST_CTIME])) cchild = cfile.newChild(None, "atime", strftime('%a %b %d %H:%M:%S %Y', localtime(stats[ST_ATIME]))) cchild.setNsProp(None, "tstamp", str(stats[ST_ATIME])) cchild = cfile.newChild(None, "mtime", strftime('%a %b %d %H:%M:%S %Y', localtime(stats[ST_MTIME]))) cchild.setNsProp(None, "tstamp", str(stats[ST_MTIME])) def serialize(self): """ Serializes xml """ if not self.enabled: return self.ui_log.info(self.doc.serialize(None, 1)) def serialize_to_file(self, fname): """ Serializes to file """ if not self.enabled: return outf = tempfile.NamedTemporaryFile() outf.write(self.doc.serialize(None, 1)) outf.flush() self.archive.add_file(outf.name, dest=fname) outf.close() class SoSOptions(object): _list_plugins = False _noplugins = [] _enableplugins = [] _onlyplugins = [] _plugopts = [] _usealloptions = False _all_logs = False _log_size = 10 _batch = False _build = False _verbosity = 0 _verify = False _quiet = False _debug = False _case_id = "" _customer_name = "" 
_profiles = deque() _list_profiles = False _config_file = "" _tmp_dir = "" _report = True _compression_type = 'auto' _options = None def __init__(self, args=None): if args: self._options = self._parse_args(args) else: self._options = None def _check_options_initialized(self): if self._options is not None: raise ValueError("SoSOptions object already initialized " + "from command line") @property def list_plugins(self): if self._options is not None: return self._options.list_plugins return self._list_plugins @list_plugins.setter def list_plugins(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.list_plugins expects a boolean") self._list_plugins = value @property def noplugins(self): if self._options is not None: return self._options.noplugins return self._noplugins @noplugins.setter def noplugins(self, value): self._check_options_initialized() self._noplugins = value @property def enableplugins(self): if self._options is not None: return self._options.enableplugins return self._enableplugins @enableplugins.setter def enableplugins(self, value): self._check_options_initialized() self._enableplugins = value @property def onlyplugins(self): if self._options is not None: return self._options.onlyplugins return self._onlyplugins @onlyplugins.setter def onlyplugins(self, value): self._check_options_initialized() self._onlyplugins = value @property def plugopts(self): if self._options is not None: return self._options.plugopts return self._plugopts @plugopts.setter def plugopts(self, value): # If we check for anything it should be itterability. 
# if not isinstance(value, list): # raise TypeError("SoSOptions.plugopts expects a list") self._plugopts = value @property def usealloptions(self): if self._options is not None: return self._options.usealloptions return self._usealloptions @usealloptions.setter def usealloptions(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.usealloptions expects a boolean") self._usealloptions = value @property def all_logs(self): if self._options is not None: return self._options.all_logs return self._all_logs @all_logs.setter def all_logs(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.all_logs expects a boolean") self._all_logs = value @property def log_size(self): if self._options is not None: return self._options.log_size return self._log_size @log_size.setter def log_size(self, value): self._check_options_initialized() if value < 0: raise ValueError("SoSOptions.log_size expects a value greater " "than zero") self._log_size = value @property def batch(self): if self._options is not None: return self._options.batch return self._batch @batch.setter def batch(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.batch expects a boolean") self._batch = value @property def build(self): if self._options is not None: return self._options.build return self._build @build.setter def build(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.build expects a boolean") self._build = value @property def verbosity(self): if self._options is not None: return self._options.verbosity return self._verbosity @verbosity.setter def verbosity(self, value): self._check_options_initialized() if value < 0 or value > 3: raise ValueError("SoSOptions.verbosity expects a value [0..3]") self._verbosity = value @property def verify(self): if self._options is not None: return 
self._options.verify return self._verify @verify.setter def verify(self, value): self._check_options_initialized() if value < 0 or value > 3: raise ValueError("SoSOptions.verify expects a value [0..3]") self._verify = value @property def quiet(self): if self._options is not None: return self._options.quiet return self._quiet @quiet.setter def quiet(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.quiet expects a boolean") self._quiet = value @property def debug(self): if self._options is not None: return self._options.debug return self._debug @debug.setter def debug(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.debug expects a boolean") self._debug = value @property def case_id(self): if self._options is not None: return self._options.case_id return self._case_id @case_id.setter def case_id(self, value): self._check_options_initialized() self._case_id = value @property def customer_name(self): if self._options is not None: return self._options.customer_name return self._customer_name @customer_name.setter def customer_name(self, value): self._check_options_initialized() self._customer_name = value @property def profiles(self): if self._options is not None: return self._options.profiles return self._profiles @profiles.setter def profiles(self, value): self._check_options_initialized() self._profiles = value @property def list_profiles(self): if self._options is not None: return self._options.list_profiles return self._list_profiles @list_profiles.setter def list_profiles(self, value): self._check_options_initialized() self._list_profiles = value @property def config_file(self): if self._options is not None: return self._options.config_file return self._config_file @config_file.setter def config_file(self, value): self._check_options_initialized() self._config_file = value @property def tmp_dir(self): if self._options is not None: return 
self._options.tmp_dir return self._tmp_dir @tmp_dir.setter def tmp_dir(self, value): self._check_options_initialized() self._tmp_dir = value @property def report(self): if self._options is not None: return self._options.report return self._report @report.setter def report(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.report expects a boolean") self._report = value @property def compression_type(self): if self._options is not None: return self._options.compression_type return self._compression_type @compression_type.setter def compression_type(self, value): self._check_options_initialized() self._compression_type = value def _parse_args(self, args): """ Parse command line options and arguments""" self.parser = parser = OptionParserExtended(option_class=SosOption) parser.add_option("-l", "--list-plugins", action="store_true", dest="list_plugins", default=False, help="list plugins and available plugin options") parser.add_option("-n", "--skip-plugins", action="extend", dest="noplugins", type="string", help="disable these plugins", default=deque()) parser.add_option("-e", "--enable-plugins", action="extend", dest="enableplugins", type="string", help="enable these plugins", default=deque()) parser.add_option("-o", "--only-plugins", action="extend", dest="onlyplugins", type="string", help="enable these plugins only", default=deque()) parser.add_option("-k", "--plugin-option", action="extend", dest="plugopts", type="string", help="plugin options in plugname.option=value " "format (see -l)", default=deque()) parser.add_option("--log-size", action="store", dest="log_size", default=10, type="int", help="set a limit on the size of collected logs") parser.add_option("-a", "--alloptions", action="store_true", dest="usealloptions", default=False, help="enable all options for loaded plugins") parser.add_option("--all-logs", action="store_true", dest="all_logs", default=False, help="collect all available logs regardless 
of size") parser.add_option("--batch", action="store_true", dest="batch", default=False, help="batch mode - do not prompt interactively") parser.add_option("--build", action="store_true", dest="build", default=False, help="preserve the temporary directory and do not " "package results") parser.add_option("-v", "--verbose", action="count", dest="verbosity", help="increase verbosity") parser.add_option("", "--verify", action="store_true", dest="verify", default=False, help="perform data verification during collection") parser.add_option("", "--quiet", action="store_true", dest="quiet", default=False, help="only print fatal errors") parser.add_option("--debug", action="count", dest="debug", help="enable interactive debugging using the python " "debugger") parser.add_option("--ticket-number", action="store", dest="case_id", help="specify ticket number") parser.add_option("--case-id", action="store", dest="case_id", help="specify case identifier") parser.add_option("-p", "--profile", action="extend", dest="profiles", type="string", default=deque(), help="enable plugins selected by the given profiles") parser.add_option("--list-profiles", action="store_true", dest="list_profiles", default=False) parser.add_option("--name", action="store", dest="customer_name", help="specify report name") parser.add_option("--config-file", action="store", dest="config_file", help="specify alternate configuration file") parser.add_option("--tmp-dir", action="store", dest="tmp_dir", help="specify alternate temporary directory", default=None) parser.add_option("--no-report", action="store_true", dest="report", help="Disable HTML/XML reporting", default=False) parser.add_option("-z", "--compression-type", dest="compression_type", help="compression technology to use [auto, zip, " "gzip, bzip2, xz] (default=auto)", default="auto") return parser.parse_args(args)[0] class SoSReport(object): """The main sosreport class""" def __init__(self, args): self.loaded_plugins = deque() self.skipped_plugins 
= deque() self.all_options = deque() self.xml_report = XmlReport() self.global_plugin_options = {} self.archive = None self.tempfile_util = None self._args = args try: import signal signal.signal(signal.SIGTERM, self.get_exit_handler()) except Exception: pass # not available in java, but we don't care self.opts = SoSOptions(args) self._set_debug() self._read_config() try: self.policy = sos.policies.load() except KeyboardInterrupt: self._exit(0) self._is_root = self.policy.is_root() self.tmpdir = os.path.abspath( self.policy.get_tmp_dir(self.opts.tmp_dir)) if not os.path.isdir(self.tmpdir) \ or not os.access(self.tmpdir, os.W_OK): # write directly to stderr as logging is not initialised yet sys.stderr.write("temporary directory %s " % self.tmpdir + "does not exist or is not writable\n") self._exit(1) self.tempfile_util = TempFileUtil(self.tmpdir) self._set_directories() def print_header(self): self.ui_log.info("\n%s\n" % _("sosreport (version %s)" % (__version__,))) def get_commons(self): return { 'cmddir': self.cmddir, 'logdir': self.logdir, 'rptdir': self.rptdir, 'tmpdir': self.tmpdir, 'soslog': self.soslog, 'policy': self.policy, 'verbosity': self.opts.verbosity, 'xmlreport': self.xml_report, 'cmdlineopts': self.opts, 'config': self.config, 'global_plugin_options': self.global_plugin_options, } def get_temp_file(self): return self.tempfile_util.new() def _set_archive(self): archive_name = os.path.join(self.tmpdir, self.policy.get_archive_name()) if self.opts.compression_type == 'auto': auto_archive = self.policy.get_preferred_archive() self.archive = auto_archive(archive_name, self.tmpdir) elif self.opts.compression_type == 'zip': self.archive = ZipFileArchive(archive_name, self.tmpdir) else: self.archive = TarFileArchive(archive_name, self.tmpdir) self.archive.set_debug(True if self.opts.debug else False) def _make_archive_paths(self): self.archive.makedirs(self.cmddir, 0o755) self.archive.makedirs(self.logdir, 0o755) self.archive.makedirs(self.rptdir, 0o755) 
def _set_directories(self): self.cmddir = 'sos_commands' self.logdir = 'sos_logs' self.rptdir = 'sos_reports' def _set_debug(self): if self.opts.debug: sys.excepthook = self._exception self.raise_plugins = True else: self.raise_plugins = False @staticmethod def _exception(etype, eval_, etrace): """ Wrap exception in debugger if not in tty """ if hasattr(sys, 'ps1') or not sys.stderr.isatty(): # we are in interactive mode or we don't have a tty-like # device, so we call the default hook sys.__excepthook__(etype, eval_, etrace) else: import pdb # we are NOT in interactive mode, print the exception... traceback.print_exception(etype, eval_, etrace, limit=2, file=sys.stdout) print_() # ...then start the debugger in post-mortem mode. pdb.pm() def _exit(self, error=0): raise SystemExit() # sys.exit(error) def get_exit_handler(self): def exit_handler(signum, frame): self._exit() return exit_handler def _read_config(self): self.config = ConfigParser() if self.opts.config_file: config_file = self.opts.config_file else: config_file = '/etc/sos.conf' try: self.config.readfp(open(config_file)) except IOError: pass def _setup_logging(self): # main soslog self.soslog = logging.getLogger('sos') self.soslog.setLevel(logging.DEBUG) self.sos_log_file = self.get_temp_file() self.sos_log_file.close() flog = logging.FileHandler(self.sos_log_file.name) flog.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s')) flog.setLevel(logging.INFO) self.soslog.addHandler(flog) if not self.opts.quiet: console = logging.StreamHandler(sys.stderr) console.setFormatter(logging.Formatter('%(message)s')) if self.opts.verbosity and self.opts.verbosity > 1: console.setLevel(logging.DEBUG) flog.setLevel(logging.DEBUG) elif self.opts.verbosity and self.opts.verbosity > 0: console.setLevel(logging.INFO) flog.setLevel(logging.DEBUG) else: console.setLevel(logging.WARNING) self.soslog.addHandler(console) # ui log self.ui_log = logging.getLogger('sos_ui') self.ui_log.setLevel(logging.INFO) 
self.sos_ui_log_file = self.get_temp_file() self.sos_ui_log_file.close() ui_fhandler = logging.FileHandler(self.sos_ui_log_file.name) ui_fhandler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s')) self.ui_log.addHandler(ui_fhandler) if not self.opts.quiet: ui_console = logging.StreamHandler(sys.stdout) ui_console.setFormatter(logging.Formatter('%(message)s')) ui_console.setLevel(logging.INFO) self.ui_log.addHandler(ui_console) def _finish_logging(self): logging.shutdown() # Make sure the log files are added before we remove the log # handlers. This prevents "No handlers could be found.." messages # from leaking to the console when running in --quiet mode when # Archive classes attempt to acess the log API. if getattr(self, "sos_log_file", None): self.archive.add_file(self.sos_log_file.name, dest=os.path.join('sos_logs', 'sos.log')) if getattr(self, "sos_ui_log_file", None): self.archive.add_file(self.sos_ui_log_file.name, dest=os.path.join('sos_logs', 'ui.log')) def _get_disabled_plugins(self): disabled = [] if self.config.has_option("plugins", "disable"): disabled = [plugin.strip() for plugin in self.config.get("plugins", "disable").split(',')] return disabled def _is_in_profile(self, plugin_class): onlyplugins = self.opts.onlyplugins if not len(self.opts.profiles): return True if not hasattr(plugin_class, "profiles"): return False if onlyplugins and not self._is_not_specified(plugin_class.name()): return True return any([p in self.opts.profiles for p in plugin_class.profiles]) def _is_skipped(self, plugin_name): return (plugin_name in self.opts.noplugins or plugin_name in self._get_disabled_plugins()) def _is_inactive(self, plugin_name, pluginClass): return (not pluginClass(self.get_commons()).check_enabled() and plugin_name not in self.opts.enableplugins and plugin_name not in self.opts.onlyplugins) def _is_not_default(self, plugin_name, pluginClass): return (not pluginClass(self.get_commons()).default_enabled() and plugin_name not in 
self.opts.enableplugins and plugin_name not in self.opts.onlyplugins) def _is_not_specified(self, plugin_name): return (self.opts.onlyplugins and plugin_name not in self.opts.onlyplugins) def _skip(self, plugin_class, reason="unknown"): self.skipped_plugins.append(( plugin_class.name(), plugin_class(self.get_commons()), reason )) def _load(self, plugin_class): self.loaded_plugins.append(( plugin_class.name(), plugin_class(self.get_commons()) )) def load_plugins(self): import sos.plugins helper = ImporterHelper(sos.plugins) plugins = helper.get_modules() self.plugin_names = deque() self.profiles = set() using_profiles = len(self.opts.profiles) # validate and load plugins for plug in plugins: plugbase, ext = os.path.splitext(plug) try: plugin_classes = import_plugin( plugbase, tuple(self.policy.valid_subclasses)) if not len(plugin_classes): # no valid plugin classes for this policy continue plugin_class = self.policy.match_plugin(plugin_classes) if not self.policy.validate_plugin(plugin_class): self.soslog.warning( _("plugin %s does not validate, skipping") % plug) if self.opts.verbosity > 0: self._skip(plugin_class, _("does not validate")) continue if plugin_class.requires_root and not self._is_root: self.soslog.info(_("plugin %s requires root permissions" "to execute, skipping") % plug) self._skip(plugin_class, _("requires root")) continue # plug-in is valid, let's decide whether run it or not self.plugin_names.append(plugbase) if hasattr(plugin_class, "profiles"): self.profiles.update(plugin_class.profiles) in_profile = self._is_in_profile(plugin_class) if not in_profile: self._skip(plugin_class, _("excluded")) continue if self._is_skipped(plugbase): self._skip(plugin_class, _("skipped")) continue if self._is_inactive(plugbase, plugin_class): self._skip(plugin_class, _("inactive")) continue if self._is_not_default(plugbase, plugin_class): self._skip(plugin_class, _("optional")) continue # true when the null (empty) profile is active default_profile = not 
using_profiles and in_profile if self._is_not_specified(plugbase) and default_profile: self._skip(plugin_class, _("not specified")) continue self._load(plugin_class) except Exception as e: self.soslog.warning(_("plugin %s does not install, " "skipping: %s") % (plug, e)) if self.raise_plugins: raise def _set_all_options(self): if self.opts.usealloptions: for plugname, plug in self.loaded_plugins: for name, parms in zip(plug.opt_names, plug.opt_parms): if type(parms["enabled"]) == bool: parms["enabled"] = True def _set_tunables(self): if self.config.has_section("tunables"): if not self.opts.plugopts: self.opts.plugopts = deque() for opt, val in self.config.items("tunables"): if not opt.split('.')[0] in self._get_disabled_plugins(): self.opts.plugopts.append(opt + "=" + val) if self.opts.plugopts: opts = {} for opt in self.opts.plugopts: # split up "general.syslogsize=5" try: opt, val = opt.split("=") except: val = True else: if val.lower() in ["off", "disable", "disabled", "false"]: val = False else: # try to convert string "val" to int() try: val = int(val) except: pass # split up "general.syslogsize" try: plug, opt = opt.split(".") except: plug = opt opt = True try: opts[plug] except KeyError: opts[plug] = deque() opts[plug].append((opt, val)) for plugname, plug in self.loaded_plugins: if plugname in opts: for opt, val in opts[plugname]: if not plug.set_option(opt, val): self.soslog.error('no such option "%s" for plugin ' '(%s)' % (opt, plugname)) self._exit(1) del opts[plugname] for plugname in opts.keys(): self.soslog.error('unable to set option for disabled or ' 'non-existing plugin (%s)' % (plugname)) def _check_for_unknown_plugins(self): import itertools for plugin in itertools.chain(self.opts.onlyplugins, self.opts.noplugins, self.opts.enableplugins): plugin_name = plugin.split(".")[0] if plugin_name not in self.plugin_names: self.soslog.fatal('a non-existing plugin (%s) was specified ' 'in the command line' % (plugin_name)) self._exit(1) def 
_set_plugin_options(self): for plugin_name, plugin in self.loaded_plugins: names, parms = plugin.get_all_options() for optname, optparm in zip(names, parms): self.all_options.append((plugin, plugin_name, optname, optparm)) def list_plugins(self): if not self.loaded_plugins and not self.skipped_plugins: self.soslog.fatal(_("no valid plugins found")) return if self.loaded_plugins: self.ui_log.info(_("The following plugins are currently enabled:")) self.ui_log.info("") for (plugname, plug) in self.loaded_plugins: self.ui_log.info(" %-20s %s" % (plugname, plug.get_description())) else: self.ui_log.info(_("No plugin enabled.")) self.ui_log.info("") if self.skipped_plugins: self.ui_log.info(_("The following plugins are currently " "disabled:")) self.ui_log.info("") for (plugname, plugclass, reason) in self.skipped_plugins: self.ui_log.info(" %-20s %-14s %s" % ( plugname, reason, plugclass.get_description())) self.ui_log.info("") if self.all_options: self.ui_log.info(_("The following plugin options are available:")) self.ui_log.info("") for (plug, plugname, optname, optparm) in self.all_options: # format option value based on its type (int or bool) if type(optparm["enabled"]) == bool: if optparm["enabled"] is True: tmpopt = "on" else: tmpopt = "off" else: tmpopt = optparm["enabled"] self.ui_log.info(" %-25s %-15s %s" % ( plugname + "." 
+ optname, tmpopt, optparm["desc"])) else: self.ui_log.info(_("No plugin options available.")) self.ui_log.info("") profiles = list(self.profiles) profiles.sort() lines = _format_list("Profiles: ", profiles, indent=True) for line in lines: self.ui_log.info(" %s" % line) self.ui_log.info("") self.ui_log.info(" %d profiles, %d plugins" % (len(self.profiles), len(self.loaded_plugins))) self.ui_log.info("") def list_profiles(self): if not self.profiles: self.soslog.fatal(_("no valid profiles found")) return self.ui_log.info(_("The following profiles are available:")) self.ui_log.info("") def _has_prof(c): return hasattr(c, "profiles") profiles = list(self.profiles) profiles.sort() for profile in profiles: plugins = [] for name, plugin in self.loaded_plugins: if _has_prof(plugin) and profile in plugin.profiles: plugins.append(name) lines = _format_list("%-15s " % profile, plugins, indent=True) for line in lines: self.ui_log.info(" %s" % line) self.ui_log.info("") self.ui_log.info(" %d profiles, %d plugins" % (len(profiles), len(self.loaded_plugins))) self.ui_log.info("") def batch(self): if self.opts.batch: self.ui_log.info(self.policy.get_msg()) else: msg = self.policy.get_msg() msg += _("Press ENTER to continue, or CTRL-C to quit.\n") try: input(msg) except: self.ui_log.info("") self._exit() def _log_plugin_exception(self, plugin_name): self.soslog.error("%s\n%s" % (plugin_name, traceback.format_exc())) def prework(self): self.policy.pre_work() try: self.ui_log.info(_(" Setting up archive ...")) compression_methods = ('auto', 'zip', 'bzip2', 'gzip', 'xz') method = self.opts.compression_type if method not in compression_methods: compression_list = ', '.join(compression_methods) self.ui_log.error("") self.ui_log.error("Invalid compression specified: " + method) self.ui_log.error("Valid types are: " + compression_list) self.ui_log.error("") self._exit(1) self._set_archive() self._make_archive_paths() return except (OSError, IOError) as e: if e.errno in fatal_fs_errors: 
self.ui_log.error("") self.ui_log.error(" %s while setting up archive" % e.strerror) self.ui_log.error("") else: raise e except Exception as e: import traceback self.ui_log.error("") self.ui_log.error(" Unexpected exception setting up archive:") traceback.print_exc(e) self.ui_log.error(e) self._exit(1) def setup(self): msg = "[%s:%s] executing 'sosreport %s'" self.soslog.info(msg % (__name__, "setup", " ".join(self._args))) self.ui_log.info(_(" Setting up plugins ...")) for plugname, plug in self.loaded_plugins: try: plug.archive = self.archive plug.setup() except KeyboardInterrupt: raise except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while setting up plugins" % e.strerror) self.ui_log.error("") self._exit(1) except: if self.raise_plugins: raise else: self._log_plugin_exception(plugname) def version(self): """Fetch version information from all plugins and store in the report version file""" versions = [] versions.append("sosreport: %s" % __version__) for plugname, plug in self.loaded_plugins: versions.append("%s: %s" % (plugname, plug.version)) self.archive.add_string(content="\n".join(versions), dest='version.txt') def collect(self): self.ui_log.info(_(" Running plugins. Please wait ...")) self.ui_log.info("") plugruncount = 0 for i in zip(self.loaded_plugins): plugruncount += 1 plugname, plug = i[0] status_line = (" Running %d/%d: %s... 
" % (plugruncount, len(self.loaded_plugins), plugname)) if self.opts.verbosity == 0: status_line = "\r%s" % status_line else: status_line = "%s\n" % status_line if not self.opts.quiet: sys.stdout.write(status_line) sys.stdout.flush() try: plug.collect() except KeyboardInterrupt: raise except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while collecting plugin data" % e.strerror) self.ui_log.error("") self._exit(1) except: if self.raise_plugins: raise else: self._log_plugin_exception(plugname) self.ui_log.info("") def report(self): for plugname, plug in self.loaded_plugins: for oneFile in plug.copied_files: try: self.xml_report.add_file(oneFile["srcpath"], os.stat(oneFile["srcpath"])) except: pass try: self.xml_report.serialize_to_file(os.path.join(self.rptdir, "sosreport.xml")) except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while writing report data" % e.strerror) self.ui_log.error("") self._exit(1) def plain_report(self): report = Report() for plugname, plug in self.loaded_plugins: section = Section(name=plugname) for alert in plug.alerts: section.add(Alert(alert)) if plug.custom_text: section.add(Note(plug.custom_text)) for f in plug.copied_files: section.add(CopiedFile(name=f['srcpath'], href=".." 
+ f['dstpath'])) for cmd in plug.executed_commands: section.add(Command(name=cmd['exe'], return_code=0, href="../" + cmd['file'])) for content, f in plug.copy_strings: section.add(CreatedFile(name=f)) report.add(section) try: fd = self.get_temp_file() fd.write(str(PlainTextReport(report))) fd.flush() self.archive.add_file(fd.name, dest=os.path.join('sos_reports', 'sos.txt')) except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while writing text report" % e.strerror) self.ui_log.error("") self._exit(1) def html_report(self): try: self._html_report() except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while writing HTML report" % e.strerror) self.ui_log.error("") self._exit(1) def _html_report(self): # Generate the header for the html output file rfd = self.get_temp_file() rfd.write(""" <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> <head> <link rel="stylesheet" type="text/css" media="screen" href="donot.css" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Sos System Report</title> </head> <body> """) # Make a pass to gather Alerts and a list of module names allAlerts = deque() plugNames = deque() for plugname, plug in self.loaded_plugins: for alert in plug.alerts: allAlerts.append('<a href="#%s">%s</a>: %s' % (plugname, plugname, alert)) plugNames.append(plugname) # Create a table of links to the module info rfd.write("<hr/><h3>Loaded Plugins:</h3>") rfd.write("<table><tr>\n") rr = 0 for i in range(len(plugNames)): rfd.write('<td><a href="#%s">%s</a></td>\n' % (plugNames[i], plugNames[i])) rr = divmod(i, 4)[1] if (rr == 3): rfd.write('</tr>') if not (rr == 3): rfd.write('</tr>') rfd.write('</table>\n') rfd.write('<hr/><h3>Alerts:</h3>') rfd.write('<ul>') for alert in allAlerts: rfd.write('<li>%s</li>' % 
alert) rfd.write('</ul>') # Call the report method for each plugin for plugname, plug in self.loaded_plugins: try: html = plug.report() except: if self.raise_plugins: raise else: rfd.write(html) rfd.write("</body></html>") rfd.flush() self.archive.add_file(rfd.name, dest=os.path.join('sos_reports', 'sos.html')) def postproc(self): for plugname, plug in self.loaded_plugins: try: plug.postproc() except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while post-processing plugin data" % e.strerror) self.ui_log.error("") self._exit(1) except: if self.raise_plugins: raise def final_work(self): # this must come before archive creation to ensure that log # files are closed and cleaned up at exit. self._finish_logging() # package up the results for the support organization if not self.opts.build: old_umask = os.umask(0o077) if not self.opts.quiet: print(_("Creating compressed archive...")) # compression could fail for a number of reasons try: final_filename = self.archive.finalize( self.opts.compression_type) except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") self.ui_log.error(" %s while finalizing archive" % e.strerror) self.ui_log.error("") self._exit(1) except: if self.opts.debug: raise else: return False finally: os.umask(old_umask) else: final_filename = self.archive.get_archive_path() self.policy.display_results(final_filename, build=self.opts.build) self.tempfile_util.clean() return True def verify_plugins(self): if not self.loaded_plugins: self.soslog.error(_("no valid plugins were enabled")) return False return True def set_global_plugin_option(self, key, value): self.global_plugin_options[key] = value def execute(self): try: self._setup_logging() self.policy.set_commons(self.get_commons()) self.print_header() self.load_plugins() self._set_all_options() self._set_tunables() self._check_for_unknown_plugins() self._set_plugin_options() if self.opts.list_plugins: 
self.list_plugins() return True if self.opts.list_profiles: self.list_profiles() return True # verify that at least one plug-in is enabled if not self.verify_plugins(): return False self.batch() self.prework() self.setup() self.collect() if not self.opts.report: self.report() self.html_report() self.plain_report() self.postproc() self.version() return self.final_work() except (SystemExit, KeyboardInterrupt): if self.archive: self.archive.cleanup() if self.tempfile_util: self.tempfile_util.clean() return False def main(args): """The main entry point""" sos = SoSReport(args) sos.execute() # vim: et ts=4 sw=4
./CrossVul/dataset_final_sorted/CWE-200/py/good_1574_0
crossvul-python_data_bad_3112_1
# -*- coding: utf-8 -*- # # Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com> # # This file is part of Weblate <https://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from django.shortcuts import render, get_object_or_404, redirect from django.http import HttpResponse, HttpResponseRedirect from django.contrib.auth import logout from django.conf import settings from django.utils.translation import ugettext as _ from django.contrib.auth.decorators import login_required from django.core.mail.message import EmailMultiAlternatives from django.utils import translation from django.utils.cache import patch_response_headers from django.utils.crypto import get_random_string from django.utils.translation import get_language from django.contrib.auth.models import User from django.contrib.auth import views as auth_views from django.views.generic import TemplateView from django.contrib.auth import update_session_auth_hash from django.core.urlresolvers import reverse from rest_framework.authtoken.models import Token from six.moves.urllib.parse import urlencode from social.backends.utils import load_backends from social.apps.django_app.utils import BACKENDS from social.apps.django_app.views import complete from weblate.accounts.forms import ( RegistrationForm, PasswordForm, PasswordChangeForm, EmailForm, ResetForm, LoginForm, HostingForm, 
CaptchaRegistrationForm ) from weblate.logger import LOGGER from weblate.accounts.avatar import get_avatar_image, get_fallback_avatar_url from weblate.accounts.models import set_lang, remove_user, Profile from weblate.trans import messages from weblate.trans.models import Change, Project, SubProject from weblate.trans.views.helper import get_project from weblate.accounts.forms import ( ProfileForm, SubscriptionForm, UserForm, ContactForm, SubscriptionSettingsForm, UserSettingsForm, DashboardSettingsForm ) from weblate import appsettings CONTACT_TEMPLATE = ''' Message from %(name)s <%(email)s>: %(message)s ''' HOSTING_TEMPLATE = ''' %(name)s <%(email)s> wants to host %(project)s Project: %(project)s Website: %(url)s Repository: %(repo)s Filemask: %(mask)s Username: %(username)s Additional message: %(message)s ''' class RegistrationTemplateView(TemplateView): ''' Class for rendering registration pages. ''' def get_context_data(self, **kwargs): ''' Creates context for rendering page. ''' context = super(RegistrationTemplateView, self).get_context_data( **kwargs ) context['title'] = _('User registration') return context def mail_admins_contact(request, subject, message, context, sender): ''' Sends a message to the admins, as defined by the ADMINS setting. ''' LOGGER.info( 'contact form from %s', sender, ) if not settings.ADMINS: messages.error( request, _('Message could not be sent to administrator!') ) LOGGER.error( 'ADMINS not configured, can not send message!' ) return mail = EmailMultiAlternatives( '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject % context), message % context, to=[a[1] for a in settings.ADMINS], headers={'Reply-To': sender}, ) mail.send(fail_silently=False) messages.success( request, _('Message has been sent to administrator.') ) def deny_demo(request): """ Denies editing of demo account on demo server. 
""" messages.warning( request, _('You cannot change demo account on the demo server.') ) return redirect_profile(request.POST.get('activetab')) def redirect_profile(page=''): url = reverse('profile') if page and page.startswith('#'): url = url + page return HttpResponseRedirect(url) @login_required def user_profile(request): profile = request.user.profile if not profile.language: profile.language = get_language() profile.save() form_classes = [ ProfileForm, SubscriptionForm, SubscriptionSettingsForm, UserSettingsForm, DashboardSettingsForm, ] if request.method == 'POST': # Parse POST params forms = [form(request.POST, instance=profile) for form in form_classes] forms.append(UserForm(request.POST, instance=request.user)) if appsettings.DEMO_SERVER and request.user.username == 'demo': return deny_demo(request) if all([form.is_valid() for form in forms]): # Save changes for form in forms: form.save() # Change language set_lang(request, request.user.profile) # Redirect after saving (and possibly changing language) response = redirect_profile(request.POST.get('activetab')) # Set language cookie and activate new language (for message below) lang_code = profile.language response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code) translation.activate(lang_code) messages.success(request, _('Your profile has been updated.')) return response else: forms = [form(instance=profile) for form in form_classes] forms.append(UserForm(instance=request.user)) social = request.user.social_auth.all() social_names = [assoc.provider for assoc in social] all_backends = set(load_backends(BACKENDS).keys()) new_backends = [ x for x in all_backends if x == 'email' or x not in social_names ] license_projects = SubProject.objects.filter( project__in=Project.objects.all_acl(request.user) ).exclude( license='' ) result = render( request, 'accounts/profile.html', { 'form': forms[0], 'subscriptionform': forms[1], 'subscriptionsettingsform': forms[2], 'usersettingsform': forms[3], 
'dashboardsettingsform': forms[4], 'userform': forms[5], 'profile': profile, 'title': _('User profile'), 'licenses': license_projects, 'associated': social, 'new_backends': new_backends, } ) result.set_cookie( settings.LANGUAGE_COOKIE_NAME, profile.language ) return result @login_required def user_remove(request): if appsettings.DEMO_SERVER and request.user.username == 'demo': return deny_demo(request) if request.method == 'POST': remove_user(request.user) logout(request) messages.success( request, _('Your account has been removed.') ) return redirect('home') return render( request, 'accounts/removal.html', ) def get_initial_contact(request): ''' Fills in initial contact form fields from request. ''' initial = {} if request.user.is_authenticated(): initial['name'] = request.user.first_name initial['email'] = request.user.email return initial def contact(request): if request.method == 'POST': form = ContactForm(request.POST) if form.is_valid(): mail_admins_contact( request, '%(subject)s', CONTACT_TEMPLATE, form.cleaned_data, form.cleaned_data['email'], ) return redirect('home') else: initial = get_initial_contact(request) if 'subject' in request.GET: initial['subject'] = request.GET['subject'] form = ContactForm(initial=initial) return render( request, 'accounts/contact.html', { 'form': form, 'title': _('Contact'), } ) @login_required def hosting(request): ''' Form for hosting request. ''' if not appsettings.OFFER_HOSTING: return redirect('home') if request.method == 'POST': form = HostingForm(request.POST) if form.is_valid(): context = form.cleaned_data context['username'] = request.user.username mail_admins_contact( request, 'Hosting request for %(project)s', HOSTING_TEMPLATE, context, form.cleaned_data['email'], ) return redirect('home') else: initial = get_initial_contact(request) form = HostingForm(initial=initial) return render( request, 'accounts/hosting.html', { 'form': form, 'title': _('Hosting'), } ) def user_page(request, user): ''' User details page. 
''' user = get_object_or_404(User, username=user) profile = Profile.objects.get_or_create(user=user)[0] # Filter all user activity all_changes = Change.objects.last_changes(request.user).filter( user=user, ) # Last user activity last_changes = all_changes[:10] # Filter where project is active user_projects_ids = set(all_changes.values_list( 'translation__subproject__project', flat=True )) user_projects = Project.objects.filter(id__in=user_projects_ids) return render( request, 'accounts/user.html', { 'page_profile': profile, 'page_user': user, 'last_changes': last_changes, 'last_changes_url': urlencode( {'user': user.username.encode('utf-8')} ), 'user_projects': user_projects, } ) def user_avatar(request, user, size): ''' User avatar page. ''' user = get_object_or_404(User, username=user) if user.email == 'noreply@weblate.org': return redirect(get_fallback_avatar_url(size)) response = HttpResponse( content_type='image/png', content=get_avatar_image(request, user, size) ) patch_response_headers(response, 3600 * 24 * 7) return response def weblate_login(request): ''' Login handler, just wrapper around login. ''' # Redirect logged in users to profile if request.user.is_authenticated(): return redirect_profile() # Redirect if there is only one backend auth_backends = list(load_backends(BACKENDS).keys()) if len(auth_backends) == 1 and auth_backends[0] != 'email': return redirect('social:begin', auth_backends[0]) return auth_views.login( request, template_name='accounts/login.html', authentication_form=LoginForm, extra_context={ 'login_backends': [ x for x in auth_backends if x != 'email' ], 'can_reset': 'email' in auth_backends, 'title': _('Login'), } ) @login_required def weblate_logout(request): ''' Logout handler, just wrapper around standard logout. ''' messages.info(request, _('Thanks for using Weblate!')) return auth_views.logout( request, next_page=reverse('home'), ) def register(request): ''' Registration form. 
''' if appsettings.REGISTRATION_CAPTCHA: form_class = CaptchaRegistrationForm else: form_class = RegistrationForm if request.method == 'POST': form = form_class(request.POST) if form.is_valid() and appsettings.REGISTRATION_OPEN: # Ensure we do registration in separate session # not sent to client request.session.create() result = complete(request, 'email') request.session.save() request.session = None return result else: form = form_class() backends = set(load_backends(BACKENDS).keys()) # Redirect if there is only one backend if len(backends) == 1 and 'email' not in backends: return redirect('social:begin', backends.pop()) return render( request, 'accounts/register.html', { 'registration_email': 'email' in backends, 'registration_backends': backends - set(['email']), 'title': _('User registration'), 'form': form, } ) @login_required def email_login(request): ''' Connect email. ''' if request.method == 'POST': form = EmailForm(request.POST) if form.is_valid(): return complete(request, 'email') else: form = EmailForm() return render( request, 'accounts/email.html', { 'title': _('Register email'), 'form': form, } ) @login_required def password(request): ''' Password change / set form. 
''' if appsettings.DEMO_SERVER and request.user.username == 'demo': return deny_demo(request) do_change = False if not request.user.has_usable_password(): do_change = True change_form = None elif request.method == 'POST': change_form = PasswordChangeForm(request.POST) if change_form.is_valid(): cur_password = change_form.cleaned_data['password'] do_change = request.user.check_password(cur_password) if not do_change: messages.error( request, _('You have entered an invalid password.') ) else: change_form = PasswordChangeForm() if request.method == 'POST': form = PasswordForm(request.POST) if form.is_valid() and do_change: # Clear flag forcing user to set password redirect_page = '#auth' if 'show_set_password' in request.session: del request.session['show_set_password'] redirect_page = '' request.user.set_password( form.cleaned_data['password1'] ) request.user.save() # Update session hash update_session_auth_hash(request, request.user) messages.success( request, _('Your password has been changed.') ) return redirect_profile(redirect_page) else: form = PasswordForm() return render( request, 'accounts/password.html', { 'title': _('Change password'), 'change_form': change_form, 'form': form, } ) def reset_password(request): ''' Password reset handling. 
''' if 'email' not in load_backends(BACKENDS).keys(): messages.error( request, _('Can not reset password, email authentication is disabled!') ) return redirect('login') if request.method == 'POST': form = ResetForm(request.POST) if form.is_valid(): # Force creating new session request.session.create() if request.user.is_authenticated(): logout(request) request.session['password_reset'] = True return complete(request, 'email') else: form = ResetForm() return render( request, 'accounts/reset.html', { 'title': _('Password reset'), 'form': form, } ) @login_required def reset_api_key(request): """Resets user API key""" request.user.auth_token.delete() Token.objects.create( user=request.user, key=get_random_string(40) ) return redirect_profile('#api') @login_required def watch(request, project): obj = get_project(request, project) request.user.profile.subscriptions.add(obj) return redirect(obj) @login_required def unwatch(request, project): obj = get_project(request, project) request.user.profile.subscriptions.remove(obj) return redirect(obj)
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3112_1
crossvul-python_data_good_3325_1
# -*- coding: utf-8 -*-
'''
Minion side functions for salt-cp

Execution-module counterpart of the ``salt-cp`` CLI and the master file
server: fetch files/directories from the master (``get_*``/``cache_*``),
list fileserver contents, and push minion files up to the master (``push``).
Relies on the Salt loader dunders ``__opts__``, ``__context__``, ``__salt__``,
``__pillar__`` and ``__grains__`` being injected at load time.
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import fnmatch

# Import salt libs
import salt.minion
import salt.fileclient
import salt.utils
import salt.utils.url
import salt.crypt
import salt.transport
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse  # pylint: disable=import-error,no-name-in-module

# Import 3rd-party libs
import salt.ext.six as six

log = logging.getLogger(__name__)

# Allow this module on all proxy-minion types
__proxyenabled__ = ['*']


def _auth():
    '''
    Return the auth object, creating and caching it in ``__context__`` on
    first use so the (expensive) key handshake happens only once per loader
    context.
    '''
    if 'auth' not in __context__:
        __context__['auth'] = salt.crypt.SAuth(__opts__)
    return __context__['auth']


def _gather_pillar(pillarenv, pillar_override):
    '''
    Compile fresh pillar data for this minion.

    pillarenv
        Pillar environment to compile against.
    pillar_override
        Optional dict merged on top of the compiled pillar (takes
        precedence over compiled values).

    Returns the compiled (and possibly overridden) pillar dict.

    NOTE(review): uses ``salt.pillar`` without an explicit import here;
    presumably it is made available via one of the ``salt.*`` imports
    above — confirm before refactoring the import block.
    '''
    pillar = salt.pillar.get_pillar(
        __opts__,
        __grains__,
        __opts__['id'],
        __opts__['environment'],
        pillar=pillar_override,
        pillarenv=pillarenv
    )
    ret = pillar.compile_pillar()
    if pillar_override and isinstance(pillar_override, dict):
        ret.update(pillar_override)
    return ret


def recv(files, dest):
    '''
    Used with salt-cp, pass the files dict, and the destination.

    This function receives small fast copy files from the master via salt-cp.
    It does not work via the CLI.

    files
        Dict mapping source path -> file contents, as sent by salt-cp.
    dest
        Destination file or directory on the minion.

    Returns a dict of {final_path: bool} per file written, or the string
    ``'Destination unavailable'`` when no usable destination can be derived.
    '''
    ret = {}
    for path, data in six.iteritems(files):
        # Resolve the final on-disk path: exact-file match, directory
        # destination, or a destination whose parent directory exists.
        if os.path.basename(path) == os.path.basename(dest) \
                and not os.path.isdir(dest):
            final = dest
        elif os.path.isdir(dest):
            final = os.path.join(dest, os.path.basename(path))
        elif os.path.isdir(os.path.dirname(dest)):
            final = dest
        else:
            return 'Destination unavailable'

        try:
            # NOTE(review): opened in text mode ('w+'); binary payloads would
            # be mangled on Windows — confirm salt-cp only sends text here.
            with salt.utils.fopen(final, 'w+') as fp_:
                fp_.write(data)
            ret[final] = True
        except IOError:
            ret[final] = False

    return ret


def _mk_client():
    '''
    Create a file client and add it to the context.

    Each file client needs to correspond to a unique copy
    of the opts dictionary, therefore it's hashed by the
    id of the __opts__ dict
    '''
    if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__:
        __context__['cp.fileclient_{0}'.format(id(__opts__))] = \
            salt.fileclient.get_file_client(__opts__)


def _client():
    '''
    Return the (context-cached) file client for the current ``__opts__``.
    '''
    _mk_client()
    return __context__['cp.fileclient_{0}'.format(id(__opts__))]


def _render_filenames(path, dest, saltenv, template, **kw):
    '''
    Process markup in the :param:`path` and :param:`dest` variables (NOT the
    files under the paths they ultimately point to) according to the markup
    format provided by :param:`template`.

    Returns the ``(path, dest)`` tuple, rendered when ``template`` is set.

    Raises CommandExecutionError when the requested template engine is not
    registered or rendering fails.
    '''
    if not template:
        return (path, dest)

    # render the path as a template using path_template_engine as the engine
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        raise CommandExecutionError(
            'Attempted to render file paths with unavailable engine '
            '{0}'.format(template)
        )

    kwargs = {}
    kwargs['salt'] = __salt__
    # An explicit pillar/pillarenv forces a fresh pillar compile; otherwise
    # the already-loaded __pillar__ is reused.
    if 'pillarenv' in kw or 'pillar' in kw:
        pillarenv = kw.get('pillarenv', __opts__.get('pillarenv'))
        kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar'))
    else:
        kwargs['pillar'] = __pillar__
    kwargs['grains'] = __grains__
    kwargs['opts'] = __opts__
    kwargs['saltenv'] = saltenv

    def _render(contents):
        '''
        Render :param:`contents` into a literal pathname by writing it to a
        temp file, rendering that file, and returning the result.
        '''
        # write out path to temp file
        tmp_path_fn = salt.utils.mkstemp()
        with salt.utils.fopen(tmp_path_fn, 'w+') as fp_:
            fp_.write(contents)
        data = salt.utils.templates.TEMPLATE_REGISTRY[template](
            tmp_path_fn,
            to_str=True,
            **kwargs
        )
        salt.utils.safe_rm(tmp_path_fn)
        if not data['result']:
            # Failed to render the template
            raise CommandExecutionError(
                'Failed to render file path with error: {0}'.format(
                    data['data']
                )
            )
        else:
            return data['data']

    path = _render(path)
    dest = _render(dest)
    return (path, dest)


def get_file(path,
             dest,
             saltenv='base',
             makedirs=False,
             template=None,
             gzip=None,
             **kwargs):
    '''
    Used to get a single file from the salt master

    CLI Example:

    .. code-block:: bash

        salt '*' cp.get_file salt://path/to/file /minion/dest

    Template rendering can be enabled on both the source and destination file
    names like so:

    .. code-block:: bash

        salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja

    This example would instruct all Salt minions to download the vimrc from a
    directory with the same name as their os grain and copy it to /etc/vimrc

    For larger files, the cp.get_file module also supports gzip compression.
    Because gzip is CPU-intensive, this should only be used in scenarios where
    the compression ratio is very high (e.g. pretty-printed JSON or YAML
    files).

    Use the *gzip* named argument to enable it.  Valid values are 1..9, where
    1 is the lightest compression and 9 the heaviest.  1 uses the least CPU on
    the master (and minion), 9 uses the most.

    There are two ways of defining the fileserver environment (a.k.a.
    ``saltenv``) from which to retrieve the file. One is to use the ``saltenv``
    parameter, and the other is to use a querystring syntax in the ``salt://``
    URL. The below two examples are equivalent:

    .. code-block:: bash

        salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config
        salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf

    .. note::
        It may be necessary to quote the URL when using the querystring method,
        depending on the shell being used to run the command.
    '''
    (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs)

    # A ?saltenv= querystring in the URL overrides the saltenv parameter
    path, senv = salt.utils.url.split_env(path)
    if senv:
        saltenv = senv

    # No hash on the master means the file does not exist there; return ''
    if not hash_file(path, saltenv):
        return ''
    else:
        return _client().get_file(
                path,
                dest,
                makedirs,
                saltenv,
                gzip)


def get_template(path,
                 dest,
                 template='jinja',
                 saltenv='base',
                 makedirs=False,
                 **kwargs):
    '''
    Render a file as a template before setting it down.
    Warning, order is not the same as in fileclient.cp for
    non breaking old API.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.get_template salt://path/to/template /minion/dest
    '''
    # Fill in the standard rendering context only where the caller did not
    # provide an explicit override.
    if 'salt' not in kwargs:
        kwargs['salt'] = __salt__
    if 'pillar' not in kwargs:
        kwargs['pillar'] = __pillar__
    if 'grains' not in kwargs:
        kwargs['grains'] = __grains__
    if 'opts' not in kwargs:
        kwargs['opts'] = __opts__
    return _client().get_template(
            path,
            dest,
            template,
            makedirs,
            saltenv,
            **kwargs)


def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs):
    '''
    Used to recursively copy a directory from the salt master

    CLI Example:

    .. code-block:: bash

        salt '*' cp.get_dir salt://path/to/dir/ /minion/dest

    get_dir supports the same template and gzip arguments as get_file.
    '''
    (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs)

    return _client().get_dir(path, dest, saltenv, gzip)


def get_url(path, dest='', saltenv='base', makedirs=False):
    '''
    Used to get a single file from a URL.

    path
        A URL to download a file from. Supported URL schemes are: ``salt://``,
        ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and
        ``file://`` (local filesystem). If no scheme was specified, this is
        equivalent of using ``file://``.
        If a ``file://`` URL is given, the function just returns absolute path
        to that file on a local filesystem.
        The function returns ``False`` if Salt was unable to fetch a file from
        a ``salt://`` URL.

    dest
        The default behaviour is to write the fetched file to the given
        destination path. If this parameter is omitted or set as empty string
        (``''``), the function places the remote file on the local filesystem
        inside the Minion cache directory and returns the path to that file.

        .. note::

            To simply return the file contents instead, set destination to
            ``None``. This works with ``salt://``, ``http://``, ``https://``
            and ``file://`` URLs. The files fetched by ``http://`` and
            ``https://`` will not be cached.

    saltenv : base
        Salt fileserver environment from which to retrieve the file. Ignored if
        ``path`` is not a ``salt://`` URL.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine
        salt '*' cp.get_url http://www.slashdot.org /tmp/index.html
    '''
    # dest=None (non-string) means "return contents, do not cache"
    if isinstance(dest, six.string_types):
        result = _client().get_url(path, dest, makedirs, saltenv)
    else:
        result = _client().get_url(path, None, makedirs, saltenv, no_cache=True)
    if not result:
        log.error(
            'Unable to fetch file {0} from saltenv {1}.'.format(
                path, saltenv
            )
        )
    return result


def get_file_str(path, saltenv='base'):
    '''
    Download a file from a URL to the Minion cache directory and return the
    contents of that file

    Returns ``False`` if Salt was unable to cache a file from a URL.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.get_file_str salt://my/file
    '''
    fn_ = cache_file(path, saltenv)
    if isinstance(fn_, six.string_types):
        with salt.utils.fopen(fn_, 'r') as fp_:
            data = fp_.read()
        return data
    # cache_file returned False/'' — propagate that failure value
    return fn_


def cache_file(path, saltenv='base'):
    '''
    Used to cache a single file on the Minion

    Returns the location of the new cached file on the Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.cache_file salt://path/to/file

    There are two ways of defining the fileserver environment (a.k.a.
    ``saltenv``) from which to cache the file. One is to use the ``saltenv``
    parameter, and the other is to use a querystring syntax in the ``salt://``
    URL. The below two examples are equivalent:

    .. code-block:: bash

        salt '*' cp.cache_file salt://foo/bar.conf saltenv=config
        salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config

    If the path being cached is a ``salt://`` URI, and the path does not
    exist, then ``False`` will be returned.

    .. note::
        It may be necessary to quote the URL when using the querystring method,
        depending on the shell being used to run the command.
    '''
    contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv)

    path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp')
    try:
        if path_is_remote and contextkey in __context__:
            # Prevent multiple caches in the same salt run. Affects remote URLs
            # since the master won't know their hash, so the fileclient
            # wouldn't be able to prevent multiple caches if we try to cache
            # the remote URL more than once.
            if os.path.isfile(__context__[contextkey]):
                return __context__[contextkey]
            else:
                # File is in __context__ but no longer exists in the minion
                # cache, get rid of the context key and re-cache below.
                # Accounts for corner case where file is removed from minion
                # cache between cp.cache_file calls in the same salt-run.
                __context__.pop(contextkey)
    except AttributeError:
        pass

    path, senv = salt.utils.url.split_env(path)
    if senv:
        saltenv = senv

    result = _client().cache_file(path, saltenv)
    if not result:
        log.error(
            'Unable to cache file \'{0}\' from saltenv \'{1}\'.'.format(
                path, saltenv
            )
        )
    if path_is_remote:
        # Cache was successful, store the result in __context__ to prevent
        # multiple caches (see above).
        __context__[contextkey] = result
    return result


def cache_files(paths, saltenv='base'):
    '''
    Used to gather many files from the Master, the gathered files will be
    saved in the minion cachedir reflective to the paths retrieved from the
    Master

    CLI Example:

    .. code-block:: bash

        salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1

    There are two ways of defining the fileserver environment (a.k.a.
    ``saltenv``) from which to cache the files. One is to use the ``saltenv``
    parameter, and the other is to use a querystring syntax in the ``salt://``
    URL. The below two examples are equivalent:

    .. code-block:: bash

        salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config
        salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config

    The querystring method is less useful when all files are being cached from
    the same environment, but is a good way of caching files from multiple
    different environments in the same command. For example, the below command
    will cache the first file from the ``config1`` environment, and the second
    one from the ``config2`` environment.

    .. code-block:: bash

        salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2

    .. note::
        It may be necessary to quote the URL when using the querystring method,
        depending on the shell being used to run the command.
    '''
    return _client().cache_files(paths, saltenv)


def cache_dir(path, saltenv='base', include_empty=False, include_pat=None,
              exclude_pat=None):
    '''
    Download and cache everything under a directory from the master

    include_pat : None
        Glob or regex to narrow down the files cached from the given path. If
        matching with a regex, the regex must be prefixed with ``E@``,
        otherwise the expression will be interpreted as a glob.

        .. versionadded:: 2014.7.0

    exclude_pat : None
        Glob or regex to exclude certain files from being cached from the
        given path. If matching with a regex, the regex must be prefixed with
        ``E@``, otherwise the expression will be interpreted as a glob.

        .. note::

            If used with ``include_pat``, files matching this pattern will be
            excluded from the subset of files defined by ``include_pat``.

        .. versionadded:: 2014.7.0

    CLI Examples:

    .. code-block:: bash

        salt '*' cp.cache_dir salt://path/to/dir
        salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$'
    '''
    return _client().cache_dir(
        path, saltenv, include_empty, include_pat, exclude_pat
    )


def cache_master(saltenv='base'):
    '''
    Retrieve all of the files on the master and cache them locally

    CLI Example:

    .. code-block:: bash

        salt '*' cp.cache_master
    '''
    return _client().cache_master(saltenv)


def cache_local_file(path):
    '''
    Cache a local file on the minion in the localfiles cache

    Returns the path of the cached copy, or ``''`` when the source file does
    not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.cache_local_file /etc/hosts
    '''
    if not os.path.exists(path):
        return ''

    path_cached = is_cached(path)

    # If the file has already been cached, return the path
    if path_cached:
        path_hash = hash_file(path)
        path_cached_hash = hash_file(path_cached)

        if path_hash['hsum'] == path_cached_hash['hsum']:
            return path_cached

    # The file hasn't been cached or has changed; cache it
    return _client().cache_local_file(path)


def list_states(saltenv='base'):
    '''
    List all of the available state modules in an environment

    CLI Example:

    .. code-block:: bash

        salt '*' cp.list_states
    '''
    return _client().list_states(saltenv)


def list_master(saltenv='base', prefix=''):
    '''
    List all of the files stored on the master

    CLI Example:

    .. code-block:: bash

        salt '*' cp.list_master
    '''
    return _client().file_list(saltenv, prefix)


def list_master_dirs(saltenv='base', prefix=''):
    '''
    List all of the directories stored on the master

    CLI Example:

    .. code-block:: bash

        salt '*' cp.list_master_dirs
    '''
    return _client().dir_list(saltenv, prefix)


def list_master_symlinks(saltenv='base', prefix=''):
    '''
    List all of the symlinks stored on the master

    CLI Example:

    .. code-block:: bash

        salt '*' cp.list_master_symlinks
    '''
    return _client().symlink_list(saltenv, prefix)


def list_minion(saltenv='base'):
    '''
    List all of the files cached on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' cp.list_minion
    '''
    return _client().file_local_list(saltenv)


def is_cached(path, saltenv='base'):
    '''
    Return a boolean if the given path on the master has been cached on the
    minion

    CLI Example:

    .. code-block:: bash

        salt '*' cp.is_cached salt://path/to/file
    '''
    return _client().is_cached(path, saltenv)


def hash_file(path, saltenv='base'):
    '''
    Return the hash of a file, to get the hash of a file on the
    salt master file server prepend the path with salt://<file on server>
    otherwise, prepend the file with / for a local file.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.hash_file salt://path/to/file
    '''
    path, senv = salt.utils.url.split_env(path)
    if senv:
        saltenv = senv

    return _client().hash_file(path, saltenv)


def stat_file(path, saltenv='base', octal=True):
    '''
    Return the permissions of a file, to get the permissions of a file on the
    salt master file server prepend the path with salt://<file on server>
    otherwise, prepend the file with / for a local file.

    CLI Example:

    .. code-block:: bash

        salt '*' cp.stat_file salt://path/to/file
    '''
    path, senv = salt.utils.url.split_env(path)
    if senv:
        saltenv = senv

    stat = _client().hash_and_stat_file(path, saltenv)[1]
    if stat is None:
        return stat
    return salt.utils.st_mode_to_octal(stat[0]) if octal is True else stat[0]


def push(path, keep_symlinks=False, upload_path=None, remove_source=False):
    '''
    WARNING Files pushed to the master will have global read permissions..

    Push a file from the minion up to the master, the file will be saved to
    the salt master in the master's minion files cachedir
    (defaults to ``/var/cache/salt/master/minions/minion-id/files``)

    Since this feature allows a minion to push a file up to the master server
    it is disabled by default for security purposes. To enable, set
    ``file_recv`` to ``True`` in the master configuration file, and restart
    the master.

    keep_symlinks
        Keep the path value without resolving its canonical form

    upload_path
        Provide a different path inside the master's minion files cachedir

    remove_source
        Remove the source file on the minion

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' cp.push /etc/fstab
        salt '*' cp.push /etc/system-release keep_symlinks=True
        salt '*' cp.push /etc/fstab upload_path='/new/path/fstab'
        salt '*' cp.push /tmp/filename remove_source=True
    '''
    log.debug('Trying to copy \'{0}\' to master'.format(path))
    # Reject relative paths and path-traversal attempts up front
    if '../' in path or not os.path.isabs(path):
        log.debug('Path must be absolute, returning False')
        return False
    if not keep_symlinks:
        path = os.path.realpath(path)
    if not os.path.isfile(path):
        log.debug('Path failed os.path.isfile check, returning False')
        return False
    auth = _auth()

    if upload_path:
        # Same traversal guard for the caller-chosen upload destination
        if '../' in upload_path:
            log.debug('Path must be absolute, returning False')
            log.debug('Bad path: {0}'.format(upload_path))
            return False
        load_path = upload_path.lstrip(os.sep)
    else:
        load_path = path.lstrip(os.sep)
    # Normalize the path. This does not eliminate
    # the possibility that relative entries will still be present
    load_path_normal = os.path.normpath(load_path)

    # If this is Windows and a drive letter is present, remove it
    load_path_split_drive = os.path.splitdrive(load_path_normal)[1]

    # Finally, split the remaining path into a list for delivery to the master
    load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f]

    load = {'cmd': '_file_recv',
            'id': __opts__['id'],
            'path': load_path_list,
            'tok': auth.gen_token('salt')}
    channel = salt.transport.Channel.factory(__opts__)
    # Stream the file to the master in file_buffer_size chunks; an empty
    # read after at least one send signals completion.
    with salt.utils.fopen(path, 'rb') as fp_:
        init_send = False
        while True:
            load['loc'] = fp_.tell()
            load['data'] = fp_.read(__opts__['file_buffer_size'])
            if not load['data'] and init_send:
                if remove_source:
                    try:
                        salt.utils.rm_rf(path)
                        log.debug('Removing source file \'{0}\''.format(path))
                    except IOError:
                        log.error('cp.push failed to remove file \
\'{0}\''.format(path))
                        return False
                return True
            ret = channel.send(load)
            if not ret:
                log.error('cp.push Failed transfer failed. Ensure master has '
                          '\'file_recv\' set to \'True\' and that the file '
                          'is not larger than the \'file_recv_size_max\' '
                          'setting on the master.')
                return ret
            init_send = True


def push_dir(path, glob=None, upload_path=None):
    '''
    Push a directory from the minion up to the master, the files will be saved
    to the salt master in the master's minion files cachedir (defaults to
    ``/var/cache/salt/master/minions/minion-id/files``).  It also has a glob
    for matching specific files using globbing.

    .. versionadded:: 2014.7.0

    Since this feature allows a minion to push files up to the master server
    it is disabled by default for security purposes. To enable, set
    ``file_recv`` to ``True`` in the master configuration file, and restart
    the master.

    upload_path
        Provide a different path and directory name inside the master's minion
        files cachedir

    CLI Example:

    .. code-block:: bash

        salt '*' cp.push /usr/lib/mysql
        salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path'
        salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf'
    '''
    # Same traversal/absolute-path guard as push()
    if '../' in path or not os.path.isabs(path):
        return False
    tmpupload_path = upload_path
    path = os.path.realpath(path)
    if os.path.isfile(path):
        # A plain file just delegates to push()
        return push(path, upload_path=upload_path)
    else:
        filelist = []
        for root, _, files in os.walk(path):
            filelist += [os.path.join(root, tmpfile) for tmpfile in files]
        if glob is not None:
            # Filter on basename only, per the documented glob semantics
            filelist = [fi for fi in filelist
                        if fnmatch.fnmatch(os.path.basename(fi), glob)]
        if not filelist:
            return False
        for tmpfile in filelist:
            if upload_path and tmpfile.startswith(path):
                # Re-root each file's path under upload_path on the master
                tmpupload_path = os.path.join(os.path.sep,
                                              upload_path.strip(os.path.sep),
                                              tmpfile.replace(path, '')
                                              .strip(os.path.sep))
            ret = push(tmpfile, upload_path=tmpupload_path)
            if not ret:
                return ret
    return True
./CrossVul/dataset_final_sorted/CWE-200/py/good_3325_1
crossvul-python_data_bad_1574_0
""" Gather information about a system and report it using plugins supplied for application-specific information """ # sosreport.py # gather information about a system and report it # Copyright (C) 2006 Steve Conklin <sconklin@redhat.com> # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. import sys import traceback import os import errno import logging from optparse import OptionParser, Option from sos.plugins import import_plugin from sos.utilities import ImporterHelper from stat import ST_UID, ST_GID, ST_MODE, ST_CTIME, ST_ATIME, ST_MTIME, S_IMODE from time import strftime, localtime from collections import deque import tempfile from sos import _sos as _ from sos import __version__ import sos.policies from sos.archive import TarFileArchive, ZipFileArchive from sos.reporting import (Report, Section, Command, CopiedFile, CreatedFile, Alert, Note, PlainTextReport) # PYCOMPAT import six from six.moves import zip, input if six.PY3: from configparser import ConfigParser else: from ConfigParser import ConfigParser from six import print_ # file system errors that should terminate a run fatal_fs_errors = (errno.ENOSPC, errno.EROFS) def _format_list(first_line, items, indent=False): lines = [] line = first_line if indent: newline = len(first_line) * ' ' else: newline = "" for item in items: if len(line) + len(item) + 2 > 72: lines.append(line) line = 
newline line = line + item + ', ' if line[-2:] == ', ': line = line[:-2] lines.append(line) return lines class TempFileUtil(object): def __init__(self, tmp_dir): self.tmp_dir = tmp_dir self.files = [] def new(self): fd, fname = tempfile.mkstemp(dir=self.tmp_dir) fobj = open(fname, 'w') self.files.append((fname, fobj)) return fobj def clean(self): for fname, f in self.files: try: f.flush() f.close() except Exception: pass try: os.unlink(fname) except Exception: pass self.files = [] class OptionParserExtended(OptionParser): """ Show examples """ def print_help(self, out=sys.stdout): """ Prints help content including examples """ OptionParser.print_help(self, out) print_() print_("Some examples:") print_() print_(" enable cluster plugin only and collect dlm lockdumps:") print_(" # sosreport -o cluster -k cluster.lockdump") print_() print_(" disable memory and samba plugins, turn off rpm -Va " "collection:") print_(" # sosreport -n memory,samba -k rpm.rpmva=off") print_() class SosOption(Option): """Allow to specify comma delimited list of plugins""" ACTIONS = Option.ACTIONS + ("extend",) STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) def take_action(self, action, dest, opt, value, values, parser): """ Performs list extension on plugins """ if action == "extend": try: lvalue = value.split(",") except: pass else: values.ensure_value(dest, deque()).extend(lvalue) else: Option.take_action(self, action, dest, opt, value, values, parser) class XmlReport(object): """ Report build class """ def __init__(self): try: import libxml2 except ImportError: self.enabled = False return else: self.enabled = False return self.doc = libxml2.newDoc("1.0") self.root = self.doc.newChild(None, "sos", None) self.commands = self.root.newChild(None, "commands", None) self.files = self.root.newChild(None, "files", None) def add_command(self, cmdline, exitcode, stdout=None, stderr=None, f_stdout=None, f_stderr=None, runtime=None): """ Appends 
command run into report """ if not self.enabled: return cmd = self.commands.newChild(None, "cmd", None) cmd.setNsProp(None, "cmdline", cmdline) cmdchild = cmd.newChild(None, "exitcode", str(exitcode)) if runtime: cmd.newChild(None, "runtime", str(runtime)) if stdout or f_stdout: cmdchild = cmd.newChild(None, "stdout", stdout) if f_stdout: cmdchild.setNsProp(None, "file", f_stdout) if stderr or f_stderr: cmdchild = cmd.newChild(None, "stderr", stderr) if f_stderr: cmdchild.setNsProp(None, "file", f_stderr) def add_file(self, fname, stats): """ Appends file(s) added to report """ if not self.enabled: return cfile = self.files.newChild(None, "file", None) cfile.setNsProp(None, "fname", fname) cchild = cfile.newChild(None, "uid", str(stats[ST_UID])) cchild = cfile.newChild(None, "gid", str(stats[ST_GID])) cfile.newChild(None, "mode", str(oct(S_IMODE(stats[ST_MODE])))) cchild = cfile.newChild(None, "ctime", strftime('%a %b %d %H:%M:%S %Y', localtime(stats[ST_CTIME]))) cchild.setNsProp(None, "tstamp", str(stats[ST_CTIME])) cchild = cfile.newChild(None, "atime", strftime('%a %b %d %H:%M:%S %Y', localtime(stats[ST_ATIME]))) cchild.setNsProp(None, "tstamp", str(stats[ST_ATIME])) cchild = cfile.newChild(None, "mtime", strftime('%a %b %d %H:%M:%S %Y', localtime(stats[ST_MTIME]))) cchild.setNsProp(None, "tstamp", str(stats[ST_MTIME])) def serialize(self): """ Serializes xml """ if not self.enabled: return self.ui_log.info(self.doc.serialize(None, 1)) def serialize_to_file(self, fname): """ Serializes to file """ if not self.enabled: return outf = tempfile.NamedTemporaryFile() outf.write(self.doc.serialize(None, 1)) outf.flush() self.archive.add_file(outf.name, dest=fname) outf.close() class SoSOptions(object): _list_plugins = False _noplugins = [] _enableplugins = [] _onlyplugins = [] _plugopts = [] _usealloptions = False _all_logs = False _log_size = 10 _batch = False _build = False _verbosity = 0 _verify = False _quiet = False _debug = False _case_id = "" _customer_name = "" 
_profiles = deque() _list_profiles = False _config_file = "" _tmp_dir = "" _report = True _compression_type = 'auto' _options = None def __init__(self, args=None): if args: self._options = self._parse_args(args) else: self._options = None def _check_options_initialized(self): if self._options is not None: raise ValueError("SoSOptions object already initialized " + "from command line") @property def list_plugins(self): if self._options is not None: return self._options.list_plugins return self._list_plugins @list_plugins.setter def list_plugins(self, value): self._check_options_initialized() if not isinstance(value, bool): raise TypeError("SoSOptions.list_plugins expects a boolean") self._list_plugins = value @property def noplugins(self): if self._options is not None: return self._options.noplugins return self._noplugins @noplugins.setter def noplugins(self, value): self._check_options_initialized() self._noplugins = value @property def enableplugins(self): if self._options is not None: return self._options.enableplugins return self._enableplugins @enableplugins.setter def enableplugins(self, value): self._check_options_initialized() self._enableplugins = value @property def onlyplugins(self): if self._options is not None: return self._options.onlyplugins return self._onlyplugins @onlyplugins.setter def onlyplugins(self, value): self._check_options_initialized() self._onlyplugins = value @property def plugopts(self): if self._options is not None: return self._options.plugopts return self._plugopts @plugopts.setter def plugopts(self, value): # If we check for anything it should be itterability. 
        # NOTE(review): type validation for plugopts is commented out
        # upstream; the value is stored unchecked.
        # if not isinstance(value, list):
        #     raise TypeError("SoSOptions.plugopts expects a list")
        self._plugopts = value

    # Each option below follows the same pattern: the getter delegates to
    # the parsed optparse results (self._options) when present, otherwise
    # returns the locally stored default; the setter is only legal before
    # command-line parsing (enforced by _check_options_initialized).
    @property
    def usealloptions(self):
        if self._options is not None:
            return self._options.usealloptions
        return self._usealloptions

    @usealloptions.setter
    def usealloptions(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.usealloptions expects a boolean")
        self._usealloptions = value

    @property
    def all_logs(self):
        if self._options is not None:
            return self._options.all_logs
        return self._all_logs

    @all_logs.setter
    def all_logs(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.all_logs expects a boolean")
        self._all_logs = value

    @property
    def log_size(self):
        if self._options is not None:
            return self._options.log_size
        return self._log_size

    @log_size.setter
    def log_size(self, value):
        self._check_options_initialized()
        if value < 0:
            raise ValueError("SoSOptions.log_size expects a value greater "
                             "than zero")
        self._log_size = value

    @property
    def batch(self):
        if self._options is not None:
            return self._options.batch
        return self._batch

    @batch.setter
    def batch(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.batch expects a boolean")
        self._batch = value

    @property
    def build(self):
        if self._options is not None:
            return self._options.build
        return self._build

    @build.setter
    def build(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.build expects a boolean")
        self._build = value

    @property
    def verbosity(self):
        if self._options is not None:
            return self._options.verbosity
        return self._verbosity

    @verbosity.setter
    def verbosity(self, value):
        self._check_options_initialized()
        if value < 0 or value > 3:
            raise ValueError("SoSOptions.verbosity expects a value [0..3]")
        self._verbosity = value

    @property
    def verify(self):
        if self._options is not None:
            return self._options.verify
        return self._verify

    @verify.setter
    def verify(self, value):
        self._check_options_initialized()
        # NOTE(review): --verify is a store_true flag, yet this range check
        # treats it as an int [0..3] — TODO confirm intended type upstream.
        if value < 0 or value > 3:
            raise ValueError("SoSOptions.verify expects a value [0..3]")
        self._verify = value

    @property
    def quiet(self):
        if self._options is not None:
            return self._options.quiet
        return self._quiet

    @quiet.setter
    def quiet(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.quiet expects a boolean")
        self._quiet = value

    @property
    def debug(self):
        if self._options is not None:
            return self._options.debug
        return self._debug

    @debug.setter
    def debug(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.debug expects a boolean")
        self._debug = value

    @property
    def case_id(self):
        if self._options is not None:
            return self._options.case_id
        return self._case_id

    @case_id.setter
    def case_id(self, value):
        # Free-form string; no validation applied.
        self._check_options_initialized()
        self._case_id = value

    @property
    def customer_name(self):
        if self._options is not None:
            return self._options.customer_name
        return self._customer_name

    @customer_name.setter
    def customer_name(self, value):
        self._check_options_initialized()
        self._customer_name = value

    @property
    def profiles(self):
        if self._options is not None:
            return self._options.profiles
        return self._profiles

    @profiles.setter
    def profiles(self, value):
        self._check_options_initialized()
        self._profiles = value

    @property
    def list_profiles(self):
        if self._options is not None:
            return self._options.list_profiles
        return self._list_profiles

    @list_profiles.setter
    def list_profiles(self, value):
        self._check_options_initialized()
        self._list_profiles = value

    @property
    def config_file(self):
        if self._options is not None:
            return self._options.config_file
        return self._config_file

    @config_file.setter
    def config_file(self, value):
        self._check_options_initialized()
        self._config_file = value

    @property
    def tmp_dir(self):
        if self._options is not None:
            return self._options.tmp_dir
        return self._tmp_dir

    @tmp_dir.setter
    def tmp_dir(self, value):
        self._check_options_initialized()
        self._tmp_dir = value

    @property
    def report(self):
        if self._options is not None:
            return self._options.report
        return self._report

    @report.setter
    def report(self, value):
        self._check_options_initialized()
        if not isinstance(value, bool):
            raise TypeError("SoSOptions.report expects a boolean")
        self._report = value

    @property
    def compression_type(self):
        if self._options is not None:
            return self._options.compression_type
        return self._compression_type

    @compression_type.setter
    def compression_type(self, value):
        self._check_options_initialized()
        self._compression_type = value

    def _parse_args(self, args):
        """ Parse command line options and arguments"""
        self.parser = parser = OptionParserExtended(option_class=SosOption)
        parser.add_option("-l", "--list-plugins", action="store_true",
                          dest="list_plugins", default=False,
                          help="list plugins and available plugin options")
        parser.add_option("-n", "--skip-plugins", action="extend",
                          dest="noplugins", type="string",
                          help="disable these plugins", default=deque())
        parser.add_option("-e", "--enable-plugins", action="extend",
                          dest="enableplugins", type="string",
                          help="enable these plugins", default=deque())
        parser.add_option("-o", "--only-plugins", action="extend",
                          dest="onlyplugins", type="string",
                          help="enable these plugins only", default=deque())
        parser.add_option("-k", "--plugin-option", action="extend",
                          dest="plugopts", type="string",
                          help="plugin options in plugname.option=value "
                          "format (see -l)", default=deque())
        parser.add_option("--log-size", action="store",
                          dest="log_size", default=10, type="int",
                          help="set a limit on the size of collected logs")
        parser.add_option("-a", "--alloptions", action="store_true",
                          dest="usealloptions", default=False,
                          help="enable all options for loaded plugins")
        parser.add_option("--all-logs", action="store_true",
                          dest="all_logs", default=False,
                          help="collect all available logs regardless of size")
        parser.add_option("--batch", action="store_true",
                          dest="batch", default=False,
                          help="batch mode - do not prompt interactively")
        parser.add_option("--build", action="store_true",
                          dest="build", default=False,
                          help="preserve the temporary directory and do not "
                          "package results")
        parser.add_option("-v", "--verbose", action="count",
                          dest="verbosity",
                          help="increase verbosity")
        parser.add_option("", "--verify", action="store_true",
                          dest="verify", default=False,
                          help="perform data verification during collection")
        parser.add_option("", "--quiet", action="store_true",
                          dest="quiet", default=False,
                          help="only print fatal errors")
        parser.add_option("--debug", action="count",
                          dest="debug",
                          help="enable interactive debugging using the python "
                          "debugger")
        # NOTE: --ticket-number and --case-id share dest="case_id"; the
        # last one given on the command line wins.
        parser.add_option("--ticket-number", action="store",
                          dest="case_id",
                          help="specify ticket number")
        parser.add_option("--case-id", action="store",
                          dest="case_id",
                          help="specify case identifier")
        parser.add_option("-p", "--profile", action="extend",
                          dest="profiles", type="string", default=deque(),
                          help="enable plugins selected by the given profiles")
        parser.add_option("--list-profiles", action="store_true",
                          dest="list_profiles", default=False)
        parser.add_option("--name", action="store",
                          dest="customer_name",
                          help="specify report name")
        parser.add_option("--config-file", action="store",
                          dest="config_file",
                          help="specify alternate configuration file")
        parser.add_option("--tmp-dir", action="store",
                          dest="tmp_dir",
                          help="specify alternate temporary directory",
                          default=None)
        # dest="report" is True when reporting is DISABLED (see execute()).
        parser.add_option("--no-report", action="store_true",
                          dest="report",
                          help="Disable HTML/XML reporting", default=False)
        parser.add_option("-z", "--compression-type", dest="compression_type",
                          help="compression technology to use [auto, zip, "
                          "gzip, bzip2, xz] (default=auto)",
                          default="auto")

        # parse_args returns (options, args); only options are used.
        return parser.parse_args(args)[0]


class SoSReport(object):
    """The main sosreport class"""

    def __init__(self, args):
        # Queues of (name, instance[, reason]) tuples built by load_plugins().
        self.loaded_plugins = deque()
        self.skipped_plugins = deque()
        self.all_options = deque()
        self.xml_report = XmlReport()
        self.global_plugin_options = {}
        self.archive = None
        self.tempfile_util = None
        self._args = args

        try:
            import signal
            signal.signal(signal.SIGTERM, self.get_exit_handler())
        except Exception:
            pass  # not available in java, but we don't care

        self.opts = SoSOptions(args)
        self._set_debug()
        self._read_config()

        try:
            self.policy = sos.policies.load()
        except KeyboardInterrupt:
            self._exit(0)

        self._is_root = self.policy.is_root()

        # Resolve the temp dir via the policy; --tmp-dir overrides.
        self.tmpdir = os.path.abspath(
            self.policy.get_tmp_dir(self.opts.tmp_dir))
        if not os.path.isdir(self.tmpdir) \
                or not os.access(self.tmpdir, os.W_OK):
            # write directly to stderr as logging is not initialised yet
            sys.stderr.write("temporary directory %s " % self.tmpdir
                             + "does not exist or is not writable\n")
            self._exit(1)
        self.tempfile_util = TempFileUtil(self.tmpdir)
        self._set_directories()

    def print_header(self):
        # Banner printed to the UI log once logging is configured.
        self.ui_log.info("\n%s\n" % _("sosreport (version %s)" %
                                      (__version__,)))

    def get_commons(self):
        # Shared state handed to every plugin instance.
        return {
            'cmddir': self.cmddir,
            'logdir': self.logdir,
            'rptdir': self.rptdir,
            'tmpdir': self.tmpdir,
            'soslog': self.soslog,
            'policy': self.policy,
            'verbosity': self.opts.verbosity,
            'xmlreport': self.xml_report,
            'cmdlineopts': self.opts,
            'config': self.config,
            'global_plugin_options': self.global_plugin_options,
        }

    def get_temp_file(self):
        return self.tempfile_util.new()

    def _set_archive(self):
        # Pick the archive implementation from -z/--compression-type;
        # 'auto' defers the choice to the active policy.
        archive_name = os.path.join(self.tmpdir,
                                    self.policy.get_archive_name())
        if self.opts.compression_type == 'auto':
            auto_archive = self.policy.get_preferred_archive()
            self.archive = auto_archive(archive_name, self.tmpdir)
        elif self.opts.compression_type == 'zip':
            self.archive = ZipFileArchive(archive_name, self.tmpdir)
        else:
            self.archive = TarFileArchive(archive_name, self.tmpdir)
        self.archive.set_debug(True if self.opts.debug else False)

    def _make_archive_paths(self):
        # Pre-create the three top-level directories inside the archive.
        self.archive.makedirs(self.cmddir, 0o755)
        self.archive.makedirs(self.logdir, 0o755)
        self.archive.makedirs(self.rptdir, 0o755)
    def _set_directories(self):
        # Relative paths inside the archive for commands, logs and reports.
        self.cmddir = 'sos_commands'
        self.logdir = 'sos_logs'
        self.rptdir = 'sos_reports'

    def _set_debug(self):
        # In --debug mode plugin exceptions propagate and unhandled
        # exceptions drop into pdb via _exception below.
        if self.opts.debug:
            sys.excepthook = self._exception
            self.raise_plugins = True
        else:
            self.raise_plugins = False

    @staticmethod
    def _exception(etype, eval_, etrace):
        """ Wrap exception in debugger if not in tty """
        if hasattr(sys, 'ps1') or not sys.stderr.isatty():
            # we are in interactive mode or we don't have a tty-like
            # device, so we call the default hook
            sys.__excepthook__(etype, eval_, etrace)
        else:
            import pdb
            # we are NOT in interactive mode, print the exception...
            traceback.print_exception(etype, eval_, etrace, limit=2,
                                      file=sys.stdout)
            print_()
            # ...then start the debugger in post-mortem mode.
            pdb.pm()

    def _exit(self, error=0):
        # NOTE(review): the error code argument is currently ignored;
        # SystemExit is raised with no status.
        raise SystemExit()
        # sys.exit(error)

    def get_exit_handler(self):
        # Returns a closure suitable for signal.signal (SIGTERM).
        def exit_handler(signum, frame):
            self._exit()
        return exit_handler

    def _read_config(self):
        # Read /etc/sos.conf (or --config-file); a missing file is not an
        # error and leaves an empty parser.
        self.config = ConfigParser()
        if self.opts.config_file:
            config_file = self.opts.config_file
        else:
            config_file = '/etc/sos.conf'
        try:
            self.config.readfp(open(config_file))
        except IOError:
            pass

    def _setup_logging(self):
        # main soslog: file handler always attached, console only when
        # not in --quiet mode; verbosity raises the console/file levels.
        self.soslog = logging.getLogger('sos')
        self.soslog.setLevel(logging.DEBUG)
        self.sos_log_file = self.get_temp_file()
        self.sos_log_file.close()
        flog = logging.FileHandler(self.sos_log_file.name)
        flog.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s'))
        flog.setLevel(logging.INFO)
        self.soslog.addHandler(flog)

        if not self.opts.quiet:
            console = logging.StreamHandler(sys.stderr)
            console.setFormatter(logging.Formatter('%(message)s'))
            if self.opts.verbosity and self.opts.verbosity > 1:
                console.setLevel(logging.DEBUG)
                flog.setLevel(logging.DEBUG)
            elif self.opts.verbosity and self.opts.verbosity > 0:
                console.setLevel(logging.INFO)
                flog.setLevel(logging.DEBUG)
            else:
                console.setLevel(logging.WARNING)
            self.soslog.addHandler(console)

        # ui log: user-facing progress messages, mirrored to a file that
        # ends up in the archive as sos_logs/ui.log.
        self.ui_log = logging.getLogger('sos_ui')
        self.ui_log.setLevel(logging.INFO)
        self.sos_ui_log_file = self.get_temp_file()
        self.sos_ui_log_file.close()
        ui_fhandler = logging.FileHandler(self.sos_ui_log_file.name)
        ui_fhandler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s'))

        self.ui_log.addHandler(ui_fhandler)

        if not self.opts.quiet:
            ui_console = logging.StreamHandler(sys.stdout)
            ui_console.setFormatter(logging.Formatter('%(message)s'))
            ui_console.setLevel(logging.INFO)
            self.ui_log.addHandler(ui_console)

    def _finish_logging(self):
        logging.shutdown()
        # Make sure the log files are added before we remove the log
        # handlers. This prevents "No handlers could be found.." messages
        # from leaking to the console when running in --quiet mode when
        # Archive classes attempt to acess the log API.
        if getattr(self, "sos_log_file", None):
            self.archive.add_file(self.sos_log_file.name,
                                  dest=os.path.join('sos_logs', 'sos.log'))
        if getattr(self, "sos_ui_log_file", None):
            self.archive.add_file(self.sos_ui_log_file.name,
                                  dest=os.path.join('sos_logs', 'ui.log'))

    def _get_disabled_plugins(self):
        # Plugins disabled via the [plugins] disable= key in sos.conf.
        disabled = []
        if self.config.has_option("plugins", "disable"):
            disabled = [plugin.strip() for plugin in
                        self.config.get("plugins", "disable").split(',')]
        return disabled

    def _is_in_profile(self, plugin_class):
        # With no -p/--profile given, every plugin is "in profile".
        onlyplugins = self.opts.onlyplugins
        if not len(self.opts.profiles):
            return True
        if not hasattr(plugin_class, "profiles"):
            return False
        # -o/--only-plugins overrides profile membership.
        if onlyplugins and not self._is_not_specified(plugin_class.name()):
            return True
        return any([p in self.opts.profiles for p in plugin_class.profiles])

    def _is_skipped(self, plugin_name):
        return (plugin_name in self.opts.noplugins or
                plugin_name in self._get_disabled_plugins())

    def _is_inactive(self, plugin_name, pluginClass):
        return (not pluginClass(self.get_commons()).check_enabled() and
                plugin_name not in self.opts.enableplugins and
                plugin_name not in self.opts.onlyplugins)

    def _is_not_default(self, plugin_name, pluginClass):
        return (not pluginClass(self.get_commons()).default_enabled() and
                plugin_name not in self.opts.enableplugins and
                plugin_name not in self.opts.onlyplugins)

    def _is_not_specified(self, plugin_name):
        return (self.opts.onlyplugins and
                plugin_name not in self.opts.onlyplugins)

    def _skip(self, plugin_class, reason="unknown"):
        # Record a skipped plugin as (name, instance, reason).
        self.skipped_plugins.append((
            plugin_class.name(),
            plugin_class(self.get_commons()),
            reason
        ))

    def _load(self, plugin_class):
        # Record an enabled plugin as (name, instance).
        self.loaded_plugins.append((
            plugin_class.name(),
            plugin_class(self.get_commons())
        ))

    def load_plugins(self):
        # Discover plugin modules, then filter each one through the policy
        # and the command-line/profile selection rules.
        import sos.plugins
        helper = ImporterHelper(sos.plugins)
        plugins = helper.get_modules()
        self.plugin_names = deque()
        self.profiles = set()
        using_profiles = len(self.opts.profiles)

        # validate and load plugins
        for plug in plugins:
            plugbase, ext = os.path.splitext(plug)
            try:
                plugin_classes = import_plugin(
                    plugbase, tuple(self.policy.valid_subclasses))
                if not len(plugin_classes):
                    # no valid plugin classes for this policy
                    continue

                plugin_class = self.policy.match_plugin(plugin_classes)
                if not self.policy.validate_plugin(plugin_class):
                    self.soslog.warning(
                        _("plugin %s does not validate, skipping") % plug)
                    if self.opts.verbosity > 0:
                        self._skip(plugin_class, _("does not validate"))
                    continue

                if plugin_class.requires_root and not self._is_root:
                    # NOTE(review): adjacent string literals concatenate
                    # without a space, so the logged message reads
                    # "...permissionsto execute..."; runtime string left
                    # unchanged here.
                    self.soslog.info(_("plugin %s requires root permissions"
                                       "to execute, skipping") % plug)
                    self._skip(plugin_class, _("requires root"))
                    continue

                # plug-in is valid, let's decide whether run it or not
                self.plugin_names.append(plugbase)

                if hasattr(plugin_class, "profiles"):
                    self.profiles.update(plugin_class.profiles)

                in_profile = self._is_in_profile(plugin_class)
                if not in_profile:
                    self._skip(plugin_class, _("excluded"))
                    continue

                if self._is_skipped(plugbase):
                    self._skip(plugin_class, _("skipped"))
                    continue

                if self._is_inactive(plugbase, plugin_class):
                    self._skip(plugin_class, _("inactive"))
                    continue

                if self._is_not_default(plugbase, plugin_class):
                    self._skip(plugin_class, _("optional"))
                    continue

                # true when the null (empty) profile is active
                default_profile = not using_profiles and in_profile
                if self._is_not_specified(plugbase) and default_profile:
                    self._skip(plugin_class, _("not specified"))
                    continue

                self._load(plugin_class)
            except Exception as e:
                self.soslog.warning(_("plugin %s does not install, "
                                      "skipping: %s") % (plug, e))
                if self.raise_plugins:
                    raise

    def _set_all_options(self):
        # -a/--alloptions: flip every boolean plugin option to enabled.
        if self.opts.usealloptions:
            for plugname, plug in self.loaded_plugins:
                for name, parms in zip(plug.opt_names, plug.opt_parms):
                    if type(parms["enabled"]) == bool:
                        parms["enabled"] = True

    def _set_tunables(self):
        # Merge [tunables] entries from sos.conf into -k/--plugin-option
        # values, then apply "plug.opt=value" settings to loaded plugins.
        if self.config.has_section("tunables"):
            if not self.opts.plugopts:
                self.opts.plugopts = deque()
            for opt, val in self.config.items("tunables"):
                if not opt.split('.')[0] in self._get_disabled_plugins():
                    self.opts.plugopts.append(opt + "=" + val)

        if self.opts.plugopts:
            opts = {}
            for opt in self.opts.plugopts:
                # split up "general.syslogsize=5"
                try:
                    opt, val = opt.split("=")
                except:
                    # no "=": treat as a boolean enable flag
                    val = True
                else:
                    if val.lower() in ["off", "disable", "disabled", "false"]:
                        val = False
                    else:
                        # try to convert string "val" to int()
                        try:
                            val = int(val)
                        except:
                            pass

                # split up "general.syslogsize"
                try:
                    plug, opt = opt.split(".")
                except:
                    plug = opt
                    opt = True

                try:
                    opts[plug]
                except KeyError:
                    opts[plug] = deque()
                opts[plug].append((opt, val))

            for plugname, plug in self.loaded_plugins:
                if plugname in opts:
                    for opt, val in opts[plugname]:
                        if not plug.set_option(opt, val):
                            self.soslog.error('no such option "%s" for plugin '
                                              '(%s)' % (opt, plugname))
                            self._exit(1)
                    del opts[plugname]
            # anything left over targets a plugin that was never loaded
            for plugname in opts.keys():
                self.soslog.error('unable to set option for disabled or '
                                  'non-existing plugin (%s)' % (plugname))

    def _check_for_unknown_plugins(self):
        # Fail fast if -o/-n/-e name a plugin that was never discovered.
        import itertools
        for plugin in itertools.chain(self.opts.onlyplugins,
                                      self.opts.noplugins,
                                      self.opts.enableplugins):
            plugin_name = plugin.split(".")[0]
            if plugin_name not in self.plugin_names:
                self.soslog.fatal('a non-existing plugin (%s) was specified '
                                  'in the command line' % (plugin_name))
                self._exit(1)

    def _set_plugin_options(self):
        # Flatten every plugin's options into self.all_options for -l output.
        for plugin_name, plugin in self.loaded_plugins:
            names, parms = plugin.get_all_options()
            for optname, optparm in zip(names, parms):
                self.all_options.append((plugin, plugin_name,
                                         optname, optparm))

    def list_plugins(self):
        # Implements -l/--list-plugins: enabled, disabled, options, profiles.
        if not self.loaded_plugins and not self.skipped_plugins:
            self.soslog.fatal(_("no valid plugins found"))
            return

        if self.loaded_plugins:
            self.ui_log.info(_("The following plugins are currently enabled:"))
            self.ui_log.info("")
            for (plugname, plug) in self.loaded_plugins:
                self.ui_log.info(" %-20s %s" % (plugname,
                                                plug.get_description()))
        else:
            self.ui_log.info(_("No plugin enabled."))
        self.ui_log.info("")

        if self.skipped_plugins:
            self.ui_log.info(_("The following plugins are currently "
                               "disabled:"))
            self.ui_log.info("")
            for (plugname, plugclass, reason) in self.skipped_plugins:
                self.ui_log.info(" %-20s %-14s %s" % (
                    plugname, reason, plugclass.get_description()))
        self.ui_log.info("")

        if self.all_options:
            self.ui_log.info(_("The following plugin options are available:"))
            self.ui_log.info("")
            for (plug, plugname, optname, optparm) in self.all_options:
                # format option value based on its type (int or bool)
                if type(optparm["enabled"]) == bool:
                    if optparm["enabled"] is True:
                        tmpopt = "on"
                    else:
                        tmpopt = "off"
                else:
                    tmpopt = optparm["enabled"]

                self.ui_log.info(" %-25s %-15s %s" % (
                    plugname + "." + optname, tmpopt, optparm["desc"]))
        else:
            self.ui_log.info(_("No plugin options available."))

        self.ui_log.info("")
        profiles = list(self.profiles)
        profiles.sort()
        lines = _format_list("Profiles: ", profiles, indent=True)
        for line in lines:
            self.ui_log.info(" %s" % line)
        self.ui_log.info("")
        self.ui_log.info(" %d profiles, %d plugins" % (
            len(self.profiles), len(self.loaded_plugins)))
        self.ui_log.info("")

    def list_profiles(self):
        # Implements --list-profiles: each profile with its plugins.
        if not self.profiles:
            self.soslog.fatal(_("no valid profiles found"))
            return
        self.ui_log.info(_("The following profiles are available:"))
        self.ui_log.info("")

        def _has_prof(c):
            return hasattr(c, "profiles")

        profiles = list(self.profiles)
        profiles.sort()
        for profile in profiles:
            plugins = []
            for name, plugin in self.loaded_plugins:
                if _has_prof(plugin) and profile in plugin.profiles:
                    plugins.append(name)
            lines = _format_list("%-15s " % profile, plugins, indent=True)
            for line in lines:
                self.ui_log.info(" %s" % line)
        self.ui_log.info("")
        self.ui_log.info(" %d profiles, %d plugins" % (
            len(profiles), len(self.loaded_plugins)))
        self.ui_log.info("")

    def batch(self):
        # Show the policy message; unless --batch, wait for ENTER/CTRL-C.
        if self.opts.batch:
            self.ui_log.info(self.policy.get_msg())
        else:
            msg = self.policy.get_msg()
            msg += _("Press ENTER to continue, or CTRL-C to quit.\n")
            try:
                input(msg)
            except:
                self.ui_log.info("")
                self._exit()

    def _log_plugin_exception(self, plugin_name):
        self.soslog.error("%s\n%s" % (plugin_name, traceback.format_exc()))

    def prework(self):
        # Policy pre-work plus archive creation; a fatal filesystem error
        # (ENOSPC etc.) or any unexpected exception aborts the run.
        self.policy.pre_work()

        try:
            self.ui_log.info(_(" Setting up archive ..."))
            compression_methods = ('auto', 'zip', 'bzip2', 'gzip', 'xz')
            method = self.opts.compression_type
            if method not in compression_methods:
                compression_list = ', '.join(compression_methods)
                self.ui_log.error("")
                self.ui_log.error("Invalid compression specified: " + method)
                self.ui_log.error("Valid types are: " + compression_list)
                self.ui_log.error("")
                self._exit(1)
            self._set_archive()
            self._make_archive_paths()
            return
        except (OSError, IOError) as e:
            if e.errno in fatal_fs_errors:
                self.ui_log.error("")
                self.ui_log.error(" %s while setting up archive" % e.strerror)
                self.ui_log.error("")
            else:
                raise e
        except Exception as e:
            import traceback
            self.ui_log.error("")
            self.ui_log.error(" Unexpected exception setting up archive:")
            traceback.print_exc(e)
            self.ui_log.error(e)
        self._exit(1)

    def setup(self):
        # Call each loaded plugin's setup(); plugin errors are logged
        # (or re-raised under --debug), fatal fs errors abort the run.
        msg = "[%s:%s] executing 'sosreport %s'"
        self.soslog.info(msg % (__name__, "setup", " ".join(self._args)))
        self.ui_log.info(_(" Setting up plugins ..."))
        for plugname, plug in self.loaded_plugins:
            try:
                plug.archive = self.archive
                plug.setup()
            except KeyboardInterrupt:
                raise
            except (OSError, IOError) as e:
                if e.errno in fatal_fs_errors:
                    self.ui_log.error("")
                    self.ui_log.error(" %s while setting up plugins"
                                      % e.strerror)
                    self.ui_log.error("")
                    self._exit(1)
            except:
                if self.raise_plugins:
                    raise
                else:
                    self._log_plugin_exception(plugname)

    def version(self):
        """Fetch version information from all plugins
        and store in the report version file"""

        versions = []
        versions.append("sosreport: %s" % __version__)
        for plugname, plug in self.loaded_plugins:
            versions.append("%s: %s" % (plugname, plug.version))
        self.archive.add_string(content="\n".join(versions),
                                dest='version.txt')

    def collect(self):
        # Run each plugin's collect() with a progress line; \r overwrite
        # at verbosity 0, one line per plugin otherwise.
        self.ui_log.info(_(" Running plugins. Please wait ..."))
        self.ui_log.info("")
        plugruncount = 0
        for i in zip(self.loaded_plugins):
            plugruncount += 1
            plugname, plug = i[0]
            status_line = (" Running %d/%d: %s... " % (
                plugruncount, len(self.loaded_plugins), plugname))
            if self.opts.verbosity == 0:
                status_line = "\r%s" % status_line
            else:
                status_line = "%s\n" % status_line
            if not self.opts.quiet:
                sys.stdout.write(status_line)
                sys.stdout.flush()
            try:
                plug.collect()
            except KeyboardInterrupt:
                raise
            except (OSError, IOError) as e:
                if e.errno in fatal_fs_errors:
                    self.ui_log.error("")
                    self.ui_log.error(" %s while collecting plugin data"
                                      % e.strerror)
                    self.ui_log.error("")
                    self._exit(1)
            except:
                if self.raise_plugins:
                    raise
                else:
                    self._log_plugin_exception(plugname)
        self.ui_log.info("")

    def report(self):
        # Serialize file metadata for every copied file into sosreport.xml.
        for plugname, plug in self.loaded_plugins:
            for oneFile in plug.copied_files:
                try:
                    self.xml_report.add_file(oneFile["srcpath"],
                                             os.stat(oneFile["srcpath"]))
                except:
                    # best-effort: files may have vanished since collection
                    pass
        try:
            self.xml_report.serialize_to_file(
                os.path.join(self.rptdir, "sosreport.xml"))
        except (OSError, IOError) as e:
            if e.errno in fatal_fs_errors:
                self.ui_log.error("")
                self.ui_log.error(" %s while writing report data"
                                  % e.strerror)
                self.ui_log.error("")
                self._exit(1)

    def plain_report(self):
        # Build the plain-text report (sos_reports/sos.txt) from each
        # plugin's alerts, notes, copied files, commands and strings.
        report = Report()
        for plugname, plug in self.loaded_plugins:
            section = Section(name=plugname)
            for alert in plug.alerts:
                section.add(Alert(alert))
            if plug.custom_text:
                section.add(Note(plug.custom_text))
            for f in plug.copied_files:
                section.add(CopiedFile(name=f['srcpath'],
                                       href=".." + f['dstpath']))
            for cmd in plug.executed_commands:
                section.add(Command(name=cmd['exe'], return_code=0,
                                    href="../" + cmd['file']))
            for content, f in plug.copy_strings:
                section.add(CreatedFile(name=f))
            report.add(section)
        try:
            fd = self.get_temp_file()
            fd.write(str(PlainTextReport(report)))
            fd.flush()
            self.archive.add_file(
                fd.name, dest=os.path.join('sos_reports', 'sos.txt'))
        except (OSError, IOError) as e:
            if e.errno in fatal_fs_errors:
                self.ui_log.error("")
                self.ui_log.error(" %s while writing text report"
                                  % e.strerror)
                self.ui_log.error("")
                self._exit(1)

    def html_report(self):
        try:
            self._html_report()
        except (OSError, IOError) as e:
            if e.errno in fatal_fs_errors:
                self.ui_log.error("")
                self.ui_log.error(" %s while writing HTML report"
                                  % e.strerror)
                self.ui_log.error("")
                self._exit(1)

    def _html_report(self):
        # Generate the header for the html output file
        rfd = self.get_temp_file()
        rfd.write("""
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
    <head>
        <link rel="stylesheet" type="text/css" media="screen"
              href="donot.css" />
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        <title>Sos System Report</title>
    </head>
    <body>
""")
        # Make a pass to gather Alerts and a list of module names
        allAlerts = deque()
        plugNames = deque()
        for plugname, plug in self.loaded_plugins:
            for alert in plug.alerts:
                allAlerts.append('<a href="#%s">%s</a>: %s' % (
                    plugname, plugname, alert))
            plugNames.append(plugname)

        # Create a table of links to the module info
        rfd.write("<hr/><h3>Loaded Plugins:</h3>")
        rfd.write("<table><tr>\n")
        rr = 0
        for i in range(len(plugNames)):
            rfd.write('<td><a href="#%s">%s</a></td>\n' % (
                plugNames[i], plugNames[i]))
            # four columns per table row
            rr = divmod(i, 4)[1]
            if (rr == 3):
                rfd.write('</tr>')
        if not (rr == 3):
            rfd.write('</tr>')
        rfd.write('</table>\n')

        rfd.write('<hr/><h3>Alerts:</h3>')
        rfd.write('<ul>')
        for alert in allAlerts:
            rfd.write('<li>%s</li>' % alert)
        rfd.write('</ul>')

        # Call the report method for each plugin
        for plugname, plug in self.loaded_plugins:
            try:
                html = plug.report()
            except:
                if self.raise_plugins:
                    raise
            else:
                # only written when report() did not raise
                rfd.write(html)
        rfd.write("</body></html>")
        rfd.flush()
        self.archive.add_file(
            rfd.name, dest=os.path.join('sos_reports', 'sos.html'))

    def postproc(self):
        # Give each plugin a chance to post-process (e.g. scrub) its data.
        for plugname, plug in self.loaded_plugins:
            try:
                plug.postproc()
            except (OSError, IOError) as e:
                if e.errno in fatal_fs_errors:
                    self.ui_log.error("")
                    self.ui_log.error(" %s while post-processing plugin data"
                                      % e.strerror)
                    self.ui_log.error("")
                    self._exit(1)
            except:
                if self.raise_plugins:
                    raise

    def final_work(self):
        # this must come before archive creation to ensure that log
        # files are closed and cleaned up at exit.
        self._finish_logging()
        # package up the results for the support organization
        if not self.opts.build:
            if not self.opts.quiet:
                print(_("Creating compressed archive..."))
            # compression could fail for a number of reasons
            try:
                final_filename = self.archive.finalize(
                    self.opts.compression_type)
            except (OSError, IOError) as e:
                if e.errno in fatal_fs_errors:
                    self.ui_log.error("")
                    self.ui_log.error(" %s while finalizing archive"
                                      % e.strerror)
                    self.ui_log.error("")
                    self._exit(1)
            except:
                if self.opts.debug:
                    raise
                else:
                    return False
        else:
            # --build: keep the uncompressed tree in place
            final_filename = self.archive.get_archive_path()
        self.policy.display_results(final_filename, build=self.opts.build)
        self.tempfile_util.clean()
        return True

    def verify_plugins(self):
        if not self.loaded_plugins:
            self.soslog.error(_("no valid plugins were enabled"))
            return False
        return True

    def set_global_plugin_option(self, key, value):
        self.global_plugin_options[key] = value

    def execute(self):
        # Top-level run sequence; returns True on success, False on
        # interruption or archive failure.
        try:
            self._setup_logging()
            self.policy.set_commons(self.get_commons())
            self.print_header()
            self.load_plugins()
            self._set_all_options()
            self._set_tunables()
            self._check_for_unknown_plugins()
            self._set_plugin_options()

            if self.opts.list_plugins:
                self.list_plugins()
                return True
            if self.opts.list_profiles:
                self.list_profiles()
                return True

            # verify that at least one plug-in is enabled
            if not self.verify_plugins():
                return False

            self.batch()
            self.prework()
            self.setup()
            self.collect()
            # opts.report is True when --no-report was given, so this
            # branch generates the reports in the default case.
            if not self.opts.report:
                self.report()
                self.html_report()
                self.plain_report()
            self.postproc()
            self.version()

            return self.final_work()
        except (SystemExit, KeyboardInterrupt):
            if self.archive:
                self.archive.cleanup()
            if self.tempfile_util:
                self.tempfile_util.clean()
            return False


def main(args):
    """The main entry point"""
    sos = SoSReport(args)
    sos.execute()

# vim: et ts=4 sw=4
./CrossVul/dataset_final_sorted/CWE-200/py/bad_1574_0
crossvul-python_data_good_1725_0
# -*- coding: utf-8 -*-
'''
Support for the Git SCM
'''
from __future__ import absolute_import

# Import python libs
import os
import re
import subprocess

# Import salt libs
from salt import utils
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse  # pylint: disable=no-name-in-module,import-error
from salt.ext.six.moves.urllib.parse import urlunparse as _urlunparse  # pylint: disable=no-name-in-module,import-error


def __virtual__():
    '''
    Only load if git exists on the system
    '''
    return True if utils.which('git') else False


def _git_run(cmd, cwd=None, runas=None, identity=None, **kwargs):
    '''
    simple, throw an exception with the error message on an error return
    code.

    this function may be moved to the command module, spliced with
    'cmd.run_all', and used as an alternative to 'cmd.run_all'. Some
    commands don't return proper retcodes, so this can't replace
    'cmd.run_all'.
    '''
    env = {}

    if identity:
        stderrs = []

        # if the statefile provides multiple identities, they need to be tried
        # (but also allow a string instead of a list)
        if not isinstance(identity, list):
            # force it into a list
            identity = [identity]

        # try each of the identities, independently
        for id_file in identity:
            env = {
                'GIT_IDENTITY': id_file
            }

            # copy wrapper to area accessible by ``runas`` user
            # currently no suppport in windows for wrapping git ssh
            if not utils.is_windows():
                ssh_id_wrapper = os.path.join(
                    utils.templates.TEMPLATE_DIRNAME, 'git/ssh-id-wrapper')
                tmp_file = utils.mkstemp()
                utils.files.copyfile(ssh_id_wrapper, tmp_file)
                os.chmod(tmp_file, 0o500)
                os.chown(tmp_file, __salt__['file.user_to_uid'](runas), -1)
                env['GIT_SSH'] = tmp_file

            try:
                result = __salt__['cmd.run_all'](cmd,
                                                 cwd=cwd,
                                                 runas=runas,
                                                 output_loglevel='quiet',
                                                 env=env,
                                                 python_shell=False,
                                                 **kwargs)
            finally:
                # always remove the temporary GIT_SSH wrapper
                if 'GIT_SSH' in env:
                    os.remove(env['GIT_SSH'])

            # if the command was successful, no need to try additional IDs
            if result['retcode'] == 0:
                return result['stdout']
            else:
                # redact any embedded HTTPS credentials before recording
                stderr = _remove_sensitive_data(result['stderr'])
                stderrs.append(stderr)

        # we've tried all IDs and still haven't passed, so error out
        raise CommandExecutionError("\n\n".join(stderrs))

    else:
        result = __salt__['cmd.run_all'](cmd,
                                         cwd=cwd,
                                         runas=runas,
                                         output_loglevel='quiet',
                                         env=env,
                                         python_shell=False,
                                         **kwargs)
        retcode = result['retcode']

        if retcode == 0:
            return result['stdout']
        else:
            # redact any embedded HTTPS credentials before raising
            stderr = _remove_sensitive_data(result['stderr'])
            raise CommandExecutionError(
                'Command {0!r} failed. Stderr: {1!r}'.format(cmd, stderr))


def _remove_sensitive_data(sensitive_output):
    '''
    Remove HTTP user and password.
    '''
    # replaces "user:pass@" (anything before @) in http(s) URLs so that
    # credentials from _add_http_basic_auth never reach logs or exceptions
    return re.sub('(https?)://.*@', r'\1://<redacted>@', sensitive_output)


def _git_getdir(cwd, user=None):
    '''
    Returns the absolute path to the top-level of a given repo because some
    Git commands are sensitive to where they're run from (archive for one)
    '''
    cmd_bare = 'git rev-parse --is-bare-repository'
    is_bare = __salt__['cmd.run_stdout'](cmd_bare, cwd, runas=user) == 'true'

    if is_bare:
        return cwd

    cmd_toplvl = 'git rev-parse --show-toplevel'
    return __salt__['cmd.run'](cmd_toplvl, cwd)


def _check_git():
    '''
    Check if git is available
    '''
    utils.check_or_die('git')


def _add_http_basic_auth(repository, https_user=None, https_pass=None):
    # Embed "user[:pass]@" credentials into an https:// repository URL.
    # Raises ValueError for any non-https scheme.
    if https_user is None and https_pass is None:
        return repository
    else:
        urltuple = _urlparse(repository)
        if urltuple.scheme == 'https':
            if https_pass:
                auth_string = "{0}:{1}".format(https_user, https_pass)
            else:
                auth_string = https_user
            netloc = "{0}@{1}".format(auth_string, urltuple.netloc)
            urltuple = urltuple._replace(netloc=netloc)
            return _urlunparse(urltuple)
        else:
            raise ValueError('Basic Auth only supported for HTTPS scheme')


def current_branch(cwd, user=None):
    '''
    Returns the current branch name, if on a branch.

    CLI Example:

    .. code-block:: bash

        salt '*' git.current_branch /path/to/repo
    '''
    cmd = r'git rev-parse --abbrev-ref HEAD'

    return __salt__['cmd.run_stdout'](cmd, cwd=cwd, runas=user)


def revision(cwd, rev='HEAD', short=False, user=None):
    '''
    Returns the long hash of a given identifier (hash, branch, tag, HEAD,
    etc)

    cwd
        The path to the Git repository

    rev: HEAD
        The revision

    short: False
        Return an abbreviated SHA1 git hash

    user : None
        Run git as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' git.revision /path/to/repo mybranch
    '''
    _check_git()

    cmd = 'git rev-parse {0}{1}'.format('--short ' if short else '', rev)
    return _git_run(cmd, cwd, runas=user)


def clone(cwd, repository, opts=None, user=None, identity=None,
          https_user=None, https_pass=None):
    '''
    Clone a new repository

    cwd
        The path to the Git repository

    repository
        The git URI of the repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    identity : None
        A path to a private key to use over SSH

    https_user : None
        HTTP Basic Auth username for HTTPS (only) clones

        .. versionadded:: 2015.5.0

    https_pass : None
        HTTP Basic Auth password for HTTPS (only) clones

        .. versionadded:: 2015.5.0

    CLI Example:

    .. code-block:: bash

        salt '*' git.clone /path/to/repo git://github.com/saltstack/salt.git
        salt '*' git.clone /path/to/repo.git\\
                git://github.com/saltstack/salt.git '--bare --origin github'
    '''
    _check_git()

    # NOTE(review): credentials injected here are redacted from any error
    # output by _remove_sensitive_data in _git_run.
    repository = _add_http_basic_auth(repository, https_user, https_pass)

    if not opts:
        opts = ''
    if utils.is_windows():
        cmd = 'git clone {0} {1} {2}'.format(repository, cwd, opts)
    else:
        cmd = 'git clone {0} {1!r} {2}'.format(repository, cwd, opts)

    return _git_run(cmd, runas=user, identity=identity)


def describe(cwd, rev='HEAD', user=None):
    '''
    Returns the git describe string (or the SHA hash if there are no tags)
    for the given revision

    cwd
        The path to the Git repository

    rev: HEAD
        The revision to describe

    user : None
        Run git as a user other than what the minion runs as

    CLI Examples:

    .. code-block:: bash

        salt '*' git.describe /path/to/repo
        salt '*' git.describe /path/to/repo develop
    '''
    # NOTE(review): bypasses _git_run and its stderr redaction; uses
    # cmd.run_stdout directly.
    cmd = 'git describe {0}'.format(rev)
    return __salt__['cmd.run_stdout'](cmd, cwd=cwd, runas=user,
                                      python_shell=False)


def archive(cwd, output, rev='HEAD', fmt=None, prefix=None, user=None):
    '''
    Export a tarball from the repository

    cwd
        The path to the Git repository

    output
        The path to the archive tarball

    rev: HEAD
        The revision to create an archive from

    fmt: None
        Format of the resulting archive, zip and tar are commonly used

    prefix : None
        Prepend <prefix>/ to every filename in the archive

    user : None
        Run git as a user other than what the minion runs as

    If ``prefix`` is not specified it defaults to the basename of the repo
    directory.

    CLI Example:

    .. code-block:: bash

        salt '*' git.archive /path/to/repo /path/to/archive.tar.gz
    '''
    _check_git()

    basename = '{0}/'.format(os.path.basename(_git_getdir(cwd, user=user)))

    cmd = 'git archive{prefix}{fmt} -o {output} {rev}'.format(
        rev=rev,
        output=output,
        fmt=' --format={0}'.format(fmt) if fmt else '',
        prefix=' --prefix="{0}"'.format(prefix if prefix else basename))
    return _git_run(cmd, cwd=cwd, runas=user)


def fetch(cwd, opts=None, user=None, identity=None):
    '''
    Perform a fetch on the given repository

    cwd
        The path to the Git repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    identity : None
        A path to a private key to use over SSH

    CLI Example:

    .. code-block:: bash

        salt '*' git.fetch /path/to/repo '--all'
        salt '*' git.fetch cwd=/path/to/repo opts='--all' user=johnny
    '''
    _check_git()

    if not opts:
        opts = ''
    cmd = 'git fetch {0}'.format(opts)

    return _git_run(cmd, cwd=cwd, runas=user, identity=identity)


def pull(cwd, opts=None, user=None, identity=None):
    '''
    Perform a pull on the given repository

    cwd
        The path to the Git repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    identity : None
        A path to a private key to use over SSH

    CLI Example:

    .. code-block:: bash

        salt '*' git.pull /path/to/repo opts='--rebase origin master'
    '''
    _check_git()

    if not opts:
        opts = ''
    return _git_run('git pull {0}'.format(opts), cwd=cwd, runas=user,
                    identity=identity)


def rebase(cwd, rev='master', opts=None, user=None):
    '''
    Rebase the current branch

    cwd
        The path to the Git repository

    rev : master
        The revision to rebase onto the current branch

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' git.rebase /path/to/repo master
        salt '*' git.rebase /path/to/repo 'origin master'

    That is the same as:

    .. code-block:: bash

        git rebase master
        git rebase origin master
    '''
    _check_git()

    if not opts:
        opts = ''
    return _git_run('git rebase {0} {1}'.format(opts, rev), cwd=cwd,
                    runas=user)


def checkout(cwd, rev, force=False, opts=None, user=None):
    '''
    Checkout a given revision

    cwd
        The path to the Git repository

    rev
        The remote branch or revision to checkout

    force : False
        Force a checkout even if there might be overwritten changes

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    CLI Examples:

    .. code-block:: bash

        salt '*' git.checkout /path/to/repo somebranch user=jeff
        salt '*' git.checkout /path/to/repo opts='testbranch -- conf/file1 file2'
        salt '*' git.checkout /path/to/repo rev=origin/mybranch opts=--track
    '''
    _check_git()

    if not opts:
        opts = ''
    cmd = 'git checkout {0} {1} {2}'.format(' -f' if force else '', rev, opts)
    return _git_run(cmd, cwd=cwd, runas=user)


def merge(cwd, branch='@{upstream}', opts=None, user=None):
    '''
    Merge a given branch

    cwd
        The path to the Git repository

    branch : @{upstream}
        The remote branch or revision to merge into the current branch

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' git.fetch /path/to/repo
        salt '*' git.merge /path/to/repo @{upstream}
    '''
    _check_git()

    if not opts:
        opts = ''
    cmd = 'git merge {0} {1}'.format(branch, opts)

    return _git_run(cmd, cwd, runas=user)


def init(cwd, opts=None, user=None):
    '''
    Initialize a new git repository

    cwd
        The path to the Git repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' git.init /path/to/repo.git opts='--bare'
    '''
    _check_git()

    if not opts:
        opts = ''
    cmd = 'git init {0} {1}'.format(cwd, opts)
    return _git_run(cmd, runas=user)


def submodule(cwd, init=True, opts=None, user=None, identity=None):
    '''
    Initialize git submodules

    cwd
        The path to the Git repository

    init : True
        Ensure that new submodules are initialized

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    identity : None
        A path to a private key to use over SSH

    CLI Example:

    .. code-block:: bash

        salt '*' git.submodule /path/to/repo.git/sub/repo
    '''
    _check_git()

    if not opts:
        opts = ''
    cmd = 'git submodule update {0} {1}'.format('--init' if init else '',
                                                opts)
    return _git_run(cmd, cwd=cwd, runas=user, identity=identity)


def status(cwd, user=None):
    '''
    Return the status of the repository. The returned format uses the status
    codes of git's 'porcelain' output mode

    cwd
        The path to the Git repository

    user : None
        Run git as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' git.status /path/to/git/repo
    '''
    # -z NUL-terminates entries, so filenames with spaces/newlines survive;
    # each entry is "XY <filename>" (2-char state, space, name).
    cmd = 'git status -z --porcelain'
    stdout = _git_run(cmd, cwd=cwd, runas=user)
    state_by_file = []
    for line in stdout.split("\0"):
        state = line[:2]
        filename = line[3:]
        if filename != '' and state != '':
            state_by_file.append((state, filename))

    return state_by_file


def add(cwd, file_name, user=None, opts=None):
    '''
    add a file to git

    cwd
        The path to the Git repository

    file_name
        Path to the file in the cwd

    opts : None
        Any additional options to add to the command line

    user : None
        Run git as a user other than what the minion runs as

    CLI Example:

    ..
code-block:: bash salt '*' git.add /path/to/git/repo /path/to/file ''' if not opts: opts = '' cmd = 'git add {0} {1}'.format(file_name, opts) return _git_run(cmd, cwd=cwd, runas=user) def rm(cwd, file_name, user=None, opts=None): ''' Remove a file from git cwd The path to the Git repository file_name Path to the file in the cwd opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.rm /path/to/git/repo /path/to/file ''' if not opts: opts = '' cmd = 'git rm {0} {1}'.format(file_name, opts) return _git_run(cmd, cwd=cwd, runas=user) def commit(cwd, message, user=None, opts=None): ''' create a commit cwd The path to the Git repository message The commit message opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.commit /path/to/git/repo 'The commit message' ''' cmd = subprocess.list2cmdline(['git', 'commit', '-m', message]) # add opts separately; they don't need to be quoted if opts: cmd = cmd + ' ' + opts return _git_run(cmd, cwd=cwd, runas=user) def push(cwd, remote_name, branch='master', user=None, opts=None, identity=None): ''' Push to remote cwd The path to the Git repository remote_name Name of the remote to push to branch : master Name of the branch to push opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as identity : None A path to a private key to use over SSH CLI Example: .. 
code-block:: bash salt '*' git.push /path/to/git/repo remote-name ''' if not opts: opts = '' cmd = 'git push {0} {1} {2}'.format(remote_name, branch, opts) return _git_run(cmd, cwd=cwd, runas=user, identity=identity) def remotes(cwd, user=None): ''' Get remotes like git remote -v cwd The path to the Git repository user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.remotes /path/to/repo ''' cmd = 'git remote' ret = _git_run(cmd, cwd=cwd, runas=user) res = dict() for remote_name in ret.splitlines(): remote = remote_name.strip() res[remote] = remote_get(cwd, remote, user=user) return res def remote_get(cwd, remote='origin', user=None): ''' get the fetch and push URL for a specified remote name remote : origin the remote name used to define the fetch and push URL user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.remote_get /path/to/repo salt '*' git.remote_get /path/to/repo upstream ''' try: cmd = 'git remote show -n {0}'.format(remote) ret = _git_run(cmd, cwd=cwd, runas=user) lines = ret.splitlines() remote_fetch_url = lines[1].replace('Fetch URL: ', '').strip() remote_push_url = lines[2].replace('Push URL: ', '').strip() if remote_fetch_url != remote and remote_push_url != remote: res = (remote_fetch_url, remote_push_url) return res else: return None except CommandExecutionError: return None def remote_set(cwd, name='origin', url=None, user=None, https_user=None, https_pass=None): ''' sets a remote with name and URL like git remote add <remote_name> <remote_url> remote_name : origin defines the remote name remote_url : None defines the remote URL; should not be None! user : None Run git as a user other than what the minion runs as https_user : None HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 2015.5.0 https_pass : None HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 CLI Example: .. 
code-block:: bash salt '*' git.remote_set /path/to/repo remote_url=git@github.com:saltstack/salt.git salt '*' git.remote_set /path/to/repo origin git@github.com:saltstack/salt.git ''' if remote_get(cwd, name): cmd = 'git remote rm {0}'.format(name) _git_run(cmd, cwd=cwd, runas=user) url = _add_http_basic_auth(url, https_user, https_pass) cmd = 'git remote add {0} {1}'.format(name, url) _git_run(cmd, cwd=cwd, runas=user) return remote_get(cwd=cwd, remote=name, user=None) def branch(cwd, rev, opts=None, user=None): ''' Interacts with branches. cwd The path to the Git repository rev The branch/revision to be used in the command. opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.branch mybranch --set-upstream-to=origin/mybranch ''' cmd = 'git branch {0} {1}'.format(rev, opts) _git_run(cmd, cwd=cwd, user=user) return current_branch(cwd, user=user) def reset(cwd, opts=None, user=None): ''' Reset the repository checkout cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.reset /path/to/repo master ''' _check_git() if not opts: opts = '' return _git_run('git reset {0}'.format(opts), cwd=cwd, runas=user) def stash(cwd, opts=None, user=None): ''' Stash changes in the repository checkout cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. 
code-block:: bash salt '*' git.stash /path/to/repo master ''' _check_git() if not opts: opts = '' return _git_run('git stash {0}'.format(opts), cwd=cwd, runas=user) def config_set(cwd=None, setting_name=None, setting_value=None, user=None, is_global=False): ''' Set a key in the git configuration file (.git/config) of the repository or globally. cwd : None Options path to the Git repository .. versionchanged:: 2014.7.0 Made ``cwd`` optional setting_name : None The name of the configuration key to set. Required. setting_value : None The (new) value to set. Required. user : None Run git as a user other than what the minion runs as is_global : False Set to True to use the '--global' flag with 'git config' CLI Example: .. code-block:: bash salt '*' git.config_set /path/to/repo user.email me@example.com ''' if setting_name is None or setting_value is None: raise TypeError('Missing required parameter setting_name for git.config_set') if cwd is None and not is_global: raise SaltInvocationError('Either `is_global` must be set to True or ' 'you must provide `cwd`') if is_global: cmd = 'git config --global {0} "{1}"'.format(setting_name, setting_value) else: cmd = 'git config {0} "{1}"'.format(setting_name, setting_value) _check_git() return _git_run(cmd, cwd=cwd, runas=user) def config_get(cwd=None, setting_name=None, user=None): ''' Get a key or keys from the git configuration file (.git/config). cwd : None Optional path to a Git repository .. versionchanged:: 2014.7.0 Made ``cwd`` optional setting_name : None The name of the configuration key to get. Required. user : None Run git as a user other than what the minion runs as CLI Example: .. 
code-block:: bash salt '*' git.config_get setting_name=user.email salt '*' git.config_get /path/to/repo user.name arthur ''' if setting_name is None: raise TypeError('Missing required parameter setting_name for git.config_get') _check_git() return _git_run('git config {0}'.format(setting_name), cwd=cwd, runas=user) def ls_remote(cwd, repository="origin", branch="master", user=None, identity=None, https_user=None, https_pass=None): ''' Returns the upstream hash for any given URL and branch. cwd The path to the Git repository repository: origin The name of the repository to get the revision from. Can be the name of a remote, an URL, etc. branch: master The name of the branch to get the revision from. user : none run git as a user other than what the minion runs as identity : none a path to a private key to use over ssh https_user : None HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 2015.5.0 https_pass : None HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' git.ls_remote /pat/to/repo origin master ''' _check_git() repository = _add_http_basic_auth(repository, https_user, https_pass) cmd = ' '.join(["git", "ls-remote", "-h", str(repository), str(branch), "| cut -f 1"]) return _git_run(cmd, cwd=cwd, runas=user, identity=identity)
./CrossVul/dataset_final_sorted/CWE-200/py/good_1725_0
crossvul-python_data_bad_3807_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2011 Piston Cloud Computing, Inc # Copyright (c) 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import time from lxml import etree from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova import utils from nova.virt import images CONF = cfg.CONF LOG = logging.getLogger(__name__) def execute(*args, **kwargs): return utils.execute(*args, **kwargs) def get_iscsi_initiator(): """Get iscsi initiator name for this machine""" # NOTE(vish) openiscsi stores initiator name in a file that # needs root permission to read. contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi') for l in contents.split('\n'): if l.startswith('InitiatorName='): return l[l.index('=') + 1:].strip() def create_image(disk_format, path, size): """Create a disk image :param disk_format: Disk image format (as known by qemu-img) :param path: Desired location of the disk image :param size: Desired size of disk image. May be given as an int or a string. If given as an int, it will be interpreted as bytes. If it's a string, it should consist of a number with an optional suffix ('K' for Kibibytes, M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes). 
If no suffix is given, it will be interpreted as bytes. """ execute('qemu-img', 'create', '-f', disk_format, path, size) def create_cow_image(backing_file, path): """Create COW image Creates a COW image with the given backing file :param backing_file: Existing image on which to base the COW image :param path: Desired location of the COW image """ base_cmd = ['qemu-img', 'create', '-f', 'qcow2'] cow_opts = [] if backing_file: cow_opts += ['backing_file=%s' % backing_file] base_details = images.qemu_img_info(backing_file) else: base_details = None # This doesn't seem to get inherited so force it to... # http://paste.ubuntu.com/1213295/ # TODO(harlowja) probably file a bug against qemu-img/qemu if base_details and base_details.cluster_size is not None: cow_opts += ['cluster_size=%s' % base_details.cluster_size] # For now don't inherit this due the following discussion... # See: http://www.gossamer-threads.com/lists/openstack/dev/10592 # if 'preallocation' in base_details: # cow_opts += ['preallocation=%s' % base_details['preallocation']] if base_details and base_details.encryption: cow_opts += ['encryption=%s' % base_details.encryption] if cow_opts: # Format as a comma separated list csv_opts = ",".join(cow_opts) cow_opts = ['-o', csv_opts] cmd = base_cmd + cow_opts + [path] execute(*cmd) def create_lvm_image(vg, lv, size, sparse=False): """Create LVM image. Creates a LVM image with given size. :param vg: existing volume group which should hold this image :param lv: name for this image (logical volume) :size: size of image in bytes :sparse: create sparse logical volume """ free_space = volume_group_free_space(vg) def check_size(size): if size > free_space: raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.' 
' Only %(free_space)db available,' ' but %(size)db required' ' by volume %(lv)s.') % locals()) if sparse: preallocated_space = 64 * 1024 * 1024 check_size(preallocated_space) if free_space < size: LOG.warning(_('Volume group %(vg)s will not be able' ' to hold sparse volume %(lv)s.' ' Virtual volume size is %(size)db,' ' but free space on volume group is' ' only %(free_space)db.') % locals()) cmd = ('lvcreate', '-L', '%db' % preallocated_space, '--virtualsize', '%db' % size, '-n', lv, vg) else: check_size(size) cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg) execute(*cmd, run_as_root=True, attempts=3) def volume_group_free_space(vg): """Return available space on volume group in bytes. :param vg: volume group name """ out, err = execute('vgs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'vg_free', vg, run_as_root=True) return int(out.strip()) def list_logical_volumes(vg): """List logical volumes paths for given volume group. :param vg: volume group name """ out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg, run_as_root=True) return [line.strip() for line in out.splitlines()] def logical_volume_info(path): """Get logical volume info. :param path: logical volume path """ out, err = execute('lvs', '-o', 'vg_all,lv_all', '--separator', '|', path, run_as_root=True) info = [line.split('|') for line in out.splitlines()] if len(info) != 2: raise RuntimeError(_("Path %s must be LVM logical volume") % path) return dict(zip(*info)) def remove_logical_volumes(*paths): """Remove one or more logical volume.""" if paths: lvremove = ('lvremove', '-f') + paths execute(*lvremove, attempts=3, run_as_root=True) def pick_disk_driver_name(is_block_dev=False): """Pick the libvirt primary backend driver name If the hypervisor supports multiple backend drivers, then the name attribute selects the primary backend driver name, while the optional type attribute provides the sub-type. 
For example, xen supports a name of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2", while qemu only supports a name of "qemu", but multiple types including "raw", "bochs", "qcow2", and "qed". :param is_block_dev: :returns: driver_name or None """ if CONF.libvirt_type == "xen": if is_block_dev: return "phy" else: return "tap" elif CONF.libvirt_type in ('kvm', 'qemu'): return "qemu" else: # UML doesn't want a driver_name set return None def get_disk_size(path): """Get the (virtual) size of a disk image :param path: Path to the disk image :returns: Size (in bytes) of the given disk image as it would be seen by a virtual machine. """ size = images.qemu_img_info(path).virtual_size return int(size) def get_disk_backing_file(path): """Get the backing file of a disk image :param path: Path to the disk image :returns: a path to the image's backing store """ backing_file = images.qemu_img_info(path).backing_file if backing_file: backing_file = os.path.basename(backing_file) return backing_file def copy_image(src, dest, host=None): """Copy a disk image to an existing directory :param src: Source image :param dest: Destination path :param host: Remote host """ if not host: # We shell out to cp because that will intelligently copy # sparse files. I.E. holes will not be written to DEST, # rather recreated efficiently. In addition, since # coreutils 8.11, holes can be read efficiently too. execute('cp', src, dest) else: dest = "%s:%s" % (host, dest) # Try rsync first as that can compress and create sparse dest files. # Note however that rsync currently doesn't read sparse files # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918 # At least network traffic is mitigated with compression. try: # Do a relatively light weight test first, so that we # can fall back to scp, without having run out of space # on the destination for example. 
execute('rsync', '--sparse', '--compress', '--dry-run', src, dest) except exception.ProcessExecutionError: execute('scp', src, dest) else: execute('rsync', '--sparse', '--compress', src, dest) def write_to_file(path, contents, umask=None): """Write the given contents to a file :param path: Destination file :param contents: Desired contents of the file :param umask: Umask to set when creating this file (will be reset) """ if umask: saved_umask = os.umask(umask) try: with open(path, 'w') as f: f.write(contents) finally: if umask: os.umask(saved_umask) def chown(path, owner): """Change ownership of file or directory :param path: File or directory whose ownership to change :param owner: Desired new owner (given as uid or username) """ execute('chown', owner, path, run_as_root=True) def create_snapshot(disk_path, snapshot_name): """Create a snapshot in a disk image :param disk_path: Path to disk image :param snapshot_name: Name of snapshot in disk image """ qemu_img_cmd = ('qemu-img', 'snapshot', '-c', snapshot_name, disk_path) # NOTE(vish): libvirt changes ownership of images execute(*qemu_img_cmd, run_as_root=True) def delete_snapshot(disk_path, snapshot_name): """Create a snapshot in a disk image :param disk_path: Path to disk image :param snapshot_name: Name of snapshot in disk image """ qemu_img_cmd = ('qemu-img', 'snapshot', '-d', snapshot_name, disk_path) # NOTE(vish): libvirt changes ownership of images execute(*qemu_img_cmd, run_as_root=True) def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt): """Extract a named snapshot from a disk image :param disk_path: Path to disk image :param snapshot_name: Name of snapshot in disk image :param out_path: Desired path of extracted snapshot """ # NOTE(markmc): ISO is just raw to qemu-img if dest_fmt == 'iso': dest_fmt = 'raw' qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt, '-s', snapshot_name, disk_path, out_path) execute(*qemu_img_cmd) def load_file(path): """Read 
contents of file :param path: File to read """ with open(path, 'r') as fp: return fp.read() def file_open(*args, **kwargs): """Open file see built-in file() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return file(*args, **kwargs) def file_delete(path): """Delete (unlink) file Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return os.unlink(path) def find_disk(virt_dom): """Find root device path for instance May be file or device""" xml_desc = virt_dom.XMLDesc(0) domain = etree.fromstring(xml_desc) if CONF.libvirt_type == 'lxc': source = domain.find('devices/filesystem/source') disk_path = source.get('dir') disk_path = disk_path[0:disk_path.rfind('rootfs')] disk_path = os.path.join(disk_path, 'disk') else: source = domain.find('devices/disk/source') disk_path = source.get('file') or source.get('dev') if not disk_path: raise RuntimeError(_("Can't retrieve root device path " "from instance libvirt configuration")) return disk_path def get_disk_type(path): """Retrieve disk type (raw, qcow2, lvm) for given file""" if path.startswith('/dev'): return 'lvm' return images.qemu_img_info(path).file_format def get_fs_info(path): """Get free/used/total space info for a filesystem :param path: Any dirent on the filesystem :returns: A dict containing: :free: How much space is free (in bytes) :used: How much space is used (in bytes) :total: How big the filesystem is (in bytes) """ hddinfo = os.statvfs(path) total = hddinfo.f_frsize * hddinfo.f_blocks free = hddinfo.f_frsize * hddinfo.f_bavail used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree) return {'total': total, 'free': free, 'used': used} def fetch_image(context, target, image_id, user_id, project_id): """Grab image""" images.fetch_to_raw(context, image_id, target, 
user_id, project_id)
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3807_0
crossvul-python_data_bad_1559_0
import os.path import logging from ceph_deploy import hosts, exc from ceph_deploy.cliutil import priority LOG = logging.getLogger(__name__) def fetch_file(args, frompath, topath, _hosts): if os.path.exists(topath): LOG.debug('Have %s', topath) return True else: for hostname in _hosts: filepath = frompath.format(hostname=hostname) LOG.debug('Checking %s for %s', hostname, filepath) distro = hosts.get(hostname, username=args.username) key = distro.conn.remote_module.get_file(filepath) if key is not None: LOG.debug('Got %s key from %s.', topath, hostname) with file(topath, 'w') as f: f.write(key) return True distro.conn.exit() LOG.warning('Unable to find %s on %s', filepath, hostname) return False def gatherkeys(args): # client.admin keyring = '/etc/ceph/{cluster}.client.admin.keyring'.format( cluster=args.cluster) r = fetch_file( args=args, frompath=keyring, topath='{cluster}.client.admin.keyring'.format( cluster=args.cluster), _hosts=args.mon, ) if not r: raise exc.KeyNotFoundError(keyring, args.mon) # mon. keyring = '/var/lib/ceph/mon/{cluster}-{{hostname}}/keyring'.format( cluster=args.cluster) r = fetch_file( args=args, frompath=keyring, topath='{cluster}.mon.keyring'.format(cluster=args.cluster), _hosts=args.mon, ) if not r: raise exc.KeyNotFoundError(keyring, args.mon) # bootstrap for what in ['osd', 'mds', 'rgw']: keyring = '/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format( what=what, cluster=args.cluster) r = fetch_file( args=args, frompath=keyring, topath='{cluster}.bootstrap-{what}.keyring'.format( cluster=args.cluster, what=what), _hosts=args.mon, ) if not r: if what in ['osd', 'mds']: raise exc.KeyNotFoundError(keyring, args.mon) else: LOG.warning(("No RGW bootstrap key found. Will not be able to " "deploy RGW daemons")) @priority(40) def make(parser): """ Gather authentication keys for provisioning new nodes. """ parser.add_argument( 'mon', metavar='HOST', nargs='+', help='monitor host to pull keys from', ) parser.set_defaults( func=gatherkeys, )
./CrossVul/dataset_final_sorted/CWE-200/py/bad_1559_0
crossvul-python_data_good_609_0
import re import warnings import six from django.http import HttpResponse from django.utils.crypto import constant_time_compare from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View from ..exceptions import AnymailInsecureWebhookWarning, AnymailWebhookValidationFailure from ..utils import get_anymail_setting, collect_all_methods, get_request_basic_auth class AnymailBasicAuthMixin(object): """Implements webhook basic auth as mixin to AnymailBaseWebhookView.""" # Whether to warn if basic auth is not configured. # For most ESPs, basic auth is the only webhook security, # so the default is True. Subclasses can set False if # they enforce other security (like signed webhooks). warn_if_no_basic_auth = True # List of allowable HTTP basic-auth 'user:pass' strings. basic_auth = None # (Declaring class attr allows override by kwargs in View.as_view.) def __init__(self, **kwargs): self.basic_auth = get_anymail_setting('webhook_authorization', default=[], kwargs=kwargs) # no esp_name -- auth is shared between ESPs # Allow a single string: if isinstance(self.basic_auth, six.string_types): self.basic_auth = [self.basic_auth] if self.warn_if_no_basic_auth and len(self.basic_auth) < 1: warnings.warn( "Your Anymail webhooks are insecure and open to anyone on the web. " "You should set WEBHOOK_AUTHORIZATION in your ANYMAIL settings. " "See 'Securing webhooks' in the Anymail docs.", AnymailInsecureWebhookWarning) # noinspection PyArgumentList super(AnymailBasicAuthMixin, self).__init__(**kwargs) def validate_request(self, request): """If configured for webhook basic auth, validate request has correct auth.""" if self.basic_auth: request_auth = get_request_basic_auth(request) # Use constant_time_compare to avoid timing attack on basic auth. (It's OK that any() # can terminate early: we're not trying to protect how many auth strings are allowed, # just the contents of each individual auth string.) 
auth_ok = any(constant_time_compare(request_auth, allowed_auth) for allowed_auth in self.basic_auth) if not auth_ok: # noinspection PyUnresolvedReferences raise AnymailWebhookValidationFailure( "Missing or invalid basic auth in Anymail %s webhook" % self.esp_name) # Mixin note: Django's View.__init__ doesn't cooperate with chaining, # so all mixins that need __init__ must appear before View in MRO. class AnymailBaseWebhookView(AnymailBasicAuthMixin, View): """Base view for processing ESP event webhooks ESP-specific implementations should subclass and implement parse_events. They may also want to implement validate_request if additional security is available. """ def __init__(self, **kwargs): super(AnymailBaseWebhookView, self).__init__(**kwargs) self.validators = collect_all_methods(self.__class__, 'validate_request') # Subclass implementation: # Where to send events: either ..signals.inbound or ..signals.tracking signal = None def validate_request(self, request): """Check validity of webhook post, or raise AnymailWebhookValidationFailure. AnymailBaseWebhookView includes basic auth validation. Subclasses can implement (or provide via mixins) if the ESP supports additional validation (such as signature checking). *All* definitions of this method in the class chain (including mixins) will be called. There is no need to chain to the superclass. (See self.run_validators and collect_all_methods.) Security note: use django.utils.crypto.constant_time_compare for string comparisons, to avoid exposing your validation to a timing attack. """ # if not constant_time_compare(request.POST['signature'], expected_signature): # raise AnymailWebhookValidationFailure("...message...") # (else just do nothing) pass def parse_events(self, request): """Return a list of normalized AnymailWebhookEvent extracted from ESP post data. Subclasses must implement. 
""" raise NotImplementedError() # HTTP handlers (subclasses shouldn't need to override): http_method_names = ["post", "head", "options"] @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(AnymailBaseWebhookView, self).dispatch(request, *args, **kwargs) def head(self, request, *args, **kwargs): # Some ESPs verify the webhook with a HEAD request at configuration time return HttpResponse() def post(self, request, *args, **kwargs): # Normal Django exception handling will do the right thing: # - AnymailWebhookValidationFailure will turn into an HTTP 400 response # (via Django SuspiciousOperation handling) # - Any other errors (e.g., in signal dispatch) will turn into HTTP 500 # responses (via normal Django error handling). ESPs generally # treat that as "try again later". self.run_validators(request) events = self.parse_events(request) esp_name = self.esp_name for event in events: self.signal.send(sender=self.__class__, event=event, esp_name=esp_name) return HttpResponse() # Request validation (subclasses shouldn't need to override): def run_validators(self, request): for validator in self.validators: validator(self, request) @property def esp_name(self): """ Read-only name of the ESP for this webhook view. (E.g., MailgunTrackingWebhookView will return "Mailgun") """ return re.sub(r'(Tracking|Inbox)WebhookView$', "", self.__class__.__name__)
./CrossVul/dataset_final_sorted/CWE-200/py/good_609_0
crossvul-python_data_good_1775_0
import datetime import decimal import unicodedata from importlib import import_module from django.conf import settings from django.utils import dateformat, datetime_safe, numberformat, six from django.utils.encoding import force_str from django.utils.functional import lazy from django.utils.safestring import mark_safe from django.utils.translation import ( check_for_language, get_language, to_locale, ) # format_cache is a mapping from (format_type, lang) to the format string. # By using the cache, it is possible to avoid running get_format_modules # repeatedly. _format_cache = {} _format_modules_cache = {} ISO_INPUT_FORMATS = { 'DATE_INPUT_FORMATS': ['%Y-%m-%d'], 'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'], 'DATETIME_INPUT_FORMATS': [ '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M', '%Y-%m-%d' ], } FORMAT_SETTINGS = frozenset([ 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'FIRST_DAY_OF_WEEK', 'MONTH_DAY_FORMAT', 'TIME_FORMAT', 'DATE_FORMAT', 'DATETIME_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'YEAR_MONTH_FORMAT', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS', ]) def reset_format_cache(): """Clear any cached formats. This method is provided primarily for testing purposes, so that the effects of cached formats can be removed. """ global _format_cache, _format_modules_cache _format_cache = {} _format_modules_cache = {} def iter_format_modules(lang, format_module_path=None): """ Does the heavy lifting of finding format modules. 
""" if not check_for_language(lang): return if format_module_path is None: format_module_path = settings.FORMAT_MODULE_PATH format_locations = [] if format_module_path: if isinstance(format_module_path, six.string_types): format_module_path = [format_module_path] for path in format_module_path: format_locations.append(path + '.%s') format_locations.append('django.conf.locale.%s') locale = to_locale(lang) locales = [locale] if '_' in locale: locales.append(locale.split('_')[0]) for location in format_locations: for loc in locales: try: yield import_module('%s.formats' % (location % loc)) except ImportError: pass def get_format_modules(lang=None, reverse=False): """ Returns a list of the format modules found """ if lang is None: lang = get_language() modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))) if reverse: return list(reversed(modules)) return modules def get_format(format_type, lang=None, use_l10n=None): """ For a specific format type, returns the format for the current language (locale), defaults to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT' If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" format_type = force_str(format_type) if format_type not in FORMAT_SETTINGS: return format_type if use_l10n or (use_l10n is None and settings.USE_L10N): if lang is None: lang = get_language() cache_key = (format_type, lang) try: cached = _format_cache[cache_key] if cached is not None: return cached else: # Return the general setting by default return getattr(settings, format_type) except KeyError: for module in get_format_modules(lang): try: val = getattr(module, format_type) for iso_input in ISO_INPUT_FORMATS.get(format_type, ()): if iso_input not in val: if isinstance(val, tuple): val = list(val) val.append(iso_input) _format_cache[cache_key] = val return val except AttributeError: pass _format_cache[cache_key] = None return getattr(settings, format_type) get_format_lazy = lazy(get_format, six.text_type, list, tuple) def date_format(value, format=None, use_l10n=None): """ Formats a datetime.date or datetime.datetime object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)) def time_format(value, format=None, use_l10n=None): """ Formats a datetime.time object using a localizable format If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)) def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False): """ Formats a numeric value using localization settings If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. 
""" if use_l10n or (use_l10n is None and settings.USE_L10N): lang = get_language() else: lang = None return numberformat.format( value, get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n), decimal_pos, get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n), get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n), force_grouping=force_grouping ) def localize(value, use_l10n=None): """ Checks if value is a localizable type (date, number...) and returns it formatted as a string using current locale format. If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ if isinstance(value, six.string_types): # Handle strings first for performance reasons. return value elif isinstance(value, bool): # Make sure booleans don't get treated as numbers return mark_safe(six.text_type(value)) elif isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.datetime): return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n) elif isinstance(value, datetime.date): return date_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.time): return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n) return value def localize_input(value, default=None): """ Checks if an input value is a localizable type and returns it formatted with the appropriate formatting string of the current locale. """ if isinstance(value, six.string_types): # Handle strings first for performance reasons. 
return value elif isinstance(value, (decimal.Decimal, float) + six.integer_types): return number_format(value) elif isinstance(value, datetime.datetime): value = datetime_safe.new_datetime(value) format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.date): value = datetime_safe.new_date(value) format = force_str(default or get_format('DATE_INPUT_FORMATS')[0]) return value.strftime(format) elif isinstance(value, datetime.time): format = force_str(default or get_format('TIME_INPUT_FORMATS')[0]) return value.strftime(format) return value def sanitize_separators(value): """ Sanitizes a value according to the current decimal and thousand separator setting. Used with form field input. """ if settings.USE_L10N and isinstance(value, six.string_types): parts = [] decimal_separator = get_format('DECIMAL_SEPARATOR') if decimal_separator in value: value, decimals = value.split(decimal_separator, 1) parts.append(decimals) if settings.USE_THOUSAND_SEPARATOR: thousand_sep = get_format('THOUSAND_SEPARATOR') if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3: # Special case where we suspect a dot meant decimal separator (see #22171) pass else: for replacement in { thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}: value = value.replace(replacement, '') parts.append(value) value = '.'.join(reversed(parts)) return value
./CrossVul/dataset_final_sorted/CWE-200/py/good_1775_0
crossvul-python_data_bad_2837_2
from __future__ import print_function import argparse import json from oauthlib.oauth2 import LegacyApplicationClient import logging import logging.handlers from requests_oauthlib import OAuth2Session import os import requests import six import sys import traceback from six.moves.urllib.parse import quote as urlquote from six.moves.urllib.parse import urlparse # ------------------------------------------------------------------------------ logger = None prog_name = os.path.basename(sys.argv[0]) AUTH_ROLES = ['root-admin', 'realm-admin', 'anonymous'] LOG_FILE_ROTATION_COUNT = 3 TOKEN_URL_TEMPLATE = ( '{server}/auth/realms/{realm}/protocol/openid-connect/token') GET_SERVER_INFO_TEMPLATE = ( '{server}/auth/admin/serverinfo/') GET_REALMS_URL_TEMPLATE = ( '{server}/auth/admin/realms') CREATE_REALM_URL_TEMPLATE = ( '{server}/auth/admin/realms') DELETE_REALM_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}') GET_REALM_METADATA_TEMPLATE = ( '{server}/auth/realms/{realm}/protocol/saml/descriptor') CLIENT_REPRESENTATION_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}') GET_CLIENTS_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients') CLIENT_DESCRIPTOR_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/client-description-converter') CREATE_CLIENT_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients') GET_INITIAL_ACCESS_TOKEN_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients-initial-access') SAML2_CLIENT_REGISTRATION_TEMPLATE = ( '{server}/auth/realms/{realm}/clients-registrations/saml2-entity-descriptor') GET_CLIENT_PROTOCOL_MAPPERS_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}/protocol-mappers/models') GET_CLIENT_PROTOCOL_MAPPERS_BY_PROTOCOL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}/protocol-mappers/protocol/{protocol}') POST_CLIENT_PROTOCOL_MAPPER_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}/protocol-mappers/models') ADMIN_CLIENT_ID = 'admin-cli' # 
------------------------------------------------------------------------------ class RESTError(Exception): def __init__(self, status_code, status_reason, response_json, response_text, cmd): self.status_code = status_code self.status_reason = status_reason self.error_description = None self.error = None self.response_json = response_json self.response_text = response_text self.cmd = cmd self.message = '{status_reason}({status_code}): '.format( status_reason=self.status_reason, status_code=self.status_code) if response_json: self.error_description = response_json.get('error_description') if self.error_description is None: self.error_description = response_json.get('errorMessage') self.error = response_json.get('error') self.message += '"{error_description}" [{error}]'.format( error_description=self.error_description, error=self.error) else: self.message += '"{response_text}"'.format( response_text=self.response_text) self.args = (self.message,) def __str__(self): return self.message # ------------------------------------------------------------------------------ def configure_logging(options): global logger # pylint: disable=W0603 log_dir = os.path.dirname(options.log_file) if os.path.exists(log_dir): if not os.path.isdir(log_dir): raise ValueError('logging directory "{log_dir}" exists but is not ' 'directory'.format(log_dir=log_dir)) else: os.makedirs(log_dir) log_level = logging.ERROR if options.verbose: log_level = logging.INFO if options.debug: log_level = logging.DEBUG # These two lines enable debugging at httplib level # (requests->urllib3->http.client) You will see the REQUEST, # including HEADERS and DATA, and RESPONSE with HEADERS but # without DATA. The only thing missing will be the # response.body which is not logged. 
try: import http.client as http_client # Python 3 except ImportError: import httplib as http_client # Python 2 http_client.HTTPConnection.debuglevel = 1 # Turn on cookielib debugging if False: try: import http.cookiejar as cookiejar except ImportError: import cookielib as cookiejar # Python 2 cookiejar.debug = True logger = logging.getLogger(prog_name) try: file_handler = logging.handlers.RotatingFileHandler( options.log_file, backupCount=LOG_FILE_ROTATION_COUNT) except IOError as e: print('Unable to open log file %s (%s)' % (options.log_file, e), file=sys.stderr) else: formatter = logging.Formatter( '%(asctime)s %(name)s %(levelname)s: %(message)s') file_handler.setFormatter(formatter) file_handler.setLevel(logging.DEBUG) logger.addHandler(file_handler) console_handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter('%(message)s') console_handler.setFormatter(formatter) console_handler.setLevel(log_level) logger.addHandler(console_handler) # Set the log level on the logger to the lowest level # possible. This allows the message to be emitted from the logger # to it's handlers where the level will be filtered on a per # handler basis. 
logger.setLevel(1) # ------------------------------------------------------------------------------ def json_pretty(text): return json.dumps(json.loads(text), indent=4, sort_keys=True) def py_json_pretty(py_json): return json_pretty(json.dumps(py_json)) def server_name_from_url(url): return urlparse(url).netloc def get_realm_names_from_realms(realms): return [x['realm'] for x in realms] def get_client_client_ids_from_clients(clients): return [x['clientId'] for x in clients] def find_client_by_name(clients, client_id): for client in clients: if client.get('clientId') == client_id: return client raise KeyError('{item} not found'.format(item=client_id)) # ------------------------------------------------------------------------------ class KeycloakREST(object): def __init__(self, server, auth_role=None, session=None): self.server = server self.auth_role = auth_role self.session = session def get_initial_access_token(self, realm_name): cmd_name = "get initial access token for realm '{realm}'".format( realm=realm_name) url = GET_INITIAL_ACCESS_TOKEN_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) params = {"expiration": 60, # seconds "count": 1} response = self.session.post(url, json=params) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json # ClientInitialAccessPresentation def get_server_info(self): cmd_name = "get server info" url = GET_SERVER_INFO_TEMPLATE.format(server=self.server) logger.debug("%s on server %s", 
cmd_name, self.server) response = self.session.get(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def get_realms(self): cmd_name = "get realms" url = GET_REALMS_URL_TEMPLATE.format(server=self.server) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def create_realm(self, realm_name): cmd_name = "create realm '{realm}'".format(realm=realm_name) url = CREATE_REALM_URL_TEMPLATE.format(server=self.server) logger.debug("%s on server %s", cmd_name, self.server) params = {"enabled": True, "id": realm_name, "realm": realm_name, } response = self.session.post(url, json=params) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.created: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise 
RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def delete_realm(self, realm_name): cmd_name = "delete realm '{realm}'".format(realm=realm_name) url = DELETE_REALM_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.delete(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.no_content: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def get_realm_metadata(self, realm_name): cmd_name = "get metadata for realm '{realm}'".format(realm=realm_name) url = GET_REALM_METADATA_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.ok: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) return response.text def get_clients(self, realm_name): cmd_name = "get clients in realm '{realm}'".format(realm=realm_name) url = GET_CLIENTS_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url) 
logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def get_client_by_id(self, realm_name, id): cmd_name = "get client id {id} in realm '{realm}'".format( id=id, realm=realm_name) url = GET_CLIENTS_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) params = {'clientID': id} logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url, params=params) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def get_client_by_name(self, realm_name, client_name): clients = self.get_clients(realm_name) client = find_client_by_name(clients, client_name) id = client.get('id') logger.debug("client name '%s' mapped to id '%s'", client_name, id) logger.debug("client %s\n%s", client_name, py_json_pretty(client)) return client def get_client_id_by_name(self, realm_name, client_name): client = self.get_client_by_name(realm_name, client_name) id = client.get('id') return id def get_client_descriptor(self, realm_name, metadata): cmd_name = "get client descriptor realm 
'{realm}'".format( realm=realm_name) url = CLIENT_DESCRIPTOR_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) headers = {'Content-Type': 'application/xml;charset=utf-8'} response = self.session.post(url, headers=headers, data=metadata) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def create_client_from_descriptor(self, realm_name, descriptor): cmd_name = "create client from descriptor " "'{client_id}'in realm '{realm}'".format( client_id=descriptor['clientId'], realm=realm_name) url = CREATE_CLIENT_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.post(url, json=descriptor) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.created: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def create_client(self, realm_name, metadata): logger.debug("create client in realm %s on server %s", realm_name, self.server) descriptor = self.get_client_descriptor(realm_name, metadata) self.create_client_from_descriptor(realm_name, descriptor) return 
descriptor def register_client(self, initial_access_token, realm_name, metadata): cmd_name = "register_client realm '{realm}'".format( realm=realm_name) url = SAML2_CLIENT_REGISTRATION_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) headers = {'Content-Type': 'application/xml;charset=utf-8'} if initial_access_token: headers['Authorization'] = 'Bearer {token}'.format( token=initial_access_token) response = self.session.post(url, headers=headers, data=metadata) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.created): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json # ClientRepresentation def delete_client_by_name(self, realm_name, client_name): id = self.get_client_id_by_name(realm_name, client_name) self.delete_client_by_id(realm_name, id) def delete_client_by_id(self, realm_name, id): cmd_name = "delete client id '{id}'in realm '{realm}'".format( id=id, realm=realm_name) url = CLIENT_REPRESENTATION_TEMPLATE.format( server=self.server, realm=urlquote(realm_name), id=urlquote(id)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.delete(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.no_content: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, 
response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def update_client(self, realm_name, client): id = client['id'] cmd_name = "update client {id} in realm '{realm}'".format( id=client['clientId'], realm=realm_name) url = CLIENT_REPRESENTATION_TEMPLATE.format( server=self.server, realm=urlquote(realm_name), id=urlquote(id)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.put(url, json=client) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.no_content: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def update_client_attributes(self, realm_name, client, update_attrs): client_id = client['clientId'] logger.debug("update client attrs: client_id=%s " "current attrs=%s update=%s" % (client_id, client['attributes'], update_attrs)) client['attributes'].update(update_attrs) logger.debug("update client attrs: client_id=%s " "new attrs=%s" % (client_id, client['attributes'])) self.update_client(realm_name, client); def update_client_by_name_attributes(self, realm_name, client_name, update_attrs): client = self.get_client_by_name(realm_name, client_name) self.update_client_attributes(realm_name, client, update_attrs) def new_saml_group_protocol_mapper(self, mapper_name, attribute_name, friendly_name=None, single_attribute=True): mapper = { 'protocol': 'saml', 'name': mapper_name, 'protocolMapper': 'saml-group-membership-mapper', 'config': { 'attribute.name': attribute_name, 'attribute.nameformat': 'Basic', 'single': single_attribute, 'full.path': False, }, } if friendly_name: mapper['config']['friendly.name'] = friendly_name 
return mapper def create_client_protocol_mapper(self, realm_name, client, mapper): id = client['id'] cmd_name = ("create protocol-mapper '{mapper_name}' for client {id} " "in realm '{realm}'".format( mapper_name=mapper['name'],id=client['clientId'], realm=realm_name)) url = POST_CLIENT_PROTOCOL_MAPPER_TEMPLATE.format( server=self.server, realm=urlquote(realm_name), id=urlquote(id)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.post(url, json=mapper) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.created: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def create_client_by_name_protocol_mapper(self, realm_name, client_name, mapper): client = self.get_client_by_name(realm_name, client_name) self.create_client_protocol_mapper(realm_name, client, mapper) def add_client_by_name_redirect_uris(self, realm_name, client_name, uris): client = self.get_client_by_name(realm_name, client_name) uris = set(uris) redirect_uris = set(client['redirectUris']) redirect_uris |= uris client['redirectUris'] = list(redirect_uris) self.update_client(realm_name, client); def remove_client_by_name_redirect_uris(self, realm_name, client_name, uris): client = self.get_client_by_name(realm_name, client_name) uris = set(uris) redirect_uris = set(client['redirectUris']) redirect_uris -= uris client['redirectUris'] = list(redirect_uris) self.update_client(realm_name, client); # ------------------------------------------------------------------------------ class KeycloakAdminConnection(KeycloakREST): def __init__(self, server, auth_role, realm, client_id, username, password, tls_verify): 
super(KeycloakAdminConnection, self).__init__(server, auth_role) self.realm = realm self.client_id = client_id self.username = username self.password = password self.session = self._create_session(tls_verify) def _create_session(self, tls_verify): token_url = TOKEN_URL_TEMPLATE.format( server=self.server, realm=urlquote(self.realm)) refresh_url = token_url client = LegacyApplicationClient(client_id=self.client_id) session = OAuth2Session(client=client, auto_refresh_url=refresh_url, auto_refresh_kwargs={ 'client_id': self.client_id}) session.verify = tls_verify token = session.fetch_token(token_url=token_url, username=self.username, password=self.password, client_id=self.client_id, verify=session.verify) return session class KeycloakAnonymousConnection(KeycloakREST): def __init__(self, server, tls_verify): super(KeycloakAnonymousConnection, self).__init__(server, 'anonymous') self.session = self._create_session(tls_verify) def _create_session(self, tls_verify): session = requests.Session() session.verify = tls_verify return session # ------------------------------------------------------------------------------ def do_server_info(options, conn): server_info = conn.get_server_info() print(json_pretty(server_info)) def do_list_realms(options, conn): realms = conn.get_realms() realm_names = get_realm_names_from_realms(realms) print('\n'.join(sorted(realm_names))) def do_create_realm(options, conn): conn.create_realm(options.realm_name) def do_delete_realm(options, conn): conn.delete_realm(options.realm_name) def do_get_realm_metadata(options, conn): metadata = conn.get_realm_metadata(options.realm_name) print(metadata) def do_list_clients(options, conn): clients = conn.get_clients(options.realm_name) client_ids = get_client_client_ids_from_clients(clients) print('\n'.join(sorted(client_ids))) def do_create_client(options, conn): metadata = options.metadata.read() descriptor = conn.create_client(options.realm_name, metadata) def do_register_client(options, conn): 
metadata = options.metadata.read() client_representation = conn.register_client( options.initial_access_token, options.realm_name, metadata) def do_delete_client(options, conn): conn.delete_client_by_name(options.realm_name, options.client_name) def do_client_test(options, conn): 'experimental test code used during development' uri = 'https://openstack.jdennis.oslab.test:5000/v3/mellon/fooResponse' conn.remove_client_by_name_redirect_uri(options.realm_name, options.client_name, uri) # ------------------------------------------------------------------------------ verbose_help = ''' The structure of the command line arguments is "noun verb" where noun is one of Keycloak's data items (e.g. realm, client, etc.) and the verb is an action to perform on the item. Each of the nouns and verbs may have their own set of arguments which must follow the noun or verb. For example to delete the client XYZ in the realm ABC: {prog_name} -s http://example.com:8080 -p password client delete -r ABC -c XYZ where 'client' is the noun, 'delete' is the verb and -r ABC -c XYZ are arguments to the delete action. If the command completes successfully the exit status is 0. The exit status is 1 if an authenticated connection with the server cannont be successfully established. The exit status is 2 if the REST operation fails. The server should be a scheme://hostname:port URL. 
''' class TlsVerifyAction(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(TlsVerifyAction, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): if values.lower() in ['true', 'yes', 'on']: verify = True elif values.lower() in ['false', 'no', 'off']: verify = False else: verify = values setattr(namespace, self.dest, verify) def main(): global logger result = 0 parser = argparse.ArgumentParser(description='Keycloak REST client', prog=prog_name, epilog=verbose_help.format(prog_name=prog_name), formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='be chatty') parser.add_argument('-d', '--debug', action='store_true', help='turn on debug info') parser.add_argument('--show-traceback', action='store_true', help='exceptions print traceback in addition to ' 'error message') parser.add_argument('--log-file', default='/tmp/{prog_name}.log'.format( prog_name=prog_name), help='log file pathname') parser.add_argument('--permit-insecure-transport', action='store_true', help='Normally secure transport such as TLS ' 'is required, defeat this check') parser.add_argument('--tls-verify', action=TlsVerifyAction, default=True, help='TLS certificate verification for requests to' ' the server. May be one of case insenstive ' '[true, yes, on] to enable,' '[false, no, off] to disable.' 'Or the pathname to a OpenSSL CA bundle to use.' 
' Default is True.') group = parser.add_argument_group('Server') group.add_argument('-s', '--server', required=True, help='DNS name or IP address of Keycloak server') group.add_argument('-a', '--auth-role', choices=AUTH_ROLES, default='root-admin', help='authenticating as what type of user (default: root-admin)') group.add_argument('-u', '--admin-username', default='admin', help='admin user name (default: admin)') group.add_argument('-p', '--admin-password', required=True, help='admin password') group.add_argument('--admin-realm', default='master', help='realm admin belongs to') cmd_parsers = parser.add_subparsers(help='available commands') # --- realm commands --- realm_parser = cmd_parsers.add_parser('realm', help='realm operations') sub_parser = realm_parser.add_subparsers(help='realm commands') cmd_parser = sub_parser.add_parser('server_info', help='dump server info') cmd_parser.set_defaults(func=do_server_info) cmd_parser = sub_parser.add_parser('list', help='list realm names') cmd_parser.set_defaults(func=do_list_realms) cmd_parser = sub_parser.add_parser('create', help='create new realm') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_create_realm) cmd_parser = sub_parser.add_parser('delete', help='delete existing realm') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_delete_realm) cmd_parser = sub_parser.add_parser('metadata', help='retrieve realm metadata') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_get_realm_metadata) # --- client commands --- client_parser = cmd_parsers.add_parser('client', help='client operations') sub_parser = client_parser.add_subparsers(help='client commands') cmd_parser = sub_parser.add_parser('list', help='list client names') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') 
cmd_parser.set_defaults(func=do_list_clients) cmd_parser = sub_parser.add_parser('create', help='create new client') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-m', '--metadata', type=argparse.FileType('rb'), required=True, help='SP metadata file or stdin') cmd_parser.set_defaults(func=do_create_client) cmd_parser = sub_parser.add_parser('register', help='register new client') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-m', '--metadata', type=argparse.FileType('rb'), required=True, help='SP metadata file or stdin') cmd_parser.add_argument('--initial-access-token', required=True, help='realm initial access token for ' 'client registeration') cmd_parser.set_defaults(func=do_register_client) cmd_parser = sub_parser.add_parser('delete', help='delete existing client') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-c', '--client-name', required=True, help='client name') cmd_parser.set_defaults(func=do_delete_client) cmd_parser = sub_parser.add_parser('test', help='experimental test used during ' 'development') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-c', '--client-name', required=True, help='client name') cmd_parser.set_defaults(func=do_client_test) # Process command line arguments options = parser.parse_args() configure_logging(options) if options.permit_insecure_transport: os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' try: anonymous_conn = KeycloakAnonymousConnection(options.server, options.tls_verify) admin_conn = KeycloakAdminConnection(options.server, options.auth_role, options.admin_realm, ADMIN_CLIENT_ID, options.admin_username, options.admin_password, options.tls_verify) except Exception as e: if options.show_traceback: traceback.print_exc() print(six.text_type(e), file=sys.stderr) result = 1 return result try: if 
options.func == do_register_client: conn = admin_conn else: conn = admin_conn result = options.func(options, conn) except Exception as e: if options.show_traceback: traceback.print_exc() print(six.text_type(e), file=sys.stderr) result = 2 return result return result # ------------------------------------------------------------------------------ if __name__ == '__main__': sys.exit(main()) else: logger = logging.getLogger('keycloak-cli')
./CrossVul/dataset_final_sorted/CWE-200/py/bad_2837_2
crossvul-python_data_bad_5206_0
# Copyright (C) 2016 JWCrypto Project Contributors - see LICENSE file import abc import os import struct from binascii import hexlify, unhexlify from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import constant_time, hashes, hmac from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import utils as ec_utils from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.primitives.kdf.concatkdf import ConcatKDFHash from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC from cryptography.hazmat.primitives.padding import PKCS7 import six from jwcrypto.common import InvalidCEKeyLength from jwcrypto.common import InvalidJWAAlgorithm from jwcrypto.common import InvalidJWEKeyLength from jwcrypto.common import InvalidJWEKeyType from jwcrypto.common import InvalidJWEOperation from jwcrypto.common import base64url_decode, base64url_encode from jwcrypto.common import json_decode from jwcrypto.jwk import JWK # Implements RFC 7518 - JSON Web Algorithms (JWA) @six.add_metaclass(abc.ABCMeta) class JWAAlgorithm(object): @abc.abstractproperty def name(self): """The algorithm Name""" pass @abc.abstractproperty def description(self): """A short description""" pass @abc.abstractproperty def keysize(self): """The actual/recommended/minimum key size""" pass @abc.abstractproperty def algorithm_usage_location(self): """One of 'alg', 'enc' or 'JWK'""" pass @abc.abstractproperty def algorithm_use(self): """One of 'sig', 'kex', 'enc'""" pass def _bitsize(x): return len(x) * 8 def _inbytes(x): return x // 8 def _randombits(x): if x % 8 != 0: raise ValueError("lenght must be a multiple of 8") return os.urandom(_inbytes(x)) # Note: the number of bits should be a multiple of 16 def _encode_int(n, bits): e = '{:x}'.format(n) ilen = ((bits + 7) 
// 8) * 2 # number of bytes rounded up times 2 bytes return unhexlify(e.rjust(ilen, '0')[:ilen]) def _decode_int(n): return int(hexlify(n), 16) class _RawJWS(object): def sign(self, key, payload): raise NotImplementedError def verify(self, key, payload, signature): raise NotImplementedError class _RawHMAC(_RawJWS): def __init__(self, hashfn): self.backend = default_backend() self.hashfn = hashfn def _hmac_setup(self, key, payload): h = hmac.HMAC(key, self.hashfn, backend=self.backend) h.update(payload) return h def sign(self, key, payload): skey = base64url_decode(key.get_op_key('sign')) h = self._hmac_setup(skey, payload) return h.finalize() def verify(self, key, payload, signature): vkey = base64url_decode(key.get_op_key('verify')) h = self._hmac_setup(vkey, payload) h.verify(signature) class _RawRSA(_RawJWS): def __init__(self, padfn, hashfn): self.padfn = padfn self.hashfn = hashfn def sign(self, key, payload): skey = key.get_op_key('sign') signer = skey.signer(self.padfn, self.hashfn) signer.update(payload) return signer.finalize() def verify(self, key, payload, signature): pkey = key.get_op_key('verify') verifier = pkey.verifier(signature, self.padfn, self.hashfn) verifier.update(payload) verifier.verify() class _RawEC(_RawJWS): def __init__(self, curve, hashfn): self._curve = curve self.hashfn = hashfn @property def curve(self): return self._curve def sign(self, key, payload): skey = key.get_op_key('sign', self._curve) signer = skey.signer(ec.ECDSA(self.hashfn)) signer.update(payload) signature = signer.finalize() r, s = ec_utils.decode_rfc6979_signature(signature) l = key.get_curve(self._curve).key_size return _encode_int(r, l) + _encode_int(s, l) def verify(self, key, payload, signature): pkey = key.get_op_key('verify', self._curve) r = signature[:len(signature) // 2] s = signature[len(signature) // 2:] enc_signature = ec_utils.encode_rfc6979_signature( int(hexlify(r), 16), int(hexlify(s), 16)) verifier = pkey.verifier(enc_signature, ec.ECDSA(self.hashfn)) 
verifier.update(payload) verifier.verify() class _RawNone(_RawJWS): def sign(self, key, payload): return '' def verify(self, key, payload, signature): raise InvalidSignature('The "none" signature cannot be verified') class _HS256(_RawHMAC, JWAAlgorithm): name = "HS256" description = "HMAC using SHA-256" keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_HS256, self).__init__(hashes.SHA256()) class _HS384(_RawHMAC, JWAAlgorithm): name = "HS384" description = "HMAC using SHA-384" keysize = 384 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_HS384, self).__init__(hashes.SHA384()) class _HS512(_RawHMAC, JWAAlgorithm): name = "HS512" description = "HMAC using SHA-512" keysize = 512 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_HS512, self).__init__(hashes.SHA512()) class _RS256(_RawRSA, JWAAlgorithm): name = "RS256" description = "RSASSA-PKCS1-v1_5 using SHA-256" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_RS256, self).__init__(padding.PKCS1v15(), hashes.SHA256()) class _RS384(_RawRSA, JWAAlgorithm): name = "RS384" description = "RSASSA-PKCS1-v1_5 using SHA-384" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_RS384, self).__init__(padding.PKCS1v15(), hashes.SHA384()) class _RS512(_RawRSA, JWAAlgorithm): name = "RS512" description = "RSASSA-PKCS1-v1_5 using SHA-512" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_RS512, self).__init__(padding.PKCS1v15(), hashes.SHA512()) class _ES256(_RawEC, JWAAlgorithm): name = "ES256" description = "ECDSA using P-256 and SHA-256" keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_ES256, self).__init__('P-256', hashes.SHA256()) class _ES384(_RawEC, JWAAlgorithm): name = "ES384" description = "ECDSA using P-384 and SHA-384" keysize 
= 384 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_ES384, self).__init__('P-384', hashes.SHA384()) class _ES512(_RawEC, JWAAlgorithm): name = "ES512" description = "ECDSA using P-521 and SHA-512" keysize = 512 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): super(_ES512, self).__init__('P-521', hashes.SHA512()) class _PS256(_RawRSA, JWAAlgorithm): name = "PS256" description = "RSASSA-PSS using SHA-256 and MGF1 with SHA-256" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): padfn = padding.PSS(padding.MGF1(hashes.SHA256()), hashes.SHA256.digest_size) super(_PS256, self).__init__(padfn, hashes.SHA256()) class _PS384(_RawRSA, JWAAlgorithm): name = "PS384" description = "RSASSA-PSS using SHA-384 and MGF1 with SHA-384" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): padfn = padding.PSS(padding.MGF1(hashes.SHA384()), hashes.SHA384.digest_size) super(_PS384, self).__init__(padfn, hashes.SHA384()) class _PS512(_RawRSA, JWAAlgorithm): name = "PS512" description = "RSASSA-PSS using SHA-512 and MGF1 with SHA-512" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'sig' def __init__(self): padfn = padding.PSS(padding.MGF1(hashes.SHA512()), hashes.SHA512.digest_size) super(_PS512, self).__init__(padfn, hashes.SHA512()) class _None(_RawNone, JWAAlgorithm): name = "none" description = "No digital signature or MAC performed" keysize = 0 algorithm_usage_location = 'alg' algorithm_use = 'sig' class _RawKeyMgmt(object): def wrap(self, key, bitsize, cek, headers): raise NotImplementedError def unwrap(self, key, bitsize, ek, headers): raise NotImplementedError class _RSA(_RawKeyMgmt): def __init__(self, padfn): self.padfn = padfn def _check_key(self, key): if not isinstance(key, JWK): raise ValueError('key is not a JWK object') if key.key_type != 'RSA': raise InvalidJWEKeyType('RSA', key.key_type) # FIXME: get key size and insure 
> 2048 bits def wrap(self, key, bitsize, cek, headers): self._check_key(key) if not cek: cek = _randombits(bitsize) rk = key.get_op_key('wrapKey') ek = rk.encrypt(cek, self.padfn) return {'cek': cek, 'ek': ek} def unwrap(self, key, bitsize, ek, headers): self._check_key(key) rk = key.get_op_key('decrypt') cek = rk.decrypt(ek, self.padfn) if _bitsize(cek) != bitsize: raise InvalidJWEKeyLength(bitsize, _bitsize(cek)) return cek class _Rsa15(_RSA, JWAAlgorithm): name = 'RSA1_5' description = "RSAES-PKCS1-v1_5" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'kex' def __init__(self): super(_Rsa15, self).__init__(padding.PKCS1v15()) class _RsaOaep(_RSA, JWAAlgorithm): name = 'RSA-OAEP' description = "RSAES OAEP using default parameters" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'kex' def __init__(self): super(_RsaOaep, self).__init__( padding.OAEP(padding.MGF1(hashes.SHA1()), hashes.SHA1(), None)) class _RsaOaep256(_RSA, JWAAlgorithm): # noqa: ignore=N801 name = 'RSA-OAEP-256' description = "RSAES OAEP using SHA-256 and MGF1 with SHA-256" keysize = 2048 algorithm_usage_location = 'alg' algorithm_use = 'kex' def __init__(self): super(_RsaOaep256, self).__init__( padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None)) class _AesKw(_RawKeyMgmt): keysize = None def __init__(self): self.backend = default_backend() def _get_key(self, key, op): if not isinstance(key, JWK): raise ValueError('key is not a JWK object') if key.key_type != 'oct': raise InvalidJWEKeyType('oct', key.key_type) rk = base64url_decode(key.get_op_key(op)) if _bitsize(rk) != self.keysize: raise InvalidJWEKeyLength(self.keysize, _bitsize(rk)) return rk def wrap(self, key, bitsize, cek, headers): rk = self._get_key(key, 'encrypt') if not cek: cek = _randombits(bitsize) # Implement RFC 3394 Key Unwrap - 2.2.2 # TODO: Use cryptography once issue #1733 is resolved iv = 'a6a6a6a6a6a6a6a6' a = unhexlify(iv) r = [cek[i:i + 8] for i in range(0, len(cek), 8)] n = 
len(r) for j in range(0, 6): for i in range(0, n): e = Cipher(algorithms.AES(rk), modes.ECB(), backend=self.backend).encryptor() b = e.update(a + r[i]) + e.finalize() a = _encode_int(_decode_int(b[:8]) ^ ((n * j) + i + 1), 64) r[i] = b[-8:] ek = a for i in range(0, n): ek += r[i] return {'cek': cek, 'ek': ek} def unwrap(self, key, bitsize, ek, headers): rk = self._get_key(key, 'decrypt') # Implement RFC 3394 Key Unwrap - 2.2.3 # TODO: Use cryptography once issue #1733 is resolved iv = 'a6a6a6a6a6a6a6a6' aiv = unhexlify(iv) r = [ek[i:i + 8] for i in range(0, len(ek), 8)] a = r.pop(0) n = len(r) for j in range(5, -1, -1): for i in range(n - 1, -1, -1): da = _decode_int(a) atr = _encode_int((da ^ ((n * j) + i + 1)), 64) + r[i] d = Cipher(algorithms.AES(rk), modes.ECB(), backend=self.backend).decryptor() b = d.update(atr) + d.finalize() a = b[:8] r[i] = b[-8:] if a != aiv: raise RuntimeError('Decryption Failed') cek = b''.join(r) if _bitsize(cek) != bitsize: raise InvalidJWEKeyLength(bitsize, _bitsize(cek)) return cek class _A128KW(_AesKw, JWAAlgorithm): name = 'A128KW' description = "AES Key Wrap using 128-bit key" keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _A192KW(_AesKw, JWAAlgorithm): name = 'A192KW' description = "AES Key Wrap using 192-bit key" keysize = 192 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _A256KW(_AesKw, JWAAlgorithm): name = 'A256KW' description = "AES Key Wrap using 256-bit key" keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _AesGcmKw(_RawKeyMgmt): keysize = None def __init__(self): self.backend = default_backend() def _get_key(self, key, op): if not isinstance(key, JWK): raise ValueError('key is not a JWK object') if key.key_type != 'oct': raise InvalidJWEKeyType('oct', key.key_type) rk = base64url_decode(key.get_op_key(op)) if _bitsize(rk) != self.keysize: raise InvalidJWEKeyLength(self.keysize, _bitsize(rk)) return rk def wrap(self, key, bitsize, cek, headers): rk = 
self._get_key(key, 'encrypt') if not cek: cek = _randombits(bitsize) iv = _randombits(96) cipher = Cipher(algorithms.AES(rk), modes.GCM(iv), backend=self.backend) encryptor = cipher.encryptor() ek = encryptor.update(cek) + encryptor.finalize() tag = encryptor.tag return {'cek': cek, 'ek': ek, 'header': {'iv': base64url_encode(iv), 'tag': base64url_encode(tag)}} def unwrap(self, key, bitsize, ek, headers): rk = self._get_key(key, 'decrypt') if 'iv' not in headers: raise ValueError('Invalid Header, missing "iv" parameter') iv = base64url_decode(headers['iv']) if 'tag' not in headers: raise ValueError('Invalid Header, missing "tag" parameter') tag = base64url_decode(headers['tag']) cipher = Cipher(algorithms.AES(rk), modes.GCM(iv, tag), backend=self.backend) decryptor = cipher.decryptor() cek = decryptor.update(ek) + decryptor.finalize() if _bitsize(cek) != bitsize: raise InvalidJWEKeyLength(bitsize, _bitsize(cek)) return cek class _A128GcmKw(_AesGcmKw, JWAAlgorithm): name = 'A128GCMKW' description = "Key wrapping with AES GCM using 128-bit key" keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _A192GcmKw(_AesGcmKw, JWAAlgorithm): name = 'A192GCMKW' description = "Key wrapping with AES GCM using 192-bit key" keysize = 192 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _A256GcmKw(_AesGcmKw, JWAAlgorithm): name = 'A256GCMKW' description = "Key wrapping with AES GCM using 256-bit key" keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _Pbes2HsAesKw(_RawKeyMgmt): name = None keysize = None hashsize = None def __init__(self): self.backend = default_backend() self.aeskwmap = {128: _A128KW, 192: _A192KW, 256: _A256KW} def _get_key(self, alg, key, p2s, p2c): if isinstance(key, bytes): plain = key else: plain = key.encode('utf8') salt = bytes(self.name.encode('utf8')) + b'\x00' + p2s if self.hashsize == 256: hashalg = hashes.SHA256() elif self.hashsize == 384: hashalg = hashes.SHA384() elif self.hashsize == 512: 
hashalg = hashes.SHA512() else: raise ValueError('Unknown Hash Size') kdf = PBKDF2HMAC(algorithm=hashalg, length=_inbytes(self.keysize), salt=salt, iterations=p2c, backend=self.backend) rk = kdf.derive(plain) if _bitsize(rk) != self.keysize: raise InvalidJWEKeyLength(self.keysize, len(rk)) return JWK(kty="oct", use="enc", k=base64url_encode(rk)) def wrap(self, key, bitsize, cek, headers): p2s = _randombits(128) p2c = 8192 kek = self._get_key(headers['alg'], key, p2s, p2c) aeskw = self.aeskwmap[self.keysize]() ret = aeskw.wrap(kek, bitsize, cek, headers) ret['header'] = {'p2s': base64url_encode(p2s), 'p2c': p2c} return ret def unwrap(self, key, bitsize, ek, headers): if 'p2s' not in headers: raise ValueError('Invalid Header, missing "p2s" parameter') if 'p2c' not in headers: raise ValueError('Invalid Header, missing "p2c" parameter') p2s = base64url_decode(headers['p2s']) p2c = headers['p2c'] kek = self._get_key(headers['alg'], key, p2s, p2c) aeskw = self.aeskwmap[self.keysize]() return aeskw.unwrap(kek, bitsize, ek, headers) class _Pbes2Hs256A128Kw(_Pbes2HsAesKw, JWAAlgorithm): name = 'PBES2-HS256+A128KW' description = 'PBES2 with HMAC SHA-256 and "A128KW" wrapping' keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' hashsize = 256 class _Pbes2Hs384A192Kw(_Pbes2HsAesKw, JWAAlgorithm): name = 'PBES2-HS384+A192KW' description = 'PBES2 with HMAC SHA-384 and "A192KW" wrapping' keysize = 192 algorithm_usage_location = 'alg' algorithm_use = 'kex' hashsize = 384 class _Pbes2Hs512A256Kw(_Pbes2HsAesKw, JWAAlgorithm): name = 'PBES2-HS512+A256KW' description = 'PBES2 with HMAC SHA-512 and "A256KW" wrapping' keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'kex' hashsize = 512 class _Direct(_RawKeyMgmt, JWAAlgorithm): name = 'dir' description = "Direct use of a shared symmetric key" keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' def _check_key(self, key): if not isinstance(key, JWK): raise ValueError('key is not a JWK 
object') if key.key_type != 'oct': raise InvalidJWEKeyType('oct', key.key_type) def wrap(self, key, bitsize, cek, headers): self._check_key(key) if cek: return (cek, None) k = base64url_decode(key.get_op_key('encrypt')) if _bitsize(k) != bitsize: raise InvalidCEKeyLength(bitsize, _bitsize(k)) return {'cek': k} def unwrap(self, key, bitsize, ek, headers): self._check_key(key) if ek != b'': raise ValueError('Invalid Encryption Key.') cek = base64url_decode(key.get_op_key('decrypt')) if _bitsize(cek) != bitsize: raise InvalidJWEKeyLength(bitsize, _bitsize(cek)) return cek class _EcdhEs(_RawKeyMgmt): name = 'ECDH-ES' description = "ECDH-ES using Concat KDF" algorithm_usage_location = 'alg' algorithm_use = 'kex' keysize = None def __init__(self): self.backend = default_backend() self.aeskwmap = {128: _A128KW, 192: _A192KW, 256: _A256KW} def _check_key(self, key): if not isinstance(key, JWK): raise ValueError('key is not a JWK object') if key.key_type != 'EC': raise InvalidJWEKeyType('EC', key.key_type) def _derive(self, privkey, pubkey, alg, bitsize, headers): # OtherInfo is defined in NIST SP 56A 5.8.1.2.1 # AlgorithmID otherinfo = struct.pack('>I', len(alg)) otherinfo += bytes(alg.encode('utf8')) # PartyUInfo apu = base64url_decode(headers['apu']) if 'apu' in headers else b'' otherinfo += struct.pack('>I', len(apu)) otherinfo += apu # PartyVInfo apv = base64url_decode(headers['apv']) if 'apv' in headers else b'' otherinfo += struct.pack('>I', len(apv)) otherinfo += apv # SuppPubInfo otherinfo += struct.pack('>I', bitsize) # no SuppPrivInfo shared_key = privkey.exchange(ec.ECDH(), pubkey) ckdf = ConcatKDFHash(algorithm=hashes.SHA256(), length=_inbytes(bitsize), otherinfo=otherinfo, backend=self.backend) return ckdf.derive(shared_key) def wrap(self, key, bitsize, cek, headers): self._check_key(key) if self.keysize is None: if cek is not None: raise InvalidJWEOperation('ECDH-ES cannot use an existing CEK') alg = headers['enc'] else: bitsize = self.keysize alg = 
headers['alg'] epk = JWK.generate(kty=key.key_type, crv=key.key_curve) dk = self._derive(epk.get_op_key('unwrapKey'), key.get_op_key('wrapKey'), alg, bitsize, headers) if self.keysize is None: ret = {'cek': dk} else: aeskw = self.aeskwmap[bitsize]() kek = JWK(kty="oct", use="enc", k=base64url_encode(dk)) ret = aeskw.wrap(kek, bitsize, cek, headers) ret['header'] = {'epk': json_decode(epk.export_public())} return ret def unwrap(self, key, bitsize, ek, headers): if 'epk' not in headers: raise ValueError('Invalid Header, missing "epk" parameter') self._check_key(key) if self.keysize is None: alg = headers['enc'] else: bitsize = self.keysize alg = headers['alg'] epk = JWK(**headers['epk']) dk = self._derive(key.get_op_key('unwrapKey'), epk.get_op_key('wrapKey'), alg, bitsize, headers) if self.keysize is None: return dk else: aeskw = self.aeskwmap[bitsize]() kek = JWK(kty="oct", use="enc", k=base64url_encode(dk)) cek = aeskw.unwrap(kek, bitsize, ek, headers) return cek class _EcdhEsAes128Kw(_EcdhEs, JWAAlgorithm): name = 'ECDH-ES+A128KW' description = 'ECDH-ES using Concat KDF and "A128KW" wrapping' keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _EcdhEsAes192Kw(_EcdhEs, JWAAlgorithm): name = 'ECDH-ES+A192KW' description = 'ECDH-ES using Concat KDF and "A192KW" wrapping' keysize = 192 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _EcdhEsAes256Kw(_EcdhEs, JWAAlgorithm): name = 'ECDH-ES+A256KW' description = 'ECDH-ES using Concat KDF and "A128KW" wrapping' keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _RawJWE(object): def encrypt(self, k, a, m): raise NotImplementedError def decrypt(self, k, a, iv, e, t): raise NotImplementedError class _AesCbcHmacSha2(_RawJWE): keysize = None def __init__(self, hashfn): self.backend = default_backend() self.hashfn = hashfn self.blocksize = algorithms.AES.block_size self.wrap_key_size = self.keysize * 2 def _mac(self, k, a, iv, e): al = _encode_int(_bitsize(a), 64) h = 
hmac.HMAC(k, self.hashfn, backend=self.backend) h.update(a) h.update(iv) h.update(e) h.update(al) m = h.finalize() return m[:_inbytes(self.keysize)] # RFC 7518 - 5.2.2 def encrypt(self, k, a, m): """ Encrypt according to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authentication Data :param m: Plaintext Returns a dictionary with the computed data. """ hkey = k[:_inbytes(self.keysize)] ekey = k[_inbytes(self.keysize):] # encrypt iv = _randombits(self.blocksize) cipher = Cipher(algorithms.AES(ekey), modes.CBC(iv), backend=self.backend) encryptor = cipher.encryptor() padder = PKCS7(self.blocksize).padder() padded_data = padder.update(m) + padder.finalize() e = encryptor.update(padded_data) + encryptor.finalize() # mac t = self._mac(hkey, a, iv, e) return (iv, e, t) def decrypt(self, k, a, iv, e, t): """ Decrypt according to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error """ hkey = k[:_inbytes(self.keysize)] dkey = k[_inbytes(self.keysize):] # verify mac if not constant_time.bytes_eq(t, self._mac(hkey, a, iv, e)): raise InvalidSignature('Failed to verify MAC') # decrypt cipher = Cipher(algorithms.AES(dkey), modes.CBC(iv), backend=self.backend) decryptor = cipher.decryptor() d = decryptor.update(e) + decryptor.finalize() unpadder = PKCS7(self.blocksize).unpadder() return unpadder.update(d) + unpadder.finalize() class _A128CbcHs256(_AesCbcHmacSha2, JWAAlgorithm): name = 'A128CBC-HS256' description = "AES_128_CBC_HMAC_SHA_256 authenticated" keysize = 128 algorithm_usage_location = 'enc' algorithm_use = 'enc' def __init__(self): super(_A128CbcHs256, self).__init__(hashes.SHA256()) class _A192CbcHs384(_AesCbcHmacSha2, JWAAlgorithm): name = 'A192CBC-HS384' description = "AES_192_CBC_HMAC_SHA_384 authenticated" keysize = 192 
algorithm_usage_location = 'enc' algorithm_use = 'enc' def __init__(self): super(_A192CbcHs384, self).__init__(hashes.SHA384()) class _A256CbcHs512(_AesCbcHmacSha2, JWAAlgorithm): name = 'A256CBC-HS512' description = "AES_256_CBC_HMAC_SHA_512 authenticated" keysize = 256 algorithm_usage_location = 'enc' algorithm_use = 'enc' def __init__(self): super(_A256CbcHs512, self).__init__(hashes.SHA512()) class _AesGcm(_RawJWE): keysize = None def __init__(self): self.backend = default_backend() self.wrap_key_size = self.keysize # RFC 7518 - 5.3 def encrypt(self, k, a, m): """ Encrypt accoriding to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authentication Data :param m: Plaintext Returns a dictionary with the computed data. """ iv = _randombits(96) cipher = Cipher(algorithms.AES(k), modes.GCM(iv), backend=self.backend) encryptor = cipher.encryptor() encryptor.authenticate_additional_data(a) e = encryptor.update(m) + encryptor.finalize() return (iv, e, encryptor.tag) def decrypt(self, k, a, iv, e, t): """ Decrypt accoriding to the selected encryption and hashing functions. 
:param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error """ cipher = Cipher(algorithms.AES(k), modes.GCM(iv, t), backend=self.backend) decryptor = cipher.decryptor() decryptor.authenticate_additional_data(a) return decryptor.update(e) + decryptor.finalize() class _A128Gcm(_AesGcm, JWAAlgorithm): name = 'A128GCM' description = "AES GCM using 128-bit key" keysize = 128 algorithm_usage_location = 'enc' algorithm_use = 'enc' class _A192Gcm(_AesGcm, JWAAlgorithm): name = 'A192GCM' description = "AES GCM using 192-bit key" keysize = 192 algorithm_usage_location = 'enc' algorithm_use = 'enc' class _A256Gcm(_AesGcm, JWAAlgorithm): name = 'A256GCM' description = "AES GCM using 256-bit key" keysize = 256 algorithm_usage_location = 'enc' algorithm_use = 'enc' class JWA(object): """JWA Signing Algorithms. This class provides access to all JWA algorithms. """ algorithms_registry = { 'HS256': _HS256, 'HS384': _HS384, 'HS512': _HS512, 'RS256': _RS256, 'RS384': _RS384, 'RS512': _RS512, 'ES256': _ES256, 'ES384': _ES384, 'ES512': _ES512, 'PS256': _PS256, 'PS384': _PS384, 'PS512': _PS512, 'none': _None, 'RSA1_5': _Rsa15, 'RSA-OAEP': _RsaOaep, 'RSA-OAEP-256': _RsaOaep256, 'A128KW': _A128KW, 'A192KW': _A192KW, 'A256KW': _A256KW, 'dir': _Direct, 'ECDH-ES': _EcdhEs, 'ECDH-ES+A128KW': _EcdhEsAes128Kw, 'ECDH-ES+A192KW': _EcdhEsAes192Kw, 'ECDH-ES+A256KW': _EcdhEsAes256Kw, 'A128GCMKW': _A128GcmKw, 'A192GCMKW': _A192GcmKw, 'A256GCMKW': _A256GcmKw, 'PBES2-HS256+A128KW': _Pbes2Hs256A128Kw, 'PBES2-HS384+A192KW': _Pbes2Hs384A192Kw, 'PBES2-HS512+A256KW': _Pbes2Hs512A256Kw, 'A128CBC-HS256': _A128CbcHs256, 'A192CBC-HS384': _A192CbcHs384, 'A256CBC-HS512': _A256CbcHs512, 'A128GCM': _A128Gcm, 'A192GCM': _A192Gcm, 'A256GCM': _A256Gcm } @classmethod def instantiate_alg(cls, name, use=None): alg = cls.algorithms_registry[name] if use is not None and alg.algorithm_use 
!= use: raise KeyError return alg() @classmethod def signing_alg(cls, name): try: return cls.instantiate_alg(name, use='sig') except KeyError: raise InvalidJWAAlgorithm( '%s is not a valid Signign algorithm name' % name) @classmethod def keymgmt_alg(cls, name): try: return cls.instantiate_alg(name, use='kex') except KeyError: raise InvalidJWAAlgorithm( '%s is not a valid Key Management algorithm name' % name) @classmethod def encryption_alg(cls, name): try: return cls.instantiate_alg(name, use='enc') except KeyError: raise InvalidJWAAlgorithm( '%s is not a valid Encryption algorithm name' % name)
./CrossVul/dataset_final_sorted/CWE-200/py/bad_5206_0
crossvul-python_data_good_5542_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend for SWIFT""" from __future__ import absolute_import import hashlib import httplib import math import urllib import urlparse from glance.common import auth from glance.common import exception from glance.openstack.common import cfg import glance.openstack.common.log as logging import glance.store import glance.store.base import glance.store.location try: import swiftclient except ImportError: pass LOG = logging.getLogger(__name__) DEFAULT_CONTAINER = 'glance' DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M ONE_MB = 1000 * 1024 swift_opts = [ cfg.BoolOpt('swift_enable_snet', default=False), cfg.StrOpt('swift_store_auth_address'), cfg.StrOpt('swift_store_user', secret=True), cfg.StrOpt('swift_store_key', secret=True), cfg.StrOpt('swift_store_auth_version', default='2'), cfg.StrOpt('swift_store_region'), cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER), cfg.IntOpt('swift_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE), cfg.IntOpt('swift_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE), cfg.BoolOpt('swift_store_create_container_on_put', default=False), cfg.BoolOpt('swift_store_multi_tenant', default=False), cfg.ListOpt('swift_store_admin_tenants', default=[]), ] CONF = cfg.CONF CONF.register_opts(swift_opts) class 
StoreLocation(glance.store.location.StoreLocation): """ Class describing a Swift URI. A Swift URI can look like any of the following: swift://user:pass@authurl.com/container/obj-id swift://account:user:pass@authurl.com/container/obj-id swift+http://user:pass@authurl.com/container/obj-id swift+https://user:pass@authurl.com/container/obj-id When using multi-tenant a URI might look like this (a storage URL): swift+https://example.com/container/obj-id The swift+http:// URIs indicate there is an HTTP authentication URL. The default for Swift is an HTTPS authentication URL, so swift:// and swift+https:// are the same... """ def process_specs(self): self.scheme = self.specs.get('scheme', 'swift+https') self.user = self.specs.get('user') self.key = self.specs.get('key') self.auth_or_store_url = self.specs.get('auth_or_store_url') self.container = self.specs.get('container') self.obj = self.specs.get('obj') def _get_credstring(self): if self.user and self.key: return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key)) return '' def get_uri(self): auth_or_store_url = self.auth_or_store_url if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = self._get_credstring() auth_or_store_url = auth_or_store_url.strip('/') container = self.container.strip('/') obj = self.obj.strip('/') return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url, container, obj) def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. 
It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _( "URI cannot contain more than one occurrence of a scheme." "If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj" ) LOG.error(_("Invalid store URI: %(reason)s") % locals()) raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) assert pieces.scheme in ('swift', 'swift+http', 'swift+https') self.scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') if len(cred_parts) != 2: reason = (_("Badly formed credentials in Swift URI.")) LOG.error(reason) raise exception.BadStoreUri() user, key = cred_parts self.user = urllib.unquote(user) self.key = urllib.unquote(key) else: self.user = None self.key = None path_parts = path.split('/') try: self.obj = path_parts.pop() self.container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) self.auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed Swift URI.") LOG.error(reason) raise exception.BadStoreUri() @property def swift_url(self): """ Creates a fully-qualified auth url that the Swift client library can use. 
The scheme for the auth_url is determined using the scheme included in the `location` field. HTTPS is assumed, unless 'swift+http' is specified. """ if self.scheme in ('swift+https', 'swift'): auth_scheme = 'https://' else: auth_scheme = 'http://' full_url = ''.join([auth_scheme, self.auth_or_store_url]) return full_url class Store(glance.store.base.Store): """An implementation of the swift backend adapter.""" EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>" CHUNKSIZE = 65536 def get_schemes(self): return ('swift+https', 'swift', 'swift+http') def configure(self): self.snet = CONF.swift_enable_snet self.multi_tenant = CONF.swift_store_multi_tenant self.admin_tenants = CONF.swift_store_admin_tenants self.region = CONF.swift_store_region self.auth_version = self._option_get('swift_store_auth_version') self.storage_url = None self.token = None def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. 
If the store was not able to successfully configure itself, it should raise `exception.BadStoreConfiguration` """ self.auth_address = self._option_get('swift_store_auth_address') self.user = self._option_get('swift_store_user') self.key = self._option_get('swift_store_key') self.container = CONF.swift_store_container if self.multi_tenant: if self.context is None: reason = _("Multi-tenant Swift storage requires a context.") raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.token = self.context.auth_tok self.key = None # multi-tenant uses tokens, not (passwords) if self.context.tenant and self.context.user: self.user = self.context.tenant + ':' + self.context.user if self.context.service_catalog: service_catalog = self.context.service_catalog self.storage_url = self._get_swift_endpoint(service_catalog) try: # The config file has swift_store_large_object_*size in MB, but # internally we store it in bytes, since the image_size parameter # passed to add() is also in bytes. 
_obj_size = CONF.swift_store_large_object_size self.large_object_size = _obj_size * ONE_MB _obj_chunk_size = CONF.swift_store_large_object_chunk_size self.large_object_chunk_size = _obj_chunk_size * ONE_MB except cfg.ConfigFileValueError, e: reason = _("Error in configuration conf: %s") % e LOG.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.scheme = 'swift+https' if self.auth_address.startswith('http://'): self.scheme = 'swift+http' self.full_auth_address = self.auth_address elif self.auth_address.startswith('https://'): self.full_auth_address = self.auth_address else: # Defaults https self.full_auth_address = 'https://' + self.auth_address def _get_swift_endpoint(self, service_catalog): return auth.get_endpoint(service_catalog, service_type='object-store') def get(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns a tuple of generator (for reading the image file) and image_size :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises `glance.exception.NotFound` if image does not exist """ loc = location.store_location swift_conn = self._swift_connection_for_location(loc) try: (resp_headers, resp_body) = swift_conn.get_object( container=loc.container, obj=loc.obj, resp_chunk_size=self.CHUNKSIZE) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise class ResponseIndexable(glance.store.Indexable): def another(self): try: return self.wrapped.next() except StopIteration: return '' length = resp_headers.get('content-length') return (ResponseIndexable(resp_body, length), length) def get_size(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns the image_size (or 0 if 
unavailable) :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() """ loc = location.store_location swift_conn = self._swift_connection_for_location(loc) try: resp_headers = swift_conn.head_object(container=loc.container, obj=loc.obj) return resp_headers.get('content-length', 0) except Exception: return 0 def _swift_connection_for_location(self, loc): if loc.user: return self._make_swift_connection( loc.swift_url, loc.user, loc.key, region=self.region) else: if self.multi_tenant: return self._make_swift_connection( None, self.user, None, storage_url=loc.swift_url, token=self.token) else: reason = (_("Location is missing user:password information.")) LOG.error(reason) raise exception.BadStoreUri(message=reason) def _make_swift_connection(self, auth_url, user, key, region=None, storage_url=None, token=None): """ Creates a connection using the Swift client library. :param auth_url The authentication for v1 style Swift auth or v2 style Keystone auth. :param user A string containing the tenant:user information. :param key A string containing the key/password for the connection. :param region A string containing the swift endpoint region :param storage_url A string containing the storage URL. 
:param token A string containing the token """ snet = self.snet auth_version = self.auth_version full_auth_url = (auth_url if not auth_url or auth_url.endswith('/') else auth_url + '/') LOG.debug(_("Creating Swift connection with " "(auth_address=%(full_auth_url)s, user=%(user)s, " "snet=%(snet)s, auth_version=%(auth_version)s)") % locals()) tenant_name = None if self.auth_version == '2': tenant_user = user.split(':') if len(tenant_user) != 2: reason = (_("Badly formed tenant:user '%(tenant_user)s' in " "Swift URI") % locals()) LOG.error(reason) raise exception.BadStoreUri() (tenant_name, user) = tenant_user if self.multi_tenant: #NOTE: multi-tenant supports v2 auth only return swiftclient.Connection( None, user, None, preauthurl=storage_url, preauthtoken=token, snet=snet, tenant_name=tenant_name, auth_version='2') else: os_options = {} if region: os_options['region_name'] = region return swiftclient.Connection( full_auth_url, user, key, snet=snet, os_options=os_options, tenant_name=tenant_name, auth_version=auth_version) def _option_get(self, param): result = getattr(CONF, param) if not result: reason = (_("Could not find %(param)s in configuration " "options.") % locals()) LOG.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) return result def add(self, image_id, image_file, image_size): """ Stores an image file with supplied identifier to the backend storage system and returns an `glance.store.ImageAddResult` object containing information about the stored image. 
:param image_id: The opaque image identifier :param image_file: The image data to write, as a file-like object :param image_size: The size of the image data to write, in bytes :retval `glance.store.ImageAddResult` object :raises `glance.common.exception.Duplicate` if the image already existed Swift writes the image data using the scheme: ``swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<ID>` where: <USER> = ``swift_store_user`` <KEY> = ``swift_store_key`` <AUTH_ADDRESS> = ``swift_store_auth_address`` <CONTAINER> = ``swift_store_container`` <ID> = The id of the image being added :note Swift auth URLs by default use HTTPS. To specify an HTTP auth URL, you can specify http://someurl.com for the swift_store_auth_address config option :note Swift cannot natively/transparently handle objects >5GB in size. So, if the image is greater than 5GB, we write chunks of image data to Swift and then write an manifest to Swift that contains information about the chunks. This same chunking process is used by default for images of an unknown size, as pushing them directly to swift would fail if the image turns out to be greater than 5GB. 
""" swift_conn = self._make_swift_connection( self.full_auth_address, self.user, self.key, storage_url=self.storage_url, token=self.token) obj_name = str(image_id) if self.multi_tenant: # NOTE: When using multi-tenant we create containers for each # image so we can set permissions on each image in swift container = self.container + '_' + obj_name auth_or_store_url = self.storage_url else: container = self.container auth_or_store_url = self.auth_address create_container_if_missing(container, swift_conn) location = StoreLocation({'scheme': self.scheme, 'container': container, 'obj': obj_name, 'auth_or_store_url': auth_or_store_url, 'user': self.user, 'key': self.key}) LOG.debug(_("Adding image object '%(obj_name)s' " "to Swift") % locals()) try: if image_size > 0 and image_size < self.large_object_size: # Image size is known, and is less than large_object_size. # Send to Swift with regular PUT. obj_etag = swift_conn.put_object(container, obj_name, image_file, content_length=image_size) else: # Write the image into Swift in chunks. chunk_id = 1 if image_size > 0: total_chunks = str(int( math.ceil(float(image_size) / float(self.large_object_chunk_size)))) else: # image_size == 0 is when we don't know the size # of the image. This can occur with older clients # that don't inspect the payload size. LOG.debug(_("Cannot determine image size. Adding as a " "segmented object to Swift.")) total_chunks = '?' 
checksum = hashlib.md5() combined_chunks_size = 0 while True: chunk_size = self.large_object_chunk_size if image_size == 0: content_length = None else: left = image_size - combined_chunks_size if left == 0: break if chunk_size > left: chunk_size = left content_length = chunk_size chunk_name = "%s-%05d" % (obj_name, chunk_id) reader = ChunkReader(image_file, checksum, chunk_size) chunk_etag = swift_conn.put_object( container, chunk_name, reader, content_length=content_length) bytes_read = reader.bytes_read msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/" "%(total_chunks)s) of length %(bytes_read)d " "to Swift returning MD5 of content: " "%(chunk_etag)s") LOG.debug(msg % locals()) if bytes_read == 0: # Delete the last chunk, because it's of zero size. # This will happen if image_size == 0. LOG.debug(_("Deleting final zero-length chunk")) swift_conn.delete_object(container, chunk_name) break chunk_id += 1 combined_chunks_size += bytes_read # In the case we have been given an unknown image size, # set the image_size to the total size of the combined chunks. if image_size == 0: image_size = combined_chunks_size # Now we write the object manifest and return the # manifest's etag... manifest = "%s/%s" % (container, obj_name) headers = {'ETag': hashlib.md5("").hexdigest(), 'X-Object-Manifest': manifest} # The ETag returned for the manifest is actually the # MD5 hash of the concatenated checksums of the strings # of each chunk...so we ignore this result in favour of # the MD5 of the entire image file contents, so that # users can verify the image file contents accordingly swift_conn.put_object(container, obj_name, None, headers=headers) obj_etag = checksum.hexdigest() # NOTE: We return the user and key here! Have to because # location is used by the API server to return the actual # image data. 
We *really* should consider NOT returning # the location attribute from GET /images/<ID> and # GET /images/details return (location.get_uri(), image_size, obj_etag) except swiftclient.ClientException, e: if e.http_status == httplib.CONFLICT: raise exception.Duplicate(_("Swift already has an image at " "this location.")) msg = (_("Failed to add object to Swift.\n" "Got error from Swift: %(e)s") % locals()) LOG.error(msg) raise glance.store.BackendException(msg) def delete(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file to delete :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises NotFound if image does not exist """ loc = location.store_location swift_conn = self._swift_connection_for_location(loc) try: # We request the manifest for the object. If one exists, # that means the object was uploaded in chunks/segments, # and we need to delete all the chunks as well as the # manifest. manifest = None try: headers = swift_conn.head_object(loc.container, loc.obj) manifest = headers.get('x-object-manifest') except swiftclient.ClientException, e: if e.http_status != httplib.NOT_FOUND: raise if manifest: # Delete all the chunks before the object manifest itself obj_container, obj_prefix = manifest.split('/', 1) for segment in swift_conn.get_container(obj_container, prefix=obj_prefix)[1]: # TODO(jaypipes): This would be an easy area to parallelize # since we're simply sending off parallelizable requests # to Swift to delete stuff. It's not like we're going to # be hogging up network or file I/O here... 
swift_conn.delete_object(obj_container, segment['name']) else: swift_conn.delete_object(loc.container, loc.obj) if self.multi_tenant: #NOTE: In multi-tenant mode containers are specific to # each object (Glance image) swift_conn.delete_container(loc.container) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise def set_acls(self, location, public=False, read_tenants=[], write_tenants=[]): """ Sets the read and write access control list for an image in the backend store. :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :public A boolean indicating whether the image should be public. :read_tenants A list of tenant strings which should be granted read access for an image. :write_tenants A list of tenant strings which should be granted write access for an image. """ if self.multi_tenant: loc = location.store_location swift_conn = self._swift_connection_for_location(loc) headers = {} if public: headers['X-Container-Read'] = ".r:*" elif read_tenants: headers['X-Container-Read'] = ','.join(read_tenants) else: headers['X-Container-Read'] = '' write_tenants.extend(self.admin_tenants) if write_tenants: headers['X-Container-Write'] = ','.join(write_tenants) else: headers['X-Container-Write'] = '' try: swift_conn.post_container(loc.container, headers=headers) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise class ChunkReader(object): def __init__(self, fd, checksum, total): self.fd = fd self.checksum = checksum self.total = total self.bytes_read = 0 def read(self, i): left = self.total - self.bytes_read if i > left: i = left result = self.fd.read(i) self.bytes_read += len(result) self.checksum.update(result) return result def 
create_container_if_missing(container, swift_conn): """ Creates a missing container in Swift if the ``swift_store_create_container_on_put`` option is set. :param container: Name of container to create :param swift_conn: Connection to Swift """ try: swift_conn.head_container(container) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: if CONF.swift_store_create_container_on_put: try: swift_conn.put_container(container) except swiftclient.ClientException, e: msg = _("Failed to add container to Swift.\n" "Got error from Swift: %(e)s") % locals() raise glance.store.BackendException(msg) else: msg = (_("The container %(container)s does not exist in " "Swift. Please set the " "swift_store_create_container_on_put option" "to add container to Swift automatically.") % locals()) raise glance.store.BackendException(msg) else: raise
./CrossVul/dataset_final_sorted/CWE-200/py/good_5542_0
crossvul-python_data_bad_5543_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend for SWIFT""" from __future__ import absolute_import import hashlib import httplib import math import urllib import urlparse from glance.common import auth from glance.common import exception from glance.openstack.common import cfg import glance.openstack.common.log as logging import glance.store import glance.store.base import glance.store.location try: import swiftclient except ImportError: pass LOG = logging.getLogger(__name__) DEFAULT_CONTAINER = 'glance' DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M ONE_MB = 1000 * 1024 swift_opts = [ cfg.BoolOpt('swift_enable_snet', default=False), cfg.StrOpt('swift_store_auth_address'), cfg.StrOpt('swift_store_user', secret=True), cfg.StrOpt('swift_store_key', secret=True), cfg.StrOpt('swift_store_auth_version', default='2'), cfg.StrOpt('swift_store_region'), cfg.StrOpt('swift_store_endpoint_type', default='publicURL'), cfg.StrOpt('swift_store_service_type', default='object-store'), cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER), cfg.IntOpt('swift_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE), cfg.IntOpt('swift_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE), cfg.BoolOpt('swift_store_create_container_on_put', default=False), cfg.BoolOpt('swift_store_multi_tenant', 
default=False), cfg.ListOpt('swift_store_admin_tenants', default=[]), ] CONF = cfg.CONF CONF.register_opts(swift_opts) class StoreLocation(glance.store.location.StoreLocation): """ Class describing a Swift URI. A Swift URI can look like any of the following: swift://user:pass@authurl.com/container/obj-id swift://account:user:pass@authurl.com/container/obj-id swift+http://user:pass@authurl.com/container/obj-id swift+https://user:pass@authurl.com/container/obj-id When using multi-tenant a URI might look like this (a storage URL): swift+https://example.com/container/obj-id The swift+http:// URIs indicate there is an HTTP authentication URL. The default for Swift is an HTTPS authentication URL, so swift:// and swift+https:// are the same... """ def process_specs(self): self.scheme = self.specs.get('scheme', 'swift+https') self.user = self.specs.get('user') self.key = self.specs.get('key') self.auth_or_store_url = self.specs.get('auth_or_store_url') self.container = self.specs.get('container') self.obj = self.specs.get('obj') def _get_credstring(self): if self.user and self.key: return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key)) return '' def get_uri(self): auth_or_store_url = self.auth_or_store_url if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = self._get_credstring() auth_or_store_url = auth_or_store_url.strip('/') container = self.container.strip('/') obj = self.obj.strip('/') return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url, container, obj) def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. 
It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence " "of a scheme. If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the " "swift+http:// scheme, like so: " "swift+http://user:pass@authurl.com/v1/container/obj") LOG.debug(_("Invalid store uri %(uri)s: %(reason)s") % locals()) raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) assert pieces.scheme in ('swift', 'swift+http', 'swift+https') self.scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') if len(cred_parts) != 2: reason = (_("Badly formed credentials '%(creds)s' in Swift " "URI") % locals()) LOG.debug(reason) raise exception.BadStoreUri() user, key = cred_parts self.user = urllib.unquote(user) self.key = urllib.unquote(key) else: self.user = None self.key = None path_parts = path.split('/') try: self.obj = path_parts.pop() self.container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) self.auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed Swift URI: %s") % uri LOG.debug(reason) raise exception.BadStoreUri() @property def swift_url(self): """ Creates a fully-qualified auth 
url that the Swift client library can use. The scheme for the auth_url is determined using the scheme included in the `location` field. HTTPS is assumed, unless 'swift+http' is specified. """ if self.auth_or_store_url.startswith('http'): return self.auth_or_store_url else: if self.scheme in ('swift+https', 'swift'): auth_scheme = 'https://' else: auth_scheme = 'http://' return ''.join([auth_scheme, self.auth_or_store_url]) def Store(context=None, loc=None): if (CONF.swift_store_multi_tenant and (loc is None or loc.store_location.user is None)): return MultiTenantStore(context, loc) return SingleTenantStore(context, loc) class BaseStore(glance.store.base.Store): CHUNKSIZE = 65536 def get_schemes(self): return ('swift+https', 'swift', 'swift+http') def configure(self): _obj_size = self._option_get('swift_store_large_object_size') self.large_object_size = _obj_size * ONE_MB _chunk_size = self._option_get('swift_store_large_object_chunk_size') self.large_object_chunk_size = _chunk_size * ONE_MB self.admin_tenants = CONF.swift_store_admin_tenants self.region = CONF.swift_store_region self.service_type = CONF.swift_store_service_type self.endpoint_type = CONF.swift_store_endpoint_type self.snet = CONF.swift_enable_snet def get(self, location, connection=None): location = location.store_location if not connection: connection = self.get_connection(location) try: resp_headers, resp_body = connection.get_object( container=location.container, obj=location.obj, resp_chunk_size=self.CHUNKSIZE) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_uri() raise exception.NotFound(_("Swift could not find image at " "uri %(uri)s") % locals()) else: raise class ResponseIndexable(glance.store.Indexable): def another(self): try: return self.wrapped.next() except StopIteration: return '' length = int(resp_headers.get('content-length', 0)) return (ResponseIndexable(resp_body, length), length) def get_size(self, location, connection=None): 
location = location.store_location if not connection: connection = self.get_connection(location) try: resp_headers = connection.head_object( container=location.container, obj=location.obj) return int(resp_headers.get('content-length', 0)) except Exception: return 0 def _option_get(self, param): result = getattr(CONF, param) if not result: reason = (_("Could not find %(param)s in configuration " "options.") % locals()) LOG.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) return result def add(self, image_id, image_file, image_size, connection=None): location = self.create_location(image_id) if not connection: connection = self.get_connection(location) self._create_container_if_missing(location.container, connection) LOG.debug(_("Adding image object '%(obj_name)s' " "to Swift") % dict(obj_name=location.obj)) try: if image_size > 0 and image_size < self.large_object_size: # Image size is known, and is less than large_object_size. # Send to Swift with regular PUT. obj_etag = connection.put_object(location.container, location.obj, image_file, content_length=image_size) else: # Write the image into Swift in chunks. chunk_id = 1 if image_size > 0: total_chunks = str(int( math.ceil(float(image_size) / float(self.large_object_chunk_size)))) else: # image_size == 0 is when we don't know the size # of the image. This can occur with older clients # that don't inspect the payload size. LOG.debug(_("Cannot determine image size. Adding as a " "segmented object to Swift.")) total_chunks = '?' 
checksum = hashlib.md5() combined_chunks_size = 0 while True: chunk_size = self.large_object_chunk_size if image_size == 0: content_length = None else: left = image_size - combined_chunks_size if left == 0: break if chunk_size > left: chunk_size = left content_length = chunk_size chunk_name = "%s-%05d" % (location.obj, chunk_id) reader = ChunkReader(image_file, checksum, chunk_size) chunk_etag = connection.put_object( location.container, chunk_name, reader, content_length=content_length) bytes_read = reader.bytes_read msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/" "%(total_chunks)s) of length %(bytes_read)d " "to Swift returning MD5 of content: " "%(chunk_etag)s") LOG.debug(msg % locals()) if bytes_read == 0: # Delete the last chunk, because it's of zero size. # This will happen if size == 0. LOG.debug(_("Deleting final zero-length chunk")) connection.delete_object(location.container, chunk_name) break chunk_id += 1 combined_chunks_size += bytes_read # In the case we have been given an unknown image size, # set the size to the total size of the combined chunks. if image_size == 0: image_size = combined_chunks_size # Now we write the object manifest and return the # manifest's etag... manifest = "%s/%s" % (location.container, location.obj) headers = {'ETag': hashlib.md5("").hexdigest(), 'X-Object-Manifest': manifest} # The ETag returned for the manifest is actually the # MD5 hash of the concatenated checksums of the strings # of each chunk...so we ignore this result in favour of # the MD5 of the entire image file contents, so that # users can verify the image file contents accordingly connection.put_object(location.container, location.obj, None, headers=headers) obj_etag = checksum.hexdigest() # NOTE: We return the user and key here! Have to because # location is used by the API server to return the actual # image data. 
We *really* should consider NOT returning # the location attribute from GET /images/<ID> and # GET /images/details return (location.get_uri(), image_size, obj_etag) except swiftclient.ClientException, e: if e.http_status == httplib.CONFLICT: raise exception.Duplicate(_("Swift already has an image at " "location %s") % location.get_uri()) msg = (_("Failed to add object to Swift.\n" "Got error from Swift: %(e)s") % locals()) LOG.error(msg) raise glance.store.BackendException(msg) def delete(self, location, connection=None): location = location.store_location if not connection: connection = self.get_connection(location) try: # We request the manifest for the object. If one exists, # that means the object was uploaded in chunks/segments, # and we need to delete all the chunks as well as the # manifest. manifest = None try: headers = connection.head_object( location.container, location.obj) manifest = headers.get('x-object-manifest') except swiftclient.ClientException, e: if e.http_status != httplib.NOT_FOUND: raise if manifest: # Delete all the chunks before the object manifest itself obj_container, obj_prefix = manifest.split('/', 1) segments = connection.get_container( obj_container, prefix=obj_prefix)[1] for segment in segments: # TODO(jaypipes): This would be an easy area to parallelize # since we're simply sending off parallelizable requests # to Swift to delete stuff. It's not like we're going to # be hogging up network or file I/O here... connection.delete_object( obj_container, segment['name']) else: connection.delete_object(location.container, location.obj) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_uri() raise exception.NotFound(_("Swift could not find image at " "uri %(uri)s") % locals()) else: raise def _create_container_if_missing(self, container, connection): """ Creates a missing container in Swift if the ``swift_store_create_container_on_put`` option is set. 
:param container: Name of container to create :param connection: Connection to swift service """ try: connection.head_container(container) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: if CONF.swift_store_create_container_on_put: try: connection.put_container(container) except swiftclient.ClientException, e: msg = _("Failed to add container to Swift.\n" "Got error from Swift: %(e)s") % locals() raise glance.store.BackendException(msg) else: msg = (_("The container %(container)s does not exist in " "Swift. Please set the " "swift_store_create_container_on_put option" "to add container to Swift automatically.") % locals()) raise glance.store.BackendException(msg) else: raise def get_connection(self): raise NotImplemented() def create_location(self): raise NotImplemented() class SingleTenantStore(BaseStore): EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>" def configure(self): super(SingleTenantStore, self).configure() self.auth_version = self._option_get('swift_store_auth_version') def configure_add(self): self.auth_address = self._option_get('swift_store_auth_address') if self.auth_address.startswith('http://'): self.scheme = 'swift+http' else: self.scheme = 'swift+https' self.container = CONF.swift_store_container self.user = self._option_get('swift_store_user') self.key = self._option_get('swift_store_key') def create_location(self, image_id): specs = {'scheme': self.scheme, 'container': self.container, 'obj': str(image_id), 'auth_or_store_url': self.auth_address, 'user': self.user, 'key': self.key} return StoreLocation(specs) def get_connection(self, location): if not location.user: reason = (_("Location is missing user:password information.")) LOG.debug(reason) raise exception.BadStoreUri(message=reason) auth_url = location.swift_url if not auth_url.endswith('/'): auth_url += '/' if self.auth_version == '2': try: tenant_name, user = location.user.split(':') except ValueError: reason = (_("Badly formed 
tenant:user '%(user)s' in " "Swift URI") % {'user': location.user}) LOG.debug(reason) raise exception.BadStoreUri() else: tenant_name = None user = location.user os_options = {} if self.region: os_options['region_name'] = self.region os_options['endpoint_type'] = self.endpoint_type os_options['service_type'] = self.service_type return swiftclient.Connection( auth_url, user, location.key, tenant_name=tenant_name, snet=self.snet, auth_version=self.auth_version, os_options=os_options) class MultiTenantStore(BaseStore): EXAMPLE_URL = "swift://<SWIFT_URL>/<CONTAINER>/<FILE>" def configure_add(self): self.container = CONF.swift_store_container if self.context is None: reason = _("Multi-tenant Swift storage requires a context.") raise exception.BadStoreConfiguration(store_name="swift", reason=reason) if self.context.service_catalog is None: reason = _("Multi-tenant Swift storage requires " "a service catalog.") raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.storage_url = auth.get_endpoint( self.context.service_catalog, service_type=self.service_type, endpoint_region=self.region, endpoint_type=self.endpoint_type) if self.storage_url.startswith('http://'): self.scheme = 'swift+http' else: self.scheme = 'swift+https' def delete(self, location, connection=None): if not connection: connection = self.get_connection(location.store_location) super(MultiTenantStore, self).delete(location, connection) connection.delete_container(location.store_location.container) def set_acls(self, location, public=False, read_tenants=None, write_tenants=None, connection=None): location = location.store_location if not connection: connection = self.get_connection(location) if read_tenants is None: read_tenants = [] if write_tenants is None: write_tenants = [] headers = {} if public: headers['X-Container-Read'] = ".r:*" elif read_tenants: headers['X-Container-Read'] = ','.join(read_tenants) else: headers['X-Container-Read'] = '' write_tenants.extend(self.admin_tenants) 
if write_tenants: headers['X-Container-Write'] = ','.join(write_tenants) else: headers['X-Container-Write'] = '' try: connection.post_container(location.container, headers=headers) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_uri() raise exception.NotFound(_("Swift could not find image at " "uri %(uri)s") % locals()) else: raise def create_location(self, image_id): specs = {'scheme': self.scheme, 'container': self.container + '_' + str(image_id), 'obj': str(image_id), 'auth_or_store_url': self.storage_url} return StoreLocation(specs) def get_connection(self, location): return swiftclient.Connection( None, self.context.user, None, preauthurl=location.swift_url, preauthtoken=self.context.auth_tok, tenant_name=self.context.tenant, auth_version='2', snet=self.snet) class ChunkReader(object): def __init__(self, fd, checksum, total): self.fd = fd self.checksum = checksum self.total = total self.bytes_read = 0 def read(self, i): left = self.total - self.bytes_read if i > left: i = left result = self.fd.read(i) self.bytes_read += len(result) self.checksum.update(result) return result
./CrossVul/dataset_final_sorted/CWE-200/py/bad_5543_0
crossvul-python_data_bad_1725_0
# -*- coding: utf-8 -*- ''' Support for the Git SCM ''' from __future__ import absolute_import # Import python libs import os import subprocess # Import salt libs from salt import utils from salt.exceptions import SaltInvocationError, CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error from salt.ext.six.moves.urllib.parse import urlunparse as _urlunparse # pylint: disable=no-name-in-module,import-error def __virtual__(): ''' Only load if git exists on the system ''' return True if utils.which('git') else False def _git_run(cmd, cwd=None, runas=None, identity=None, **kwargs): ''' simple, throw an exception with the error message on an error return code. this function may be moved to the command module, spliced with 'cmd.run_all', and used as an alternative to 'cmd.run_all'. Some commands don't return proper retcodes, so this can't replace 'cmd.run_all'. ''' env = {} if identity: stderrs = [] # if the statefile provides multiple identities, they need to be tried # (but also allow a string instead of a list) if not isinstance(identity, list): # force it into a list identity = [identity] # try each of the identities, independently for id_file in identity: env = { 'GIT_IDENTITY': id_file } # copy wrapper to area accessible by ``runas`` user # currently no suppport in windows for wrapping git ssh if not utils.is_windows(): ssh_id_wrapper = os.path.join(utils.templates.TEMPLATE_DIRNAME, 'git/ssh-id-wrapper') tmp_file = utils.mkstemp() utils.files.copyfile(ssh_id_wrapper, tmp_file) os.chmod(tmp_file, 0o500) os.chown(tmp_file, __salt__['file.user_to_uid'](runas), -1) env['GIT_SSH'] = tmp_file try: result = __salt__['cmd.run_all'](cmd, cwd=cwd, runas=runas, env=env, python_shell=False, **kwargs) finally: if 'GIT_SSH' in env: os.remove(env['GIT_SSH']) # if the command was successful, no need to try additional IDs if result['retcode'] == 0: return result['stdout'] else: 
stderrs.append(result['stderr']) # we've tried all IDs and still haven't passed, so error out raise CommandExecutionError("\n\n".join(stderrs)) else: result = __salt__['cmd.run_all'](cmd, cwd=cwd, runas=runas, env=env, python_shell=False, **kwargs) retcode = result['retcode'] if retcode == 0: return result['stdout'] else: raise CommandExecutionError( 'Command {0!r} failed. Stderr: {1!r}'.format(cmd, result['stderr'])) def _git_getdir(cwd, user=None): ''' Returns the absolute path to the top-level of a given repo because some Git commands are sensitive to where they're run from (archive for one) ''' cmd_bare = 'git rev-parse --is-bare-repository' is_bare = __salt__['cmd.run_stdout'](cmd_bare, cwd, runas=user) == 'true' if is_bare: return cwd cmd_toplvl = 'git rev-parse --show-toplevel' return __salt__['cmd.run'](cmd_toplvl, cwd) def _check_git(): ''' Check if git is available ''' utils.check_or_die('git') def _add_http_basic_auth(repository, https_user=None, https_pass=None): if https_user is None and https_pass is None: return repository else: urltuple = _urlparse(repository) if urltuple.scheme == 'https': if https_pass: auth_string = "{0}:{1}".format(https_user, https_pass) else: auth_string = https_user netloc = "{0}@{1}".format(auth_string, urltuple.netloc) urltuple = urltuple._replace(netloc=netloc) return _urlunparse(urltuple) else: raise ValueError('Basic Auth only supported for HTTPS scheme') def current_branch(cwd, user=None): ''' Returns the current branch name, if on a branch. CLI Example: .. 
code-block:: bash salt '*' git.current_branch /path/to/repo ''' cmd = r'git rev-parse --abbrev-ref HEAD' return __salt__['cmd.run_stdout'](cmd, cwd=cwd, runas=user) def revision(cwd, rev='HEAD', short=False, user=None): ''' Returns the long hash of a given identifier (hash, branch, tag, HEAD, etc) cwd The path to the Git repository rev: HEAD The revision short: False Return an abbreviated SHA1 git hash user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.revision /path/to/repo mybranch ''' _check_git() cmd = 'git rev-parse {0}{1}'.format('--short ' if short else '', rev) return _git_run(cmd, cwd, runas=user) def clone(cwd, repository, opts=None, user=None, identity=None, https_user=None, https_pass=None): ''' Clone a new repository cwd The path to the Git repository repository The git URI of the repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as identity : None A path to a private key to use over SSH https_user : None HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 20515.5.0 https_pass : None HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 CLI Example: .. 
code-block:: bash salt '*' git.clone /path/to/repo git://github.com/saltstack/salt.git salt '*' git.clone /path/to/repo.git\\ git://github.com/saltstack/salt.git '--bare --origin github' ''' _check_git() repository = _add_http_basic_auth(repository, https_user, https_pass) if not opts: opts = '' if utils.is_windows(): cmd = 'git clone {0} {1} {2}'.format(repository, cwd, opts) else: cmd = 'git clone {0} {1!r} {2}'.format(repository, cwd, opts) return _git_run(cmd, runas=user, identity=identity) def describe(cwd, rev='HEAD', user=None): ''' Returns the git describe string (or the SHA hash if there are no tags) for the given revision cwd The path to the Git repository rev: HEAD The revision to describe user : None Run git as a user other than what the minion runs as CLI Examples: .. code-block:: bash salt '*' git.describe /path/to/repo salt '*' git.describe /path/to/repo develop ''' cmd = 'git describe {0}'.format(rev) return __salt__['cmd.run_stdout'](cmd, cwd=cwd, runas=user, python_shell=False) def archive(cwd, output, rev='HEAD', fmt=None, prefix=None, user=None): ''' Export a tarball from the repository cwd The path to the Git repository output The path to the archive tarball rev: HEAD The revision to create an archive from fmt: None Format of the resulting archive, zip and tar are commonly used prefix : None Prepend <prefix>/ to every filename in the archive user : None Run git as a user other than what the minion runs as If ``prefix`` is not specified it defaults to the basename of the repo directory. CLI Example: .. 
code-block:: bash salt '*' git.archive /path/to/repo /path/to/archive.tar.gz ''' _check_git() basename = '{0}/'.format(os.path.basename(_git_getdir(cwd, user=user))) cmd = 'git archive{prefix}{fmt} -o {output} {rev}'.format( rev=rev, output=output, fmt=' --format={0}'.format(fmt) if fmt else '', prefix=' --prefix="{0}"'.format(prefix if prefix else basename) ) return _git_run(cmd, cwd=cwd, runas=user) def fetch(cwd, opts=None, user=None, identity=None): ''' Perform a fetch on the given repository cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as identity : None A path to a private key to use over SSH CLI Example: .. code-block:: bash salt '*' git.fetch /path/to/repo '--all' salt '*' git.fetch cwd=/path/to/repo opts='--all' user=johnny ''' _check_git() if not opts: opts = '' cmd = 'git fetch {0}'.format(opts) return _git_run(cmd, cwd=cwd, runas=user, identity=identity) def pull(cwd, opts=None, user=None, identity=None): ''' Perform a pull on the given repository cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as identity : None A path to a private key to use over SSH CLI Example: .. code-block:: bash salt '*' git.pull /path/to/repo opts='--rebase origin master' ''' _check_git() if not opts: opts = '' return _git_run('git pull {0}'.format(opts), cwd=cwd, runas=user, identity=identity) def rebase(cwd, rev='master', opts=None, user=None): ''' Rebase the current branch cwd The path to the Git repository rev : master The revision to rebase onto the current branch opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.rebase /path/to/repo master salt '*' git.rebase /path/to/repo 'origin master' That is the same as: .. 
code-block:: bash git rebase master git rebase origin master ''' _check_git() if not opts: opts = '' return _git_run('git rebase {0} {1}'.format(opts, rev), cwd=cwd, runas=user) def checkout(cwd, rev, force=False, opts=None, user=None): ''' Checkout a given revision cwd The path to the Git repository rev The remote branch or revision to checkout force : False Force a checkout even if there might be overwritten changes opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Examples: .. code-block:: bash salt '*' git.checkout /path/to/repo somebranch user=jeff salt '*' git.checkout /path/to/repo opts='testbranch -- conf/file1 file2' salt '*' git.checkout /path/to/repo rev=origin/mybranch opts=--track ''' _check_git() if not opts: opts = '' cmd = 'git checkout {0} {1} {2}'.format(' -f' if force else '', rev, opts) return _git_run(cmd, cwd=cwd, runas=user) def merge(cwd, branch='@{upstream}', opts=None, user=None): ''' Merge a given branch cwd The path to the Git repository branch : @{upstream} The remote branch or revision to merge into the current branch opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.fetch /path/to/repo salt '*' git.merge /path/to/repo @{upstream} ''' _check_git() if not opts: opts = '' cmd = 'git merge {0} {1}'.format(branch, opts) return _git_run(cmd, cwd, runas=user) def init(cwd, opts=None, user=None): ''' Initialize a new git repository cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. 
code-block:: bash salt '*' git.init /path/to/repo.git opts='--bare' ''' _check_git() if not opts: opts = '' cmd = 'git init {0} {1}'.format(cwd, opts) return _git_run(cmd, runas=user) def submodule(cwd, init=True, opts=None, user=None, identity=None): ''' Initialize git submodules cwd The path to the Git repository init : True Ensure that new submodules are initialized opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as identity : None A path to a private key to use over SSH CLI Example: .. code-block:: bash salt '*' git.submodule /path/to/repo.git/sub/repo ''' _check_git() if not opts: opts = '' cmd = 'git submodule update {0} {1}'.format('--init' if init else '', opts) return _git_run(cmd, cwd=cwd, runas=user, identity=identity) def status(cwd, user=None): ''' Return the status of the repository. The returned format uses the status codes of git's 'porcelain' output mode cwd The path to the Git repository user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.status /path/to/git/repo ''' cmd = 'git status -z --porcelain' stdout = _git_run(cmd, cwd=cwd, runas=user) state_by_file = [] for line in stdout.split("\0"): state = line[:2] filename = line[3:] if filename != '' and state != '': state_by_file.append((state, filename)) return state_by_file def add(cwd, file_name, user=None, opts=None): ''' add a file to git cwd The path to the Git repository file_name Path to the file in the cwd opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. 
code-block:: bash salt '*' git.add /path/to/git/repo /path/to/file ''' if not opts: opts = '' cmd = 'git add {0} {1}'.format(file_name, opts) return _git_run(cmd, cwd=cwd, runas=user) def rm(cwd, file_name, user=None, opts=None): ''' Remove a file from git cwd The path to the Git repository file_name Path to the file in the cwd opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.rm /path/to/git/repo /path/to/file ''' if not opts: opts = '' cmd = 'git rm {0} {1}'.format(file_name, opts) return _git_run(cmd, cwd=cwd, runas=user) def commit(cwd, message, user=None, opts=None): ''' create a commit cwd The path to the Git repository message The commit message opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.commit /path/to/git/repo 'The commit message' ''' cmd = subprocess.list2cmdline(['git', 'commit', '-m', message]) # add opts separately; they don't need to be quoted if opts: cmd = cmd + ' ' + opts return _git_run(cmd, cwd=cwd, runas=user) def push(cwd, remote_name, branch='master', user=None, opts=None, identity=None): ''' Push to remote cwd The path to the Git repository remote_name Name of the remote to push to branch : master Name of the branch to push opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as identity : None A path to a private key to use over SSH CLI Example: .. 
code-block:: bash salt '*' git.push /path/to/git/repo remote-name ''' if not opts: opts = '' cmd = 'git push {0} {1} {2}'.format(remote_name, branch, opts) return _git_run(cmd, cwd=cwd, runas=user, identity=identity) def remotes(cwd, user=None): ''' Get remotes like git remote -v cwd The path to the Git repository user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.remotes /path/to/repo ''' cmd = 'git remote' ret = _git_run(cmd, cwd=cwd, runas=user) res = dict() for remote_name in ret.splitlines(): remote = remote_name.strip() res[remote] = remote_get(cwd, remote, user=user) return res def remote_get(cwd, remote='origin', user=None): ''' get the fetch and push URL for a specified remote name remote : origin the remote name used to define the fetch and push URL user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.remote_get /path/to/repo salt '*' git.remote_get /path/to/repo upstream ''' try: cmd = 'git remote show -n {0}'.format(remote) ret = _git_run(cmd, cwd=cwd, runas=user) lines = ret.splitlines() remote_fetch_url = lines[1].replace('Fetch URL: ', '').strip() remote_push_url = lines[2].replace('Push URL: ', '').strip() if remote_fetch_url != remote and remote_push_url != remote: res = (remote_fetch_url, remote_push_url) return res else: return None except CommandExecutionError: return None def remote_set(cwd, name='origin', url=None, user=None, https_user=None, https_pass=None): ''' sets a remote with name and URL like git remote add <remote_name> <remote_url> remote_name : origin defines the remote name remote_url : None defines the remote URL; should not be None! user : None Run git as a user other than what the minion runs as https_user : None HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 2015.5.0 https_pass : None HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 CLI Example: .. 
code-block:: bash salt '*' git.remote_set /path/to/repo remote_url=git@github.com:saltstack/salt.git salt '*' git.remote_set /path/to/repo origin git@github.com:saltstack/salt.git ''' if remote_get(cwd, name): cmd = 'git remote rm {0}'.format(name) _git_run(cmd, cwd=cwd, runas=user) url = _add_http_basic_auth(url, https_user, https_pass) cmd = 'git remote add {0} {1}'.format(name, url) _git_run(cmd, cwd=cwd, runas=user) return remote_get(cwd=cwd, remote=name, user=None) def branch(cwd, rev, opts=None, user=None): ''' Interacts with branches. cwd The path to the Git repository rev The branch/revision to be used in the command. opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.branch mybranch --set-upstream-to=origin/mybranch ''' cmd = 'git branch {0} {1}'.format(rev, opts) _git_run(cmd, cwd=cwd, user=user) return current_branch(cwd, user=user) def reset(cwd, opts=None, user=None): ''' Reset the repository checkout cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. code-block:: bash salt '*' git.reset /path/to/repo master ''' _check_git() if not opts: opts = '' return _git_run('git reset {0}'.format(opts), cwd=cwd, runas=user) def stash(cwd, opts=None, user=None): ''' Stash changes in the repository checkout cwd The path to the Git repository opts : None Any additional options to add to the command line user : None Run git as a user other than what the minion runs as CLI Example: .. 
code-block:: bash salt '*' git.stash /path/to/repo master ''' _check_git() if not opts: opts = '' return _git_run('git stash {0}'.format(opts), cwd=cwd, runas=user) def config_set(cwd=None, setting_name=None, setting_value=None, user=None, is_global=False): ''' Set a key in the git configuration file (.git/config) of the repository or globally. cwd : None Options path to the Git repository .. versionchanged:: 2014.7.0 Made ``cwd`` optional setting_name : None The name of the configuration key to set. Required. setting_value : None The (new) value to set. Required. user : None Run git as a user other than what the minion runs as is_global : False Set to True to use the '--global' flag with 'git config' CLI Example: .. code-block:: bash salt '*' git.config_set /path/to/repo user.email me@example.com ''' if setting_name is None or setting_value is None: raise TypeError('Missing required parameter setting_name for git.config_set') if cwd is None and not is_global: raise SaltInvocationError('Either `is_global` must be set to True or ' 'you must provide `cwd`') if is_global: cmd = 'git config --global {0} "{1}"'.format(setting_name, setting_value) else: cmd = 'git config {0} "{1}"'.format(setting_name, setting_value) _check_git() return _git_run(cmd, cwd=cwd, runas=user) def config_get(cwd=None, setting_name=None, user=None): ''' Get a key or keys from the git configuration file (.git/config). cwd : None Optional path to a Git repository .. versionchanged:: 2014.7.0 Made ``cwd`` optional setting_name : None The name of the configuration key to get. Required. user : None Run git as a user other than what the minion runs as CLI Example: .. 
code-block:: bash salt '*' git.config_get setting_name=user.email salt '*' git.config_get /path/to/repo user.name arthur ''' if setting_name is None: raise TypeError('Missing required parameter setting_name for git.config_get') _check_git() return _git_run('git config {0}'.format(setting_name), cwd=cwd, runas=user) def ls_remote(cwd, repository="origin", branch="master", user=None, identity=None, https_user=None, https_pass=None): ''' Returns the upstream hash for any given URL and branch. cwd The path to the Git repository repository: origin The name of the repository to get the revision from. Can be the name of a remote, an URL, etc. branch: master The name of the branch to get the revision from. user : none run git as a user other than what the minion runs as identity : none a path to a private key to use over ssh https_user : None HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 2015.5.0 https_pass : None HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' git.ls_remote /pat/to/repo origin master ''' _check_git() repository = _add_http_basic_auth(repository, https_user, https_pass) cmd = ' '.join(["git", "ls-remote", "-h", str(repository), str(branch), "| cut -f 1"]) return _git_run(cmd, cwd=cwd, runas=user, identity=identity)
./CrossVul/dataset_final_sorted/CWE-200/py/bad_1725_0
crossvul-python_data_good_3325_0
# -*- coding: utf-8 -*- ''' Classes that manage file clients ''' from __future__ import absolute_import # Import python libs import contextlib import errno import logging import os import string import shutil import ftplib from tornado.httputil import parse_response_start_line, HTTPInputError # Import salt libs from salt.exceptions import ( CommandExecutionError, MinionError ) import salt.client import salt.crypt import salt.loader import salt.payload import salt.transport import salt.fileserver import salt.utils import salt.utils.files import salt.utils.templates import salt.utils.url import salt.utils.gzip_util import salt.utils.http import salt.ext.six as six from salt.utils.locales import sdecode from salt.utils.openstack.swift import SaltSwift # pylint: disable=no-name-in-module,import-error import salt.ext.six.moves.BaseHTTPServer as BaseHTTPServer from salt.ext.six.moves.urllib.error import HTTPError, URLError from salt.ext.six.moves.urllib.parse import urlparse, urlunparse # pylint: enable=no-name-in-module,import-error log = logging.getLogger(__name__) def get_file_client(opts, pillar=False): ''' Read in the ``file_client`` option and return the correct type of file server ''' client = opts.get('file_client', 'remote') if pillar and client == 'local': client = 'pillar' return { 'remote': RemoteClient, 'local': FSClient, 'pillar': LocalClient, }.get(client, RemoteClient)(opts) def decode_dict_keys_to_str(src): ''' Convert top level keys from bytes to strings if possible. This is necessary because Python 3 makes a distinction between these types. 
''' if not six.PY3 or not isinstance(src, dict): return src output = {} for key, val in six.iteritems(src): if isinstance(key, bytes): try: key = key.decode() except UnicodeError: pass output[key] = val return output class Client(object): ''' Base class for Salt file interactions ''' def __init__(self, opts): self.opts = opts self.utils = salt.loader.utils(self.opts) self.serial = salt.payload.Serial(self.opts) # Add __setstate__ and __getstate__ so that the object may be # deep copied. It normally can't be deep copied because its # constructor requires an 'opts' parameter. # The TCP transport needs to be able to deep copy this class # due to 'salt.utils.context.ContextDict.clone'. def __setstate__(self, state): # This will polymorphically call __init__ # in the derived class. self.__init__(state['opts']) def __getstate__(self): return {'opts': self.opts} def _check_proto(self, path): ''' Make sure that this path is intended for the salt master and trim it ''' if not path.startswith('salt://'): raise MinionError(u'Unsupported path: {0}'.format(path)) file_path, saltenv = salt.utils.url.parse(path) return file_path def _file_local_list(self, dest): ''' Helper util to return a list of files in a directory ''' if os.path.isdir(dest): destdir = dest else: destdir = os.path.dirname(dest) filelist = set() for root, dirs, files in os.walk(destdir, followlinks=True): for name in files: path = os.path.join(root, name) filelist.add(path) return filelist @contextlib.contextmanager def _cache_loc(self, path, saltenv='base', cachedir=None): ''' Return the local location to cache the file, cache dirs will be made ''' if cachedir is None: cachedir = self.opts['cachedir'] elif not os.path.isabs(cachedir): cachedir = os.path.join(self.opts['cachedir'], cachedir) dest = salt.utils.path_join(cachedir, 'files', saltenv, path) destdir = os.path.dirname(dest) cumask = os.umask(63) # remove destdir if it is a regular file to avoid an OSError when # running os.makedirs below if 
os.path.isfile(destdir): os.remove(destdir) # ensure destdir exists try: os.makedirs(destdir) except OSError as exc: if exc.errno != errno.EEXIST: # ignore if it was there already raise yield dest os.umask(cumask) def get_file(self, path, dest='', makedirs=False, saltenv='base', gzip=None, cachedir=None): ''' Copies a file from the local files or master depending on implementation ''' raise NotImplementedError def file_list_emptydirs(self, saltenv='base', prefix=''): ''' List the empty dirs ''' raise NotImplementedError def cache_file(self, path, saltenv='base', cachedir=None): ''' Pull a file down from the file server and store it in the minion file cache ''' return self.get_url(path, '', True, saltenv, cachedir=cachedir) def cache_files(self, paths, saltenv='base', cachedir=None): ''' Download a list of files stored on the master and put them in the minion file cache ''' ret = [] if isinstance(paths, str): paths = paths.split(',') for path in paths: ret.append(self.cache_file(path, saltenv, cachedir=cachedir)) return ret def cache_master(self, saltenv='base', cachedir=None): ''' Download and cache all files on a master in a specified environment ''' ret = [] for path in self.file_list(saltenv): ret.append( self.cache_file( salt.utils.url.create(path), saltenv, cachedir=cachedir) ) return ret def cache_dir(self, path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None, cachedir=None): ''' Download all of the files in a subdir of the master ''' ret = [] path = self._check_proto(sdecode(path)) # We want to make sure files start with this *directory*, use # '/' explicitly because the master (that's generating the # list of files) only runs on POSIX if not path.endswith('/'): path = path + '/' log.info( 'Caching directory \'{0}\' for environment \'{1}\''.format( path, saltenv ) ) # go through the list of all files finding ones that are in # the target directory and caching them for fn_ in self.file_list(saltenv): fn_ = sdecode(fn_) if fn_.strip() 
and fn_.startswith(path): if salt.utils.check_include_exclude( fn_, include_pat, exclude_pat): fn_ = self.cache_file( salt.utils.url.create(fn_), saltenv, cachedir=cachedir) if fn_: ret.append(fn_) if include_empty: # Break up the path into a list containing the bottom-level # directory (the one being recursively copied) and the directories # preceding it # separated = string.rsplit(path, '/', 1) # if len(separated) != 2: # # No slashes in path. (So all files in saltenv will be copied) # prefix = '' # else: # prefix = separated[0] if cachedir is None: cachedir = self.opts['cachedir'] elif not os.path.isabs(cachedir): cachedir = os.path.join(self.opts['cachedir'], cachedir) dest = salt.utils.path_join(cachedir, 'files', saltenv) for fn_ in self.file_list_emptydirs(saltenv): fn_ = sdecode(fn_) if fn_.startswith(path): minion_dir = '{0}/{1}'.format(dest, fn_) if not os.path.isdir(minion_dir): os.makedirs(minion_dir) ret.append(minion_dir) return ret def cache_local_file(self, path, **kwargs): ''' Cache a local file on the minion in the localfiles cache ''' dest = os.path.join(self.opts['cachedir'], 'localfiles', path.lstrip('/')) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) shutil.copyfile(path, dest) return dest def file_local_list(self, saltenv='base'): ''' List files in the local minion files and localfiles caches ''' filesdest = os.path.join(self.opts['cachedir'], 'files', saltenv) localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles') fdest = self._file_local_list(filesdest) ldest = self._file_local_list(localfilesdest) return sorted(fdest.union(ldest)) def file_list(self, saltenv='base', prefix=''): ''' This function must be overwritten ''' return [] def dir_list(self, saltenv='base', prefix=''): ''' This function must be overwritten ''' return [] def symlink_list(self, saltenv='base', prefix=''): ''' This function must be overwritten ''' return {} def is_cached(self, path, saltenv='base', cachedir=None): ''' 
Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string ''' if path.startswith('salt://'): path, senv = salt.utils.url.parse(path) if senv: saltenv = senv escaped = True if salt.utils.url.is_escaped(path) else False # also strip escape character '|' localsfilesdest = os.path.join( self.opts['cachedir'], 'localfiles', path.lstrip('|/')) filesdest = os.path.join( self.opts['cachedir'], 'files', saltenv, path.lstrip('|/')) extrndest = self._extrn_path(path, saltenv, cachedir=cachedir) if os.path.exists(filesdest): return salt.utils.url.escape(filesdest) if escaped else filesdest elif os.path.exists(localsfilesdest): return salt.utils.url.escape(localsfilesdest) \ if escaped \ else localsfilesdest elif os.path.exists(extrndest): return extrndest return '' def list_states(self, saltenv): ''' Return a list of all available sls modules on the master for a given environment ''' limit_traversal = self.opts.get('fileserver_limit_traversal', False) states = [] if limit_traversal: if saltenv not in self.opts['file_roots']: log.warning( 'During an attempt to list states for saltenv \'{0}\', ' 'the environment could not be found in the configured ' 'file roots'.format(saltenv) ) return states for path in self.opts['file_roots'][saltenv]: for root, dirs, files in os.walk(path, topdown=True): log.debug('Searching for states in dirs {0} and files ' '{1}'.format(dirs, files)) if not [filename.endswith('.sls') for filename in files]: # Use shallow copy so we don't disturb the memory used by os.walk. Otherwise this breaks! del dirs[:] else: for found_file in files: stripped_root = os.path.relpath(root, path).replace('/', '.') if salt.utils.is_windows(): stripped_root = stripped_root.replace('\\', '/') if found_file.endswith(('.sls')): if found_file.endswith('init.sls'): if stripped_root.endswith('.'): stripped_root = stripped_root.rstrip('.') states.append(stripped_root) else: if not stripped_root.endswith('.'): stripped_root += '.' 
if stripped_root.startswith('.'): stripped_root = stripped_root.lstrip('.') states.append(stripped_root + found_file[:-4]) else: for path in self.file_list(saltenv): if salt.utils.is_windows(): path = path.replace('\\', '/') if path.endswith('.sls'): # is an sls module! if path.endswith('{0}init.sls'.format('/')): states.append(path.replace('/', '.')[:-9]) else: states.append(path.replace('/', '.')[:-4]) return states def get_state(self, sls, saltenv, cachedir=None): ''' Get a state file from the master and store it in the local minion cache; return the location of the file ''' if '.' in sls: sls = sls.replace('.', '/') sls_url = salt.utils.url.create(sls + '.sls') init_url = salt.utils.url.create(sls + '/init.sls') for path in [sls_url, init_url]: dest = self.cache_file(path, saltenv, cachedir=cachedir) if dest: return {'source': path, 'dest': dest} return {} def get_dir(self, path, dest='', saltenv='base', gzip=None, cachedir=None): ''' Get a directory recursively from the salt-master ''' ret = [] # Strip trailing slash path = self._check_proto(path).rstrip('/') # Break up the path into a list containing the bottom-level directory # (the one being recursively copied) and the directories preceding it separated = path.rsplit('/', 1) if len(separated) != 2: # No slashes in path. (This means all files in saltenv will be # copied) prefix = '' else: prefix = separated[0] # Copy files from master for fn_ in self.file_list(saltenv, prefix=path): # Prevent files in "salt://foobar/" (or salt://foo.sh) from # matching a path of "salt://foo" try: if fn_[len(path)] != '/': continue except IndexError: continue # Remove the leading directories from path to derive # the relative path on the minion. 
minion_relpath = fn_[len(prefix):].lstrip('/') ret.append( self.get_file( salt.utils.url.create(fn_), '{0}/{1}'.format(dest, minion_relpath), True, saltenv, gzip ) ) # Replicate empty dirs from master try: for fn_ in self.file_list_emptydirs(saltenv, prefix=path): # Prevent an empty dir "salt://foobar/" from matching a path of # "salt://foo" try: if fn_[len(path)] != '/': continue except IndexError: continue # Remove the leading directories from path to derive # the relative path on the minion. minion_relpath = fn_[len(prefix):].lstrip('/') minion_mkdir = '{0}/{1}'.format(dest, minion_relpath) if not os.path.isdir(minion_mkdir): os.makedirs(minion_mkdir) ret.append(minion_mkdir) except TypeError: pass ret.sort() return ret def get_url(self, url, dest, makedirs=False, saltenv='base', no_cache=False, cachedir=None): ''' Get a single file from a URL. ''' url_data = urlparse(url) url_scheme = url_data.scheme url_path = os.path.join( url_data.netloc, url_data.path).rstrip(os.sep) if url_scheme and url_scheme.lower() in string.ascii_lowercase: url_path = ':'.join((url_scheme, url_path)) url_scheme = 'file' if url_scheme in ('file', ''): # Local filesystem if not os.path.isabs(url_path): raise CommandExecutionError( 'Path \'{0}\' is not absolute'.format(url_path) ) if dest is None: with salt.utils.fopen(url_path, 'r') as fp_: data = fp_.read() return data return url_path if url_scheme == 'salt': result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir) if result and dest is None: with salt.utils.fopen(result, 'r') as fp_: data = fp_.read() return data return result if dest: destdir = os.path.dirname(dest) if not os.path.isdir(destdir): if makedirs: os.makedirs(destdir) else: return '' elif not no_cache: dest = self._extrn_path(url, saltenv, cachedir=cachedir) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) if url_data.scheme == 's3': try: def s3_opt(key, default=None): '''Get value of s3.<key> from Minion config or from 
Pillar''' if 's3.' + key in self.opts: return self.opts['s3.' + key] try: return self.opts['pillar']['s3'][key] except (KeyError, TypeError): return default self.utils['s3.query'](method='GET', bucket=url_data.netloc, path=url_data.path[1:], return_bin=False, local_file=dest, action=None, key=s3_opt('key'), keyid=s3_opt('keyid'), service_url=s3_opt('service_url'), verify_ssl=s3_opt('verify_ssl', True), location=s3_opt('location')) return dest except Exception as exc: raise MinionError( 'Could not fetch from {0}. Exception: {1}'.format(url, exc) ) if url_data.scheme == 'ftp': try: ftp = ftplib.FTP(url_data.hostname) ftp.login() with salt.utils.fopen(dest, 'wb') as fp_: ftp.retrbinary('RETR {0}'.format(url_data.path), fp_.write) return dest except Exception as exc: raise MinionError('Could not retrieve {0} from FTP server. Exception: {1}'.format(url, exc)) if url_data.scheme == 'swift': try: def swift_opt(key, default): '''Get value of <key> from Minion config or from Pillar''' if key in self.opts: return self.opts[key] try: return self.opts['pillar'][key] except (KeyError, TypeError): return default swift_conn = SaltSwift(swift_opt('keystone.user', None), swift_opt('keystone.tenant', None), swift_opt('keystone.auth_url', None), swift_opt('keystone.password', None)) swift_conn.get_object(url_data.netloc, url_data.path[1:], dest) return dest except Exception: raise MinionError('Could not fetch from {0}'.format(url)) get_kwargs = {} if url_data.username is not None \ and url_data.scheme in ('http', 'https'): netloc = url_data.netloc at_sign_pos = netloc.rfind('@') if at_sign_pos != -1: netloc = netloc[at_sign_pos + 1:] fixed_url = urlunparse( (url_data.scheme, netloc, url_data.path, url_data.params, url_data.query, url_data.fragment)) get_kwargs['auth'] = (url_data.username, url_data.password) else: fixed_url = url destfp = None try: # Tornado calls streaming_callback on redirect response bodies. # But we need streaming to support fetching large files (> RAM avail). 
# Here we working this around by disabling recording the body for redirections. # The issue is fixed in Tornado 4.3.0 so on_header callback could be removed # when we'll deprecate Tornado<4.3.0. # See #27093 and #30431 for details. # Use list here to make it writable inside the on_header callback. Simple bool doesn't # work here: on_header creates a new local variable instead. This could be avoided in # Py3 with 'nonlocal' statement. There is no Py2 alternative for this. write_body = [False] def on_header(hdr): try: hdr = parse_response_start_line(hdr) except HTTPInputError: # Not the first line, do nothing return write_body[0] = hdr.code not in [301, 302, 303, 307] if no_cache: result = [] def on_chunk(chunk): if write_body[0]: result.append(chunk) else: dest_tmp = "{0}.part".format(dest) # We need an open filehandle to use in the on_chunk callback, # that's why we're not using a with clause here. destfp = salt.utils.fopen(dest_tmp, 'wb') def on_chunk(chunk): if write_body[0]: destfp.write(chunk) query = salt.utils.http.query( fixed_url, stream=True, streaming_callback=on_chunk, header_callback=on_header, username=url_data.username, password=url_data.password, opts=self.opts, **get_kwargs ) if 'handle' not in query: raise MinionError('Error: {0} reading {1}'.format(query['error'], url)) if no_cache: return ''.join(result) else: destfp.close() destfp = None salt.utils.files.rename(dest_tmp, dest) return dest except HTTPError as exc: raise MinionError('HTTP error {0} reading {1}: {3}'.format( exc.code, url, *BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code])) except URLError as exc: raise MinionError('Error reading {0}: {1}'.format(url, exc.reason)) finally: if destfp is not None: destfp.close() def get_template( self, url, dest, template='jinja', makedirs=False, saltenv='base', cachedir=None, **kwargs): ''' Cache a file then process it as a template ''' if 'env' in kwargs: salt.utils.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument 
list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' ) kwargs.pop('env') kwargs['saltenv'] = saltenv url_data = urlparse(url) sfn = self.cache_file(url, saltenv, cachedir=cachedir) if not os.path.exists(sfn): return '' if template in salt.utils.templates.TEMPLATE_REGISTRY: data = salt.utils.templates.TEMPLATE_REGISTRY[template]( sfn, **kwargs ) else: log.error('Attempted to render template with unavailable engine ' '{0}'.format(template)) return '' if not data['result']: # Failed to render the template log.error( 'Failed to render template with error: {0}'.format( data['data'] ) ) return '' if not dest: # No destination passed, set the dest as an extrn_files cache dest = self._extrn_path(url, saltenv, cachedir=cachedir) # If Salt generated the dest name, create any required dirs makedirs = True destdir = os.path.dirname(dest) if not os.path.isdir(destdir): if makedirs: os.makedirs(destdir) else: salt.utils.safe_rm(data['data']) return '' shutil.move(data['data'], dest) return dest def _extrn_path(self, url, saltenv, cachedir=None): ''' Return the extn_filepath for a given url ''' url_data = urlparse(url) if salt.utils.is_windows(): netloc = salt.utils.sanitize_win_path_string(url_data.netloc) else: netloc = url_data.netloc # Strip user:pass from URLs netloc = netloc.split('@')[-1] if cachedir is None: cachedir = self.opts['cachedir'] elif not os.path.isabs(cachedir): cachedir = os.path.join(self.opts['cachedir'], cachedir) if url_data.query: file_name = '-'.join([url_data.path, url_data.query]) else: file_name = url_data.path return salt.utils.path_join( cachedir, 'extrn_files', saltenv, netloc, file_name ) class LocalClient(Client): ''' Use the local_roots option to parse a local file root ''' def __init__(self, opts): Client.__init__(self, opts) def _find_file(self, path, saltenv='base'): ''' Locate the file path ''' fnd = {'path': '', 'rel': ''} if saltenv not in 
self.opts['file_roots']: return fnd if salt.utils.url.is_escaped(path): # The path arguments are escaped path = salt.utils.url.unescape(path) for root in self.opts['file_roots'][saltenv]: full = os.path.join(root, path) if os.path.isfile(full): fnd['path'] = full fnd['rel'] = path return fnd return fnd def get_file(self, path, dest='', makedirs=False, saltenv='base', gzip=None, cachedir=None): ''' Copies a file from the local files directory into :param:`dest` gzip compression settings are ignored for local files ''' path = self._check_proto(path) fnd = self._find_file(path, saltenv) fnd_path = fnd.get('path') if not fnd_path: return '' return fnd_path def file_list(self, saltenv='base', prefix=''): ''' Return a list of files in the given environment with optional relative prefix path to limit directory traversal ''' ret = [] if saltenv not in self.opts['file_roots']: return ret prefix = prefix.strip('/') for path in self.opts['file_roots'][saltenv]: for root, dirs, files in os.walk( os.path.join(path, prefix), followlinks=True ): # Don't walk any directories that match file_ignore_regex or glob dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)] for fname in files: relpath = os.path.relpath(os.path.join(root, fname), path) ret.append(sdecode(relpath)) return ret def file_list_emptydirs(self, saltenv='base', prefix=''): ''' List the empty dirs in the file_roots with optional relative prefix path to limit directory traversal ''' ret = [] prefix = prefix.strip('/') if saltenv not in self.opts['file_roots']: return ret for path in self.opts['file_roots'][saltenv]: for root, dirs, files in os.walk( os.path.join(path, prefix), followlinks=True ): # Don't walk any directories that match file_ignore_regex or glob dirs[:] = [d for d in dirs if not salt.fileserver.is_file_ignored(self.opts, d)] if len(dirs) == 0 and len(files) == 0: ret.append(sdecode(os.path.relpath(root, path))) return ret def dir_list(self, saltenv='base', prefix=''): ''' 
List the dirs in the file_roots with optional relative prefix path to limit directory traversal ''' ret = [] if saltenv not in self.opts['file_roots']: return ret prefix = prefix.strip('/') for path in self.opts['file_roots'][saltenv]: for root, dirs, files in os.walk( os.path.join(path, prefix), followlinks=True ): ret.append(sdecode(os.path.relpath(root, path))) return ret def __get_file_path(self, path, saltenv='base'): ''' Return either a file path or the result of a remote find_file call. ''' try: path = self._check_proto(path) except MinionError as err: # Local file path if not os.path.isfile(path): msg = 'specified file {0} is not present to generate hash: {1}' log.warning(msg.format(path, err)) return None else: return path return self._find_file(path, saltenv) def hash_file(self, path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file in the file_roots prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. ''' ret = {} fnd = self.__get_file_path(path, saltenv) if fnd is None: return ret try: # Remote file path (self._find_file() invoked) fnd_path = fnd['path'] except TypeError: # Local file path fnd_path = fnd hash_type = self.opts.get('hash_type', 'md5') ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type) ret['hash_type'] = hash_type return ret def hash_and_stat_file(self, path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file in the file_roots prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. Additionally, return the stat result of the file, or None if no stat results were found. 
''' ret = {} fnd = self.__get_file_path(path, saltenv) if fnd is None: return ret, None try: # Remote file path (self._find_file() invoked) fnd_path = fnd['path'] fnd_stat = fnd.get('stat') except TypeError: # Local file path fnd_path = fnd try: fnd_stat = list(os.stat(fnd_path)) except Exception: fnd_stat = None hash_type = self.opts.get('hash_type', 'md5') ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type) ret['hash_type'] = hash_type return ret, fnd_stat def list_env(self, saltenv='base'): ''' Return a list of the files in the file server's specified environment ''' return self.file_list(saltenv) def master_opts(self): ''' Return the master opts data ''' return self.opts def envs(self): ''' Return the available environments ''' ret = [] for saltenv in self.opts['file_roots']: ret.append(saltenv) return ret def ext_nodes(self): ''' Originally returned information via the external_nodes subsystem. External_nodes was deprecated and removed in 2014.1.6 in favor of master_tops (which had been around since pre-0.17). salt-call --local state.show_top ends up here, but master_tops has not been extended to support show_top in a completely local environment yet. It's worth noting that originally this fn started with if 'external_nodes' not in opts: return {} So since external_nodes is gone now, we are just returning the empty dict. ''' return {} class RemoteClient(Client): ''' Interact with the salt master file server. 
''' def __init__(self, opts): Client.__init__(self, opts) self.channel = salt.transport.Channel.factory(self.opts) if hasattr(self.channel, 'auth'): self.auth = self.channel.auth else: self.auth = '' def _refresh_channel(self): ''' Reset the channel, in the event of an interruption ''' self.channel = salt.transport.Channel.factory(self.opts) return self.channel def get_file(self, path, dest='', makedirs=False, saltenv='base', gzip=None, cachedir=None): ''' Get a single file from the salt-master path must be a salt server location, aka, salt://path/to/file, if dest is omitted, then the downloaded file will be placed in the minion cache ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not salt.utils.is_windows(): hash_server, stat_server = self.hash_and_stat_file(path, saltenv) try: mode_server = stat_server[0] except (IndexError, TypeError): mode_server = None else: hash_server = self.hash_file(path, saltenv) mode_server = None # Check if file exists on server, before creating files and # directories if hash_server == '': log.debug( 'Could not find file \'%s\' in saltenv \'%s\'', path, saltenv ) return False # Hash compare local copy with master and skip download # if no difference found. 
dest2check = dest if not dest2check: rel_path = self._check_proto(path) log.debug( 'In saltenv \'%s\', looking at rel_path \'%s\' to resolve ' '\'%s\'', saltenv, rel_path, path ) with self._cache_loc( rel_path, saltenv, cachedir=cachedir) as cache_dest: dest2check = cache_dest log.debug( 'In saltenv \'%s\', ** considering ** path \'%s\' to resolve ' '\'%s\'', saltenv, dest2check, path ) if dest2check and os.path.isfile(dest2check): if not salt.utils.is_windows(): hash_local, stat_local = \ self.hash_and_stat_file(dest2check, saltenv) try: mode_local = stat_local[0] except (IndexError, TypeError): mode_local = None else: hash_local = self.hash_file(dest2check, saltenv) mode_local = None if hash_local == hash_server: return dest2check log.debug( 'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'', saltenv, path ) d_tries = 0 transport_tries = 0 path = self._check_proto(path) load = {'path': path, 'saltenv': saltenv, 'cmd': '_serve_file'} if gzip: gzip = int(gzip) load['gzip'] = gzip fn_ = None if dest: destdir = os.path.dirname(dest) if not os.path.isdir(destdir): if makedirs: os.makedirs(destdir) else: return False # We need an open filehandle here, that's why we're not using a # with clause: fn_ = salt.utils.fopen(dest, 'wb+') else: log.debug('No dest file found') while True: if not fn_: load['loc'] = 0 else: load['loc'] = fn_.tell() data = self.channel.send(load, raw=True) if six.PY3: # Sometimes the source is local (eg when using # 'salt.filesystem.FSChan'), in which case the keys are # already strings. Sometimes the source is remote, in which # case the keys are bytes due to raw mode. Standardize on # strings for the top-level keys to simplify things. 
data = decode_dict_keys_to_str(data) try: if not data['data']: if not fn_ and data['dest']: # This is a 0 byte file on the master with self._cache_loc( data['dest'], saltenv, cachedir=cachedir) as cache_dest: dest = cache_dest with salt.utils.fopen(cache_dest, 'wb+') as ofile: ofile.write(data['data']) if 'hsum' in data and d_tries < 3: # Master has prompted a file verification, if the # verification fails, re-download the file. Try 3 times d_tries += 1 hsum = salt.utils.get_hash(dest, salt.utils.to_str(data.get('hash_type', b'md5'))) if hsum != data['hsum']: log.warning( 'Bad download of file %s, attempt %d of 3', path, d_tries ) continue break if not fn_: with self._cache_loc( data['dest'], saltenv, cachedir=cachedir) as cache_dest: dest = cache_dest # If a directory was formerly cached at this path, then # remove it to avoid a traceback trying to write the file if os.path.isdir(dest): salt.utils.rm_rf(dest) fn_ = salt.utils.fopen(dest, 'wb+') if data.get('gzip', None): data = salt.utils.gzip_util.uncompress(data['data']) else: data = data['data'] if six.PY3 and isinstance(data, str): data = data.encode() fn_.write(data) except (TypeError, KeyError) as exc: try: data_type = type(data).__name__ except AttributeError: # Shouldn't happen, but don't let this cause a traceback. 
data_type = str(type(data)) transport_tries += 1 log.warning( 'Data transport is broken, got: %s, type: %s, ' 'exception: %s, attempt %d of 3', data, data_type, exc, transport_tries ) self._refresh_channel() if transport_tries > 3: log.error( 'Data transport is broken, got: %s, type: %s, ' 'exception: %s, retry attempts exhausted', data, data_type, exc ) break if fn_: fn_.close() log.info( 'Fetching file from saltenv \'%s\', ** done ** \'%s\'', saltenv, path ) else: log.debug( 'In saltenv \'%s\', we are ** missing ** the file \'%s\'', saltenv, path ) return dest def file_list(self, saltenv='base', prefix=''): ''' List the files on the master ''' load = {'saltenv': saltenv, 'prefix': prefix, 'cmd': '_file_list'} return [sdecode(fn_) for fn_ in self.channel.send(load)] def file_list_emptydirs(self, saltenv='base', prefix=''): ''' List the empty dirs on the master ''' load = {'saltenv': saltenv, 'prefix': prefix, 'cmd': '_file_list_emptydirs'} self.channel.send(load) def dir_list(self, saltenv='base', prefix=''): ''' List the dirs on the master ''' load = {'saltenv': saltenv, 'prefix': prefix, 'cmd': '_dir_list'} return self.channel.send(load) def symlink_list(self, saltenv='base', prefix=''): ''' List symlinked files and dirs on the master ''' load = {'saltenv': saltenv, 'prefix': prefix, 'cmd': '_symlink_list'} return self.channel.send(load) def __hash_and_stat_file(self, path, saltenv='base'): ''' Common code for hashing and stating files ''' try: path = self._check_proto(path) except MinionError as err: if not os.path.isfile(path): msg = 'specified file {0} is not present to generate hash: {1}' log.warning(msg.format(path, err)) return {} else: ret = {} hash_type = self.opts.get('hash_type', 'md5') ret['hsum'] = salt.utils.get_hash(path, form=hash_type) ret['hash_type'] = hash_type return ret load = {'path': path, 'saltenv': saltenv, 'cmd': '_file_hash'} return self.channel.send(load) def hash_file(self, path, saltenv='base'): ''' Return the hash of a file, to get 
the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. ''' return self.__hash_and_stat_file(path, saltenv) def hash_and_stat_file(self, path, saltenv='base'): ''' The same as hash_file, but also return the file's mode, or None if no mode data is present. ''' hash_result = self.hash_file(path, saltenv) try: path = self._check_proto(path) except MinionError as err: if not os.path.isfile(path): return hash_result, None else: try: return hash_result, list(os.stat(path)) except Exception: return hash_result, None load = {'path': path, 'saltenv': saltenv, 'cmd': '_file_find'} fnd = self.channel.send(load) try: stat_result = fnd.get('stat') except AttributeError: stat_result = None return hash_result, stat_result def list_env(self, saltenv='base'): ''' Return a list of the files in the file server's specified environment ''' load = {'saltenv': saltenv, 'cmd': '_file_list'} return self.channel.send(load) def envs(self): ''' Return a list of available environments ''' load = {'cmd': '_file_envs'} return self.channel.send(load) def master_opts(self): ''' Return the master opts data ''' load = {'cmd': '_master_opts'} return self.channel.send(load) def ext_nodes(self): ''' Return the metadata derived from the external nodes system on the master. ''' load = {'cmd': '_ext_nodes', 'id': self.opts['id'], 'opts': self.opts} if self.auth: load['tok'] = self.auth.gen_token('salt') return self.channel.send(load) class FSClient(RemoteClient): ''' A local client that uses the RemoteClient but substitutes the channel for the FSChan object ''' def __init__(self, opts): # pylint: disable=W0231 Client.__init__(self, opts) # pylint: disable=W0233 self.channel = salt.fileserver.FSChan(opts) self.auth = DumbAuth() class DumbAuth(object): ''' The dumbauth class is used to stub out auth calls fired from the FSClient subsystem ''' def gen_token(self, clear_tok): return clear_tok
./CrossVul/dataset_final_sorted/CWE-200/py/good_3325_0
crossvul-python_data_bad_609_0
import hmac  # stdlib constant-time comparison (see validate_request below)
import re
import warnings

import six

from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View

from ..exceptions import AnymailInsecureWebhookWarning, AnymailWebhookValidationFailure
from ..utils import get_anymail_setting, collect_all_methods, get_request_basic_auth


class AnymailBasicAuthMixin(object):
    """Implements webhook basic auth as mixin to AnymailBaseWebhookView."""

    # Whether to warn if basic auth is not configured.
    # For most ESPs, basic auth is the only webhook security,
    # so the default is True. Subclasses can set False if
    # they enforce other security (like signed webhooks).
    warn_if_no_basic_auth = True

    # List of allowable HTTP basic-auth 'user:pass' strings.
    # (Declaring class attr allows override by kwargs in View.as_view.)
    basic_auth = None

    def __init__(self, **kwargs):
        """Load the shared WEBHOOK_AUTHORIZATION setting and warn if it is unset.

        Accepts the setting via ``kwargs`` (so ``View.as_view(basic_auth=...)``
        style overrides work) or from the ANYMAIL settings dict.
        """
        self.basic_auth = get_anymail_setting('webhook_authorization', default=[],
                                              kwargs=kwargs)  # no esp_name -- auth is shared between ESPs
        # Allow a single string:
        if isinstance(self.basic_auth, six.string_types):
            self.basic_auth = [self.basic_auth]
        if self.warn_if_no_basic_auth and len(self.basic_auth) < 1:
            warnings.warn(
                "Your Anymail webhooks are insecure and open to anyone on the web. "
                "You should set WEBHOOK_AUTHORIZATION in your ANYMAIL settings. "
                "See 'Securing webhooks' in the Anymail docs.",
                AnymailInsecureWebhookWarning)
        # noinspection PyArgumentList
        super(AnymailBasicAuthMixin, self).__init__(**kwargs)

    def validate_request(self, request):
        """If configured for webhook basic auth, validate request has correct auth.

        Raises AnymailWebhookValidationFailure (an HTTP 400 via Django's
        SuspiciousOperation handling) when the request's basic auth is
        missing or does not match any configured credential.
        """
        if self.basic_auth:
            request_auth = get_request_basic_auth(request)
            # SECURITY: compare with hmac.compare_digest rather than plain
            # string equality / `in`. Ordinary equality short-circuits on the
            # first mismatched character, which leaks how much of the
            # credential was correct through response timing and allows
            # byte-by-byte brute forcing of the webhook secret.
            candidate = b"" if request_auth is None else request_auth.encode("utf-8")
            matched = any(
                hmac.compare_digest(candidate, allowed.encode("utf-8"))
                for allowed in self.basic_auth)
            if request_auth is None or not matched:
                # noinspection PyUnresolvedReferences
                raise AnymailWebhookValidationFailure(
                    "Missing or invalid basic auth in Anymail %s webhook" % self.esp_name)


# Mixin note: Django's View.__init__ doesn't cooperate with chaining,
# so all mixins that need __init__ must appear before View in MRO.

class AnymailBaseWebhookView(AnymailBasicAuthMixin, View):
    """Base view for processing ESP event webhooks

    ESP-specific implementations should subclass
    and implement parse_events. They may also want
    to implement validate_request if additional security
    is available.
    """

    def __init__(self, **kwargs):
        super(AnymailBaseWebhookView, self).__init__(**kwargs)
        # Collect every validate_request defined anywhere in the class
        # chain (mixins included), so each gets run on every post.
        self.validators = collect_all_methods(self.__class__, 'validate_request')

    # Subclass implementation:

    # Where to send events: either ..signals.inbound or ..signals.tracking signal
    signal = None

    def validate_request(self, request):
        """Check validity of webhook post, or raise AnymailWebhookValidationFailure.

        AnymailBaseWebhookView includes basic auth validation.
        Subclasses can implement (or provide via mixins) if the ESP supports
        additional validation (such as signature checking).

        *All* definitions of this method in the class chain (including mixins)
        will be called. There is no need to chain to the superclass.
        (See self.run_validators and collect_all_methods.)
        """
        # if request.POST['signature'] != expected_signature:
        #     raise AnymailWebhookValidationFailure("...message...")
        # (else just do nothing)
        pass

    def parse_events(self, request):
        """Return a list of normalized AnymailWebhookEvent extracted from ESP post data.

        Subclasses must implement.
        """
        raise NotImplementedError()

    # HTTP handlers (subclasses shouldn't need to override):

    http_method_names = ["post", "head", "options"]

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # csrf_exempt: ESP servers posting events can't supply a CSRF token.
        return super(AnymailBaseWebhookView, self).dispatch(request, *args, **kwargs)

    def head(self, request, *args, **kwargs):
        # Some ESPs verify the webhook with a HEAD request at configuration time
        return HttpResponse()

    def post(self, request, *args, **kwargs):
        # Normal Django exception handling will do the right thing:
        # - AnymailWebhookValidationFailure will turn into an HTTP 400 response
        #   (via Django SuspiciousOperation handling)
        # - Any other errors (e.g., in signal dispatch) will turn into HTTP 500
        #   responses (via normal Django error handling). ESPs generally
        #   treat that as "try again later".
        self.run_validators(request)
        events = self.parse_events(request)
        esp_name = self.esp_name
        for event in events:
            self.signal.send(sender=self.__class__, event=event, esp_name=esp_name)
        return HttpResponse()

    # Request validation (subclasses shouldn't need to override):

    def run_validators(self, request):
        """Run every collected validate_request; any may raise to reject the post."""
        for validator in self.validators:
            validator(self, request)

    @property
    def esp_name(self):
        """
        Read-only name of the ESP for this webhook view.

        (E.g., MailgunTrackingWebhookView will return "Mailgun")
        """
        # NOTE(review): only the 'Tracking'/'Inbox' suffixes are stripped;
        # confirm this matches the actual subclass naming convention used
        # elsewhere in the package (e.g. '...InboundWebhookView' would not
        # be stripped by this pattern).
        return re.sub(r'(Tracking|Inbox)WebhookView$', "", self.__class__.__name__)
./CrossVul/dataset_final_sorted/CWE-200/py/bad_609_0
crossvul-python_data_good_4954_0
"""Django password hashing framework: hasher registry plus concrete hashers."""
from __future__ import unicode_literals

import base64
import binascii
import hashlib
import importlib
import warnings
from collections import OrderedDict

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
    constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _

UNUSABLE_PASSWORD_PREFIX = '!'  # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40  # number of random chars to add after UNUSABLE_PASSWORD_PREFIX


def is_password_usable(encoded):
    # Unusable (or missing) passwords can never verify; anything else is
    # usable only if a configured hasher recognizes its format.
    if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
        return False
    try:
        identify_hasher(encoded)
    except ValueError:
        return False
    return True


def check_password(password, encoded, setter=None, preferred='default'):
    """
    Returns a boolean of whether the raw password matches the three
    part encoded digest.

    If setter is specified, it'll be called when you need to
    regenerate the password.
    """
    if password is None or not is_password_usable(encoded):
        return False

    preferred = get_hasher(preferred)
    hasher = identify_hasher(encoded)

    hasher_changed = hasher.algorithm != preferred.algorithm
    must_update = hasher_changed or preferred.must_update(encoded)
    is_correct = hasher.verify(password, encoded)

    # If the hasher didn't change (we don't protect against enumeration if it
    # does) and the password should get updated, try to close the timing gap
    # between the work factor of the current encoded password and the default
    # work factor.
    if not is_correct and not hasher_changed and must_update:
        hasher.harden_runtime(password, encoded)

    if setter and is_correct and must_update:
        setter(password)
    return is_correct


def make_password(password, salt=None, hasher='default'):
    """
    Turn a plain-text password into a hash for database storage

    Same as encode() but generates a new random salt.
    If password is None then a concatenation of
    UNUSABLE_PASSWORD_PREFIX and a random string will be returned
    which disallows logins. Additional random string reduces chances
    of gaining access to staff or superuser accounts.
    See ticket #20079 for more info.
    """
    if password is None:
        return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
    hasher = get_hasher(hasher)

    if not salt:
        salt = hasher.salt()

    return hasher.encode(password, salt)


@lru_cache.lru_cache()
def get_hashers():
    # Instantiate every hasher listed in settings.PASSWORD_HASHERS, in order
    # (the first entry is the "default"/preferred hasher).
    hashers = []
    for hasher_path in settings.PASSWORD_HASHERS:
        hasher_cls = import_string(hasher_path)
        hasher = hasher_cls()
        if not getattr(hasher, 'algorithm'):
            raise ImproperlyConfigured("hasher doesn't specify an "
                                       "algorithm name: %s" % hasher_path)
        hashers.append(hasher)
    return hashers


@lru_cache.lru_cache()
def get_hashers_by_algorithm():
    return {hasher.algorithm: hasher for hasher in get_hashers()}


@receiver(setting_changed)
def reset_hashers(**kwargs):
    # Invalidate the caches when PASSWORD_HASHERS changes (mainly for tests).
    if kwargs['setting'] == 'PASSWORD_HASHERS':
        get_hashers.cache_clear()
        get_hashers_by_algorithm.cache_clear()


def get_hasher(algorithm='default'):
    """
    Returns an instance of a loaded password hasher.

    If algorithm is 'default', the default hasher will be returned.
    This function will also lazy import hashers specified in your
    settings file if needed.
    """
    if hasattr(algorithm, 'algorithm'):
        # Already a hasher instance -- pass it through unchanged.
        return algorithm

    elif algorithm == 'default':
        return get_hashers()[0]

    else:
        hashers = get_hashers_by_algorithm()
        try:
            return hashers[algorithm]
        except KeyError:
            raise ValueError("Unknown password hashing algorithm '%s'. "
                             "Did you specify it in the PASSWORD_HASHERS "
                             "setting?" % algorithm)


def identify_hasher(encoded):
    """
    Returns an instance of a loaded password hasher.

    Identifies hasher algorithm by examining encoded hash, and calls
    get_hasher() to return hasher. Raises ValueError if
    algorithm cannot be identified, or if hasher is not loaded.
    """
    # Ancient versions of Django created plain MD5 passwords and accepted
    # MD5 passwords with an empty salt.
    if ((len(encoded) == 32 and '$' not in encoded) or
            (len(encoded) == 37 and encoded.startswith('md5$$'))):
        algorithm = 'unsalted_md5'
    # Ancient versions of Django accepted SHA1 passwords with an empty salt.
    elif len(encoded) == 46 and encoded.startswith('sha1$$'):
        algorithm = 'unsalted_sha1'
    else:
        algorithm = encoded.split('$', 1)[0]
    return get_hasher(algorithm)


def mask_hash(hash, show=6, char="*"):
    """
    Returns the given hash, with only the first ``show`` number shown. The
    rest are masked with ``char`` for security reasons.
    """
    masked = hash[:show]
    masked += char * len(hash[show:])
    return masked


class BasePasswordHasher(object):
    """
    Abstract base class for password hashers

    When creating your own hasher, you need to override algorithm,
    verify(), encode() and safe_summary().

    PasswordHasher objects are immutable.
    """
    algorithm = None
    library = None

    def _load_library(self):
        # Lazily import the backing crypto library (self.library may be a
        # module path string, or a (name, module_path) pair).
        if self.library is not None:
            if isinstance(self.library, (tuple, list)):
                name, mod_path = self.library
            else:
                mod_path = self.library
            try:
                module = importlib.import_module(mod_path)
            except ImportError as e:
                raise ValueError("Couldn't load %r algorithm library: %s" %
                                 (self.__class__.__name__, e))
            return module
        raise ValueError("Hasher %r doesn't specify a library attribute" %
                         self.__class__.__name__)

    def salt(self):
        """
        Generates a cryptographically secure nonce salt in ASCII
        """
        return get_random_string()

    def verify(self, password, encoded):
        """
        Checks if the given password is correct
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')

    def encode(self, password, salt):
        """
        Creates an encoded database value

        The result is normally formatted as "algorithm$salt$hash" and
        must be fewer than 128 characters.
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')

    def safe_summary(self, encoded):
        """
        Returns a summary of safe values

        The result is a dictionary and will be used where the password field
        must be displayed to construct a safe representation of the password.
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')

    def must_update(self, encoded):
        return False

    def harden_runtime(self, password, encoded):
        """
        Bridge the runtime gap between the work factor supplied in `encoded`
        and the work factor suggested by this hasher.

        Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
        `self.iterations` is 30000, this method should run password through
        another 10000 iterations of PBKDF2. Similar approaches should exist
        for any hasher that has a work factor. If not, this method should be
        defined as a no-op to silence the warning.
        """
        warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method')


class PBKDF2PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the PBKDF2 algorithm (recommended)

    Configured to use PBKDF2 + HMAC + SHA256.
    The result is a 64 byte binary string.  Iterations may be changed
    safely but you must rename the algorithm if you change SHA256.
    """
    algorithm = "pbkdf2_sha256"
    iterations = 30000
    digest = hashlib.sha256

    def encode(self, password, salt, iterations=None):
        assert password is not None
        assert salt and '$' not in salt
        if not iterations:
            iterations = self.iterations
        hash = pbkdf2(password, salt, iterations, digest=self.digest)
        hash = base64.b64encode(hash).decode('ascii').strip()
        return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)

    def verify(self, password, encoded):
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        # Re-encode and compare in constant time to avoid timing leaks.
        encoded_2 = self.encode(password, salt, int(iterations))
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('iterations'), iterations),
            (_('salt'), mask_hash(salt)),
            (_('hash'), mask_hash(hash)),
        ])

    def must_update(self, encoded):
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        return int(iterations) != self.iterations

    def harden_runtime(self, password, encoded):
        # Run the *residual* iterations so a stored low-iteration hash takes
        # roughly as long to check as a current-strength one.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        extra_iterations = self.iterations - int(iterations)
        if extra_iterations > 0:
            self.encode(password, salt, extra_iterations)


class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
    """
    Alternate PBKDF2 hasher which uses SHA1, the default PRF
    recommended by PKCS #5. This is compatible with other
    implementations of PBKDF2, such as openssl's
    PKCS5_PBKDF2_HMAC_SHA1().
    """
    algorithm = "pbkdf2_sha1"
    digest = hashlib.sha1


class BCryptSHA256PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm (recommended)

    This is considered by many to be the most secure algorithm but you
    must first install the bcrypt library.  Please be warned that
    this library depends on native C code and might cause portability
    issues.
    """
    algorithm = "bcrypt_sha256"
    digest = hashlib.sha256
    library = ("bcrypt", "bcrypt")
    rounds = 12

    def salt(self):
        bcrypt = self._load_library()
        return bcrypt.gensalt(self.rounds)

    def encode(self, password, salt):
        bcrypt = self._load_library()
        # Hash the password prior to using bcrypt to prevent password
        # truncation as described in #20138.
        if self.digest is not None:
            # Use binascii.hexlify() because a hex encoded bytestring is
            # Unicode on Python 3.
            password = binascii.hexlify(self.digest(force_bytes(password)).digest())
        else:
            password = force_bytes(password)

        data = bcrypt.hashpw(password, salt)
        return "%s$%s" % (self.algorithm, force_text(data))

    def verify(self, password, encoded):
        algorithm, data = encoded.split('$', 1)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, force_bytes(data))
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
        assert algorithm == self.algorithm
        salt, checksum = data[:22], data[22:]
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('work factor'), work_factor),
            (_('salt'), mask_hash(salt)),
            (_('checksum'), mask_hash(checksum)),
        ])

    def must_update(self, encoded):
        algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
        return int(rounds) != self.rounds

    def harden_runtime(self, password, encoded):
        _, data = encoded.split('$', 1)
        salt = data[:29]  # Length of the salt in bcrypt.
        rounds = data.split('$')[2]
        # work factor is logarithmic, adding one doubles the load.
        diff = 2**(self.rounds - int(rounds)) - 1
        while diff > 0:
            self.encode(password, force_bytes(salt))
            diff -= 1


class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm

    This is considered by many to be the most secure algorithm but you
    must first install the bcrypt library.  Please be warned that
    this library depends on native C code and might cause portability
    issues.

    This hasher does not first hash the password which means it is subject to
    the 72 character bcrypt password truncation, most use cases should prefer
    the BCryptSHA256PasswordHasher.

    See: https://code.djangoproject.com/ticket/20138
    """
    algorithm = "bcrypt"
    digest = None


class SHA1PasswordHasher(BasePasswordHasher):
    """
    The SHA1 password hashing algorithm (not recommended)
    """
    algorithm = "sha1"

    def encode(self, password, salt):
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])

    def harden_runtime(self, password, encoded):
        # Single-round digest: no work factor to equalize.
        pass


class MD5PasswordHasher(BasePasswordHasher):
    """
    The Salted MD5 password hashing algorithm (not recommended)
    """
    algorithm = "md5"

    def encode(self, password, salt):
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])

    def harden_runtime(self, password, encoded):
        # Single-round digest: no work factor to equalize.
        pass


class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
    """
    Very insecure algorithm that you should *never* use; stores SHA1 hashes
    with an empty salt.

    This class is implemented because Django used to accept such password
    hashes. Some older Django installs still have these values lingering
    around so we need to handle and upgrade them properly.
    """
    algorithm = "unsalted_sha1"

    def salt(self):
        return ''

    def encode(self, password, salt):
        assert salt == ''
        hash = hashlib.sha1(force_bytes(password)).hexdigest()
        return 'sha1$$%s' % hash

    def verify(self, password, encoded):
        encoded_2 = self.encode(password, '')
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        assert encoded.startswith('sha1$$')
        hash = encoded[6:]
        return OrderedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(hash)),
        ])

    def harden_runtime(self, password, encoded):
        # Single-round digest: no work factor to equalize.
        pass


class UnsaltedMD5PasswordHasher(BasePasswordHasher):
    """
    Incredibly insecure algorithm that you should *never* use; stores unsalted
    MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
    empty salt.

    This class is implemented because Django used to store passwords this way
    and to accept such password hashes. Some older Django installs still have
    these values lingering around so we need to handle and upgrade them
    properly.
    """
    algorithm = "unsalted_md5"

    def salt(self):
        return ''

    def encode(self, password, salt):
        assert salt == ''
        return hashlib.md5(force_bytes(password)).hexdigest()

    def verify(self, password, encoded):
        # Strip the legacy 'md5$$' prefix before comparing.
        if len(encoded) == 37 and encoded.startswith('md5$$'):
            encoded = encoded[5:]
        encoded_2 = self.encode(password, '')
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        return OrderedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(encoded, show=3)),
        ])

    def harden_runtime(self, password, encoded):
        # Single-round digest: no work factor to equalize.
        pass


class CryptPasswordHasher(BasePasswordHasher):
    """
    Password hashing using UNIX crypt (not recommended)

    The crypt module is not supported on all platforms.
    """
    algorithm = "crypt"
    library = "crypt"

    def salt(self):
        return get_random_string(2)

    def encode(self, password, salt):
        crypt = self._load_library()
        assert len(salt) == 2
        data = crypt.crypt(force_str(password), salt)
        # we don't need to store the salt, but Django used to do this
        return "%s$%s$%s" % (self.algorithm, '', data)

    def verify(self, password, encoded):
        crypt = self._load_library()
        algorithm, salt, data = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return constant_time_compare(data, crypt.crypt(force_str(password), data))

    def safe_summary(self, encoded):
        algorithm, salt, data = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), salt),
            (_('hash'), mask_hash(data, show=3)),
        ])

    def harden_runtime(self, password, encoded):
        # crypt has no configurable work factor.
        pass
./CrossVul/dataset_final_sorted/CWE-200/py/good_4954_0
crossvul-python_data_good_1559_0
import os.path

import logging

from ceph_deploy import hosts, exc
from ceph_deploy.cliutil import priority


LOG = logging.getLogger(__name__)


def fetch_file(args, frompath, topath, _hosts):
    """Fetch `frompath` (formatted per-host) from the first of `_hosts`
    that has it, writing it to local `topath`.

    Returns True if `topath` already exists locally or a key was fetched;
    False if none of the hosts had the file.
    """
    if os.path.exists(topath):
        LOG.debug('Have %s', topath)
        return True
    for hostname in _hosts:
        filepath = frompath.format(hostname=hostname)
        LOG.debug('Checking %s for %s', hostname, filepath)
        distro = hosts.get(hostname, username=args.username)
        try:
            key = distro.conn.remote_module.get_file(filepath)
        finally:
            # BUGFIX: always release the remote connection. The original code
            # only called distro.conn.exit() on the not-found path and leaked
            # the connection when the key was found (early return).
            distro.conn.exit()
        if key is not None:
            LOG.debug('Got %s key from %s.', topath, hostname)
            with open(topath, 'w') as f:
                f.write(key)
            return True
        LOG.warning('Unable to find %s on %s', filepath, hostname)
    return False


def gatherkeys(args):
    """Gather admin, mon, and bootstrap keyrings from the monitor hosts.

    Raises exc.KeyNotFoundError for required keys (admin, mon, osd, mds);
    only warns when the optional RGW bootstrap key is missing.
    """
    # Restrict permissions on the fetched keyring files to the owner.
    # (0o77 is the same value as the legacy `077` literal, but also valid
    # syntax on Python 3.)
    oldmask = os.umask(0o77)
    try:
        # client.admin
        keyring = '/etc/ceph/{cluster}.client.admin.keyring'.format(
            cluster=args.cluster)
        r = fetch_file(
            args=args,
            frompath=keyring,
            topath='{cluster}.client.admin.keyring'.format(
                cluster=args.cluster),
            _hosts=args.mon,
        )
        if not r:
            raise exc.KeyNotFoundError(keyring, args.mon)

        # mon.
        keyring = '/var/lib/ceph/mon/{cluster}-{{hostname}}/keyring'.format(
            cluster=args.cluster)
        r = fetch_file(
            args=args,
            frompath=keyring,
            topath='{cluster}.mon.keyring'.format(cluster=args.cluster),
            _hosts=args.mon,
        )
        if not r:
            raise exc.KeyNotFoundError(keyring, args.mon)

        # bootstrap
        for what in ['osd', 'mds', 'rgw']:
            keyring = '/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format(
                what=what,
                cluster=args.cluster)
            r = fetch_file(
                args=args,
                frompath=keyring,
                topath='{cluster}.bootstrap-{what}.keyring'.format(
                    cluster=args.cluster,
                    what=what),
                _hosts=args.mon,
            )
            if not r:
                if what in ['osd', 'mds']:
                    raise exc.KeyNotFoundError(keyring, args.mon)
                else:
                    LOG.warning(("No RGW bootstrap key found. Will not be able to "
                                 "deploy RGW daemons"))
    finally:
        # Restore the caller's umask even if a key lookup fails.
        os.umask(oldmask)


@priority(40)
def make(parser):
    """
    Gather authentication keys for provisioning new nodes.
    """
    parser.add_argument(
        'mon',
        metavar='HOST',
        nargs='+',
        help='monitor host to pull keys from',
    )
    parser.set_defaults(
        func=gatherkeys,
    )
./CrossVul/dataset_final_sorted/CWE-200/py/good_1559_0
crossvul-python_data_good_4650_1
from typing import List, Optional, Tuple import graphene from django.conf import settings from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.db import transaction from django.db.models import Prefetch from graphql_jwt.exceptions import PermissionDenied from ...account.error_codes import AccountErrorCode from ...checkout import models from ...checkout.error_codes import CheckoutErrorCode from ...checkout.utils import ( abort_order_data, add_promo_code_to_checkout, add_variant_to_checkout, change_billing_address_in_checkout, change_shipping_address_in_checkout, clean_checkout, create_order, get_user_checkout, get_valid_shipping_methods_for_checkout, prepare_order_data, recalculate_checkout_discount, remove_promo_code_from_checkout, ) from ...core import analytics from ...core.exceptions import InsufficientStock from ...core.permissions import OrderPermissions from ...core.taxes import TaxError from ...core.utils.url import validate_storefront_url from ...discount import models as voucher_model from ...payment import PaymentError, gateway, models as payment_models from ...payment.interface import AddressData from ...payment.utils import store_customer_id from ...product import models as product_models from ...warehouse.availability import check_stock_quantity, get_available_quantity from ..account.i18n import I18nMixin from ..account.types import AddressInput from ..core.mutations import ( BaseMutation, ClearMetaBaseMutation, ModelMutation, UpdateMetaBaseMutation, ) from ..core.types.common import CheckoutError from ..core.utils import from_global_id_strict_type from ..order.types import Order from ..product.types import ProductVariant from ..shipping.types import ShippingMethod from .types import Checkout, CheckoutLine ERROR_DOES_NOT_SHIP = "This checkout doesn't need shipping" def clean_shipping_method( checkout: models.Checkout, method: Optional[models.ShippingMethod], discounts ) -> bool: """Check if current shipping method is 
valid.""" if not method: # no shipping method was provided, it is valid return True if not checkout.is_shipping_required(): raise ValidationError( ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED.value ) if not checkout.shipping_address: raise ValidationError( "Cannot choose a shipping method for a checkout without the " "shipping address.", code=CheckoutErrorCode.SHIPPING_ADDRESS_NOT_SET.value, ) valid_methods = get_valid_shipping_methods_for_checkout(checkout, discounts) return method in valid_methods def update_checkout_shipping_method_if_invalid(checkout: models.Checkout, discounts): # remove shipping method when empty checkout if checkout.quantity == 0 or not checkout.is_shipping_required(): checkout.shipping_method = None checkout.save(update_fields=["shipping_method", "last_change"]) is_valid = clean_shipping_method( checkout=checkout, method=checkout.shipping_method, discounts=discounts ) if not is_valid: cheapest_alternative = get_valid_shipping_methods_for_checkout( checkout, discounts ).first() checkout.shipping_method = cheapest_alternative checkout.save(update_fields=["shipping_method", "last_change"]) def check_lines_quantity(variants, quantities, country): """Check if stock is sufficient for each line in the list of dicts.""" for variant, quantity in zip(variants, quantities): if quantity < 0: raise ValidationError( { "quantity": ValidationError( "The quantity should be higher than zero.", code=CheckoutErrorCode.ZERO_QUANTITY, ) } ) if quantity > settings.MAX_CHECKOUT_LINE_QUANTITY: raise ValidationError( { "quantity": ValidationError( "Cannot add more than %d times this item." "" % settings.MAX_CHECKOUT_LINE_QUANTITY, code=CheckoutErrorCode.QUANTITY_GREATER_THAN_LIMIT, ) } ) try: check_stock_quantity(variant, country, quantity) except InsufficientStock as e: available_quantity = get_available_quantity(e.item, country) message = ( "Could not add item " + "%(item_name)s. Only %(remaining)d remaining in stock." 
% { "remaining": available_quantity, "item_name": e.item.display_product(), } ) raise ValidationError({"quantity": ValidationError(message, code=e.code)}) class CheckoutLineInput(graphene.InputObjectType): quantity = graphene.Int(required=True, description="The number of items purchased.") variant_id = graphene.ID(required=True, description="ID of the product variant.") class CheckoutCreateInput(graphene.InputObjectType): lines = graphene.List( CheckoutLineInput, description=( "A list of checkout lines, each containing information about " "an item in the checkout." ), required=True, ) email = graphene.String(description="The customer's email address.") shipping_address = AddressInput( description=( "The mailing address to where the checkout will be shipped. " "Note: the address will be ignored if the checkout " "doesn't contain shippable items." ) ) billing_address = AddressInput(description="Billing address of the customer.") class CheckoutCreate(ModelMutation, I18nMixin): created = graphene.Field( graphene.Boolean, description=( "Whether the checkout was created or the current active one was returned. " "Refer to checkoutLinesAdd and checkoutLinesUpdate to merge a cart " "with an active checkout." ), ) class Arguments: input = CheckoutCreateInput( required=True, description="Fields required to create checkout." ) class Meta: description = "Create a new checkout." 
model = models.Checkout return_field_name = "checkout" error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def process_checkout_lines( cls, lines, country ) -> Tuple[List[product_models.ProductVariant], List[int]]: variant_ids = [line.get("variant_id") for line in lines] variants = cls.get_nodes_or_error( variant_ids, "variant_id", ProductVariant, qs=product_models.ProductVariant.objects.prefetch_related( "product__product_type" ), ) quantities = [line.get("quantity") for line in lines] check_lines_quantity(variants, quantities, country) return variants, quantities @classmethod def retrieve_shipping_address(cls, user, data: dict) -> Optional[models.Address]: if "shipping_address" in data: return cls.validate_address(data["shipping_address"]) if user.is_authenticated: return user.default_shipping_address return None @classmethod def retrieve_billing_address(cls, user, data: dict) -> Optional[models.Address]: if "billing_address" in data: return cls.validate_address(data["billing_address"]) if user.is_authenticated: return user.default_billing_address return None @classmethod def clean_input(cls, info, instance: models.Checkout, data, input_cls=None): cleaned_input = super().clean_input(info, instance, data) user = info.context.user country = info.context.country.code # Resolve and process the lines, retrieving the variants and quantities lines = data.pop("lines", None) if lines: ( cleaned_input["variants"], cleaned_input["quantities"], ) = cls.process_checkout_lines(lines, country) cleaned_input["shipping_address"] = cls.retrieve_shipping_address(user, data) cleaned_input["billing_address"] = cls.retrieve_billing_address(user, data) # Use authenticated user's email as default email if user.is_authenticated: email = data.pop("email", None) cleaned_input["email"] = email or user.email return cleaned_input @classmethod def save_addresses(cls, instance: models.Checkout, cleaned_input: dict): shipping_address = 
cleaned_input.get("shipping_address") billing_address = cleaned_input.get("billing_address") updated_fields = ["last_change"] if shipping_address and instance.is_shipping_required(): shipping_address.save() instance.shipping_address = shipping_address.get_copy() updated_fields.append("shipping_address") if billing_address: billing_address.save() instance.billing_address = billing_address.get_copy() updated_fields.append("billing_address") # Note django will simply return if the list is empty instance.save(update_fields=updated_fields) @classmethod @transaction.atomic() def save(cls, info, instance: models.Checkout, cleaned_input): # Create the checkout object instance.save() country = info.context.country instance.set_country(country.code, commit=True) # Retrieve the lines to create variants = cleaned_input.get("variants") quantities = cleaned_input.get("quantities") # Create the checkout lines if variants and quantities: for variant, quantity in zip(variants, quantities): try: add_variant_to_checkout(instance, variant, quantity) except InsufficientStock as exc: raise ValidationError( f"Insufficient product stock: {exc.item}", code=exc.code ) # Save provided addresses and associate them to the checkout cls.save_addresses(instance, cleaned_input) @classmethod def perform_mutation(cls, _root, info, **data): user = info.context.user # `perform_mutation` is overridden to properly get or create a checkout # instance here and abort mutation if needed. if user.is_authenticated: checkout, _ = get_user_checkout(user) if checkout is not None: # If user has an active checkout, return it without any # modifications. 
return CheckoutCreate(checkout=checkout, created=False) checkout = models.Checkout(user=user) else: checkout = models.Checkout() cleaned_input = cls.clean_input(info, checkout, data.get("input")) checkout = cls.construct_instance(checkout, cleaned_input) cls.clean_instance(info, checkout) cls.save(info, checkout, cleaned_input) cls._save_m2m(info, checkout, cleaned_input) return CheckoutCreate(checkout=checkout, created=True) class CheckoutLinesAdd(BaseMutation): checkout = graphene.Field(Checkout, description="An updated checkout.") class Arguments: checkout_id = graphene.ID(description="The ID of the checkout.", required=True) lines = graphene.List( CheckoutLineInput, required=True, description=( "A list of checkout lines, each containing information about " "an item in the checkout." ), ) class Meta: description = "Adds a checkout line to the existing checkout." error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def perform_mutation(cls, _root, info, checkout_id, lines, replace=False): checkout = cls.get_node_or_error( info, checkout_id, only_type=Checkout, field="checkout_id" ) variant_ids = [line.get("variant_id") for line in lines] variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant) quantities = [line.get("quantity") for line in lines] check_lines_quantity(variants, quantities, checkout.get_country()) if variants and quantities: for variant, quantity in zip(variants, quantities): try: add_variant_to_checkout( checkout, variant, quantity, replace=replace ) except InsufficientStock as exc: raise ValidationError( f"Insufficient product stock: {exc.item}", code=exc.code ) update_checkout_shipping_method_if_invalid(checkout, info.context.discounts) recalculate_checkout_discount(checkout, info.context.discounts) return CheckoutLinesAdd(checkout=checkout) class CheckoutLinesUpdate(CheckoutLinesAdd): checkout = graphene.Field(Checkout, description="An updated checkout.") class Meta: description = "Updates 
checkout line in the existing checkout." error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def perform_mutation(cls, root, info, checkout_id, lines): return super().perform_mutation(root, info, checkout_id, lines, replace=True) class CheckoutLineDelete(BaseMutation): checkout = graphene.Field(Checkout, description="An updated checkout.") class Arguments: checkout_id = graphene.ID(description="The ID of the checkout.", required=True) line_id = graphene.ID(description="ID of the checkout line to delete.") class Meta: description = "Deletes a CheckoutLine." error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def perform_mutation(cls, _root, info, checkout_id, line_id): checkout = cls.get_node_or_error( info, checkout_id, only_type=Checkout, field="checkout_id" ) line = cls.get_node_or_error( info, line_id, only_type=CheckoutLine, field="line_id" ) if line and line in checkout.lines.all(): line.delete() update_checkout_shipping_method_if_invalid(checkout, info.context.discounts) recalculate_checkout_discount(checkout, info.context.discounts) return CheckoutLineDelete(checkout=checkout) class CheckoutCustomerAttach(BaseMutation): checkout = graphene.Field(Checkout, description="An updated checkout.") class Arguments: checkout_id = graphene.ID(required=True, description="ID of the checkout.") customer_id = graphene.ID( required=False, description=( "The ID of the customer. DEPRECATED: This field is deprecated and will " "be removed in Saleor 2.11. To identify a customer you should " "authenticate with JWT token." ), ) class Meta: description = "Sets the customer as the owner of the checkout." 
error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def check_permissions(cls, context): return context.user.is_authenticated @classmethod def perform_mutation(cls, _root, info, checkout_id, customer_id=None): checkout = cls.get_node_or_error( info, checkout_id, only_type=Checkout, field="checkout_id" ) # Check if provided customer_id matches with the authenticated user and raise # error if it doesn't. This part can be removed when `customer_id` field is # removed. if customer_id: current_user_id = graphene.Node.to_global_id("User", info.context.user.id) if current_user_id != customer_id: raise PermissionDenied() checkout.user = info.context.user checkout.save(update_fields=["user", "last_change"]) return CheckoutCustomerAttach(checkout=checkout) class CheckoutCustomerDetach(BaseMutation): checkout = graphene.Field(Checkout, description="An updated checkout.") class Arguments: checkout_id = graphene.ID(description="Checkout ID.", required=True) class Meta: description = "Removes the user assigned as the owner of the checkout." error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def check_permissions(cls, context): return context.user.is_authenticated @classmethod def perform_mutation(cls, _root, info, checkout_id): checkout = cls.get_node_or_error( info, checkout_id, only_type=Checkout, field="checkout_id" ) # Raise error if the current user doesn't own the checkout of the given ID. 
        if checkout.user and checkout.user != info.context.user:
            raise PermissionDenied()

        checkout.user = None
        checkout.save(update_fields=["user", "last_change"])
        return CheckoutCustomerDetach(checkout=checkout)


class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        shipping_address = AddressInput(
            required=True,
            description="The mailing address to where the checkout will be shipped.",
        )

    class Meta:
        description = "Update shipping address in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, shipping_address):
        pk = from_global_id_strict_type(checkout_id, Checkout, field="checkout_id")

        try:
            # Prefetch product types so is_shipping_required() below does not
            # issue one query per line.
            checkout = models.Checkout.objects.prefetch_related(
                "lines__variant__product__product_type"
            ).get(pk=pk)
        except ObjectDoesNotExist:
            raise ValidationError(
                {
                    "checkout_id": ValidationError(
                        f"Couldn't resolve to a node: {checkout_id}",
                        code=CheckoutErrorCode.NOT_FOUND,
                    )
                }
            )

        # Setting a shipping address only makes sense for shippable goods.
        if not checkout.is_shipping_required():
            raise ValidationError(
                {
                    "shipping_address": ValidationError(
                        ERROR_DOES_NOT_SHIP,
                        code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
                    )
                }
            )

        shipping_address = cls.validate_address(
            shipping_address, instance=checkout.shipping_address, info=info
        )

        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)

        # Persist the address and relink it to the checkout atomically so a
        # failure cannot leave the checkout pointing at an unsaved address.
        with transaction.atomic():
            shipping_address.save()
            change_shipping_address_in_checkout(checkout, shipping_address)
        recalculate_checkout_discount(checkout, info.context.discounts)

        return CheckoutShippingAddressUpdate(checkout=checkout)


class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
    # Subclasses CheckoutShippingAddressUpdate to reuse its I18n address
    # validation, but overrides perform_mutation entirely.
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        billing_address = AddressInput(
            required=True, description="The billing address of the checkout."
        )

    class Meta:
        description = "Update billing address in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, billing_address):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        billing_address = cls.validate_address(
            billing_address, instance=checkout.billing_address, info=info
        )
        with transaction.atomic():
            billing_address.save()
            change_billing_address_in_checkout(checkout, billing_address)
        return CheckoutBillingAddressUpdate(checkout=checkout)


class CheckoutEmailUpdate(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        # NOTE(review): checkout_id is not marked required=True here, unlike
        # the sibling mutations -- confirm whether that is intentional.
        checkout_id = graphene.ID(description="Checkout ID.")
        email = graphene.String(required=True, description="email.")

    class Meta:
        description = "Updates email address in the existing checkout object."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, email):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )

        checkout.email = email
        # clean_instance runs model validation (e.g. email format) before save.
        cls.clean_instance(info, checkout)
        checkout.save(update_fields=["email", "last_change"])
        return CheckoutEmailUpdate(checkout=checkout)


class CheckoutShippingMethodUpdate(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        # NOTE(review): checkout_id lacks required=True, unlike most sibling
        # mutations -- confirm whether that is intentional.
        checkout_id = graphene.ID(description="Checkout ID.")
        shipping_method_id = graphene.ID(required=True, description="Shipping method.")

    class Meta:
        # NOTE(review): this description says "shipping address" but the
        # mutation updates the shipping *method* -- looks like a copy/paste
        # slip; confirm before changing the public schema text.
        description = "Updates the shipping address of the checkout."
error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def perform_mutation(cls, _root, info, checkout_id, shipping_method_id): pk = from_global_id_strict_type( checkout_id, only_type=Checkout, field="checkout_id" ) try: checkout = models.Checkout.objects.prefetch_related( "lines__variant__product__collections", "lines__variant__product__product_type", ).get(pk=pk) except ObjectDoesNotExist: raise ValidationError( { "checkout_id": ValidationError( f"Couldn't resolve to a node: {checkout_id}", code=CheckoutErrorCode.NOT_FOUND, ) } ) if not checkout.is_shipping_required(): raise ValidationError( { "shipping_method": ValidationError( ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED, ) } ) shipping_method = cls.get_node_or_error( info, shipping_method_id, only_type=ShippingMethod, field="shipping_method_id", ) shipping_method_is_valid = clean_shipping_method( checkout=checkout, method=shipping_method, discounts=info.context.discounts ) if not shipping_method_is_valid: raise ValidationError( { "shipping_method": ValidationError( "This shipping method is not applicable.", code=CheckoutErrorCode.SHIPPING_METHOD_NOT_APPLICABLE, ) } ) checkout.shipping_method = shipping_method checkout.save(update_fields=["shipping_method", "last_change"]) recalculate_checkout_discount(checkout, info.context.discounts) return CheckoutShippingMethodUpdate(checkout=checkout) class CheckoutComplete(BaseMutation): order = graphene.Field(Order, description="Placed order.") class Arguments: checkout_id = graphene.ID(description="Checkout ID.", required=True) store_source = graphene.Boolean( default_value=False, description=( "Determines whether to store the payment source for future usage." ), ) redirect_url = graphene.String( required=False, description=( "URL of a view where users should be redirected to " "see the order details. URL in RFC 1808 format." ), ) class Meta: description = ( "Completes the checkout. 
As a result a new order is created and " "a payment charge is made. This action requires a successful " "payment before it can be performed." ) error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def perform_mutation(cls, _root, info, checkout_id, store_source, **data): checkout = cls.get_node_or_error( info, checkout_id, only_type=Checkout, field="checkout_id", qs=models.Checkout.objects.prefetch_related( "gift_cards", "lines", Prefetch( "payments", queryset=payment_models.Payment.objects.prefetch_related( "order", "order__lines" ), ), ).select_related("shipping_method", "shipping_method__shipping_zone"), ) discounts = info.context.discounts user = info.context.user clean_checkout(checkout, discounts) payment = checkout.get_last_active_payment() with transaction.atomic(): try: order_data = prepare_order_data( checkout=checkout, tracking_code=analytics.get_client_id(info.context), discounts=discounts, ) except InsufficientStock as e: raise ValidationError( f"Insufficient product stock: {e.item}", code=e.code ) except voucher_model.NotApplicable: raise ValidationError( "Voucher not applicable", code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE, ) except TaxError as tax_error: return ValidationError( "Unable to calculate taxes - %s" % str(tax_error), code=CheckoutErrorCode.TAX_ERROR, ) billing_address = order_data["billing_address"] shipping_address = order_data.get("shipping_address", None) billing_address = AddressData(**billing_address.as_data()) if shipping_address is not None: shipping_address = AddressData(**shipping_address.as_data()) try: txn = gateway.process_payment( payment=payment, token=payment.token, store_source=store_source ) if not txn.is_success: raise PaymentError(txn.error) except PaymentError as e: abort_order_data(order_data) raise ValidationError(str(e), code=CheckoutErrorCode.PAYMENT_ERROR) if txn.customer_id and user.is_authenticated: store_customer_id(user, payment.gateway, txn.customer_id) redirect_url = 
data.get("redirect_url", "") if redirect_url: try: validate_storefront_url(redirect_url) except ValidationError as error: raise ValidationError( {"redirect_url": error}, code=AccountErrorCode.INVALID ) # create the order into the database order = create_order( checkout=checkout, order_data=order_data, user=user, redirect_url=redirect_url, ) # remove checkout after order is successfully paid checkout.delete() # return the success response with the newly created order data return CheckoutComplete(order=order) class CheckoutAddPromoCode(BaseMutation): checkout = graphene.Field( Checkout, description="The checkout with the added gift card or voucher." ) class Arguments: checkout_id = graphene.ID(description="Checkout ID.", required=True) promo_code = graphene.String( description="Gift card code or voucher code.", required=True ) class Meta: description = "Adds a gift card or a voucher to a checkout." error_type_class = CheckoutError error_type_field = "checkout_errors" @classmethod def perform_mutation(cls, _root, info, checkout_id, promo_code): checkout = cls.get_node_or_error( info, checkout_id, only_type=Checkout, field="checkout_id" ) add_promo_code_to_checkout(checkout, promo_code, info.context.discounts) return CheckoutAddPromoCode(checkout=checkout) class CheckoutRemovePromoCode(BaseMutation): checkout = graphene.Field( Checkout, description="The checkout with the removed gift card or voucher." ) class Arguments: checkout_id = graphene.ID(description="Checkout ID.", required=True) promo_code = graphene.String( description="Gift card code or voucher code.", required=True ) class Meta: description = "Remove a gift card or a voucher from a checkout." 
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, promo_code):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        remove_promo_code_from_checkout(checkout, promo_code)
        return CheckoutRemovePromoCode(checkout=checkout)


class CheckoutUpdateMeta(UpdateMetaBaseMutation):
    # Staff-only (MANAGE_ORDERS) update of the checkout's public metadata.
    class Meta:
        description = "Updates metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = True
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"


class CheckoutUpdatePrivateMeta(UpdateMetaBaseMutation):
    # Staff-only (MANAGE_ORDERS) update of the checkout's private metadata.
    class Meta:
        description = "Updates private metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = False
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"


class CheckoutClearMeta(ClearMetaBaseMutation):
    # Staff-only (MANAGE_ORDERS) removal of a public metadata entry.
    class Meta:
        description = "Clear metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = True
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"


class CheckoutClearPrivateMeta(ClearMetaBaseMutation):
    # Staff-only (MANAGE_ORDERS) removal of a private metadata entry.
    class Meta:
        description = "Clear private metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = False
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
./CrossVul/dataset_final_sorted/CWE-200/py/good_4650_1
crossvul-python_data_good_3808_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack LLC
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import errno
import hashlib
import os
import re

from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images

LOG = logging.getLogger(__name__)

util_opts = [
    cfg.StrOpt('image_info_filename_pattern',
               default='$instances_path/$base_dir_name/%(image)s.info',
               help='Allows image information files to be stored in '
                    'non-standard locations')
    ]

flags.DECLARE('instances_path', 'nova.compute.manager')
flags.DECLARE('base_dir_name', 'nova.compute.manager')
FLAGS = flags.FLAGS
FLAGS.register_opts(util_opts)


def execute(*args, **kwargs):
    # Thin indirection over utils.execute so tests can stub out process
    # execution for this whole module in one place.
    return utils.execute(*args, **kwargs)


def get_iscsi_initiator():
    """Get iscsi initiator name for this machine"""
    # NOTE(vish) openiscsi stores initiator name in a file that
    #            needs root permission to read.
    contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
    for l in contents.split('\n'):
        if l.startswith('InitiatorName='):
            return l[l.index('=') + 1:].strip()
    # Falls through (returns None) if the file has no InitiatorName line.


def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    execute('qemu-img', 'create', '-f', disk_format, path, size)


def create_cow_image(backing_file, path):
    """Create COW image

    Creates a COW image with the given backing file

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    """
    execute('qemu-img', 'create', '-f', 'qcow2', '-o',
            'backing_file=%s' % backing_file, path)


def create_lvm_image(vg, lv, size, sparse=False):
    """Create LVM image.

    Creates a LVM image with given size.

    :param vg: existing volume group which should hold this image
    :param lv: name for this image (logical volume)
    :size: size of image in bytes
    :sparse: create sparse logical volume
    """
    free_space = volume_group_free_space(vg)

    def check_size(size):
        # Fail early rather than letting lvcreate fail on a full group.
        if size > free_space:
            raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
                                 ' Only %(free_space)db available,'
                                 ' but %(size)db required'
                                 ' by volume %(lv)s.') % locals())

    if sparse:
        # A sparse volume only preallocates a small amount of space up
        # front; warn if the group could not hold it when fully written.
        preallocated_space = 64 * 1024 * 1024
        check_size(preallocated_space)
        if free_space < size:
            LOG.warning(_('Volume group %(vg)s will not be able'
                          ' to hold sparse volume %(lv)s.'
                          ' Virtual volume size is %(size)db,'
                          ' but free space on volume group is'
                          ' only %(free_space)db.') % locals())

        cmd = ('lvcreate', '-L', '%db' % preallocated_space,
               '--virtualsize', '%db' % size, '-n', lv, vg)
    else:
        check_size(size)
        cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
    execute(*cmd, run_as_root=True, attempts=3)


def volume_group_free_space(vg):
    """Return available space on volume group in bytes.

    :param vg: volume group name
    """
    out, err = execute('vgs', '--noheadings', '--nosuffix',
                       '--units', 'b', '-o', 'vg_free', vg,
                       run_as_root=True)
    return int(out.strip())


def list_logical_volumes(vg):
    """List logical volumes paths for given volume group.

    :param vg: volume group name
    """
    out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                       run_as_root=True)
    return [line.strip() for line in out.splitlines()]


def logical_volume_size(path):
    """Get logical volume size in bytes.

    :param path: logical volume path
    """
    # TODO(p-draigbrady) POssibly replace with the more general
    # use of blockdev --getsize64 in future
    out, _err = execute('lvs', '-o', 'lv_size', '--noheadings', '--units',
                        'b', '--nosuffix', path, run_as_root=True)
    return int(out)


def clear_logical_volume(path):
    """Obfuscate the logical volume.

    :param path: logical volume path
    """
    # TODO(p-draigbrady): We currently overwrite with zeros
    # but we may want to make this configurable in future
    # for more or less security conscious setups.

    vol_size = logical_volume_size(path)
    bs = 1024 * 1024
    remaining_bytes = vol_size

    # The loop caters for versions of dd that
    # don't support the iflag=count_bytes option.
    while remaining_bytes:
        zero_blocks = remaining_bytes / bs
        seek_blocks = (vol_size - remaining_bytes) / bs
        zero_cmd = ('dd', 'bs=%s' % bs,
                    'if=/dev/zero', 'of=%s' % path,
                    'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
        if zero_blocks:
            utils.execute(*zero_cmd, run_as_root=True)
        # Whatever doesn't divide evenly by the current block size is
        # retried with a block size 1024 times smaller, so at most three
        # passes (1MiB, 1KiB, 1B) zero the whole volume.
        remaining_bytes %= bs
        bs /= 1024  # Limit to 3 iterations


def remove_logical_volumes(*paths):
    """Remove one or more logical volume."""
    # Zero each volume first so the freed extents cannot leak the old
    # volume's data to whoever is allocated them next.
    for path in paths:
        clear_logical_volume(path)

    if paths:
        lvremove = ('lvremove', '-f') + paths
        execute(*lvremove, attempts=3, run_as_root=True)


def pick_disk_driver_name(is_block_dev=False):
    """Pick the libvirt primary backend driver name

    If the hypervisor supports multiple backend drivers, then the name
    attribute selects the primary backend driver name, while the optional
    type attribute provides the sub-type.  For example, xen supports a name
    of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
    while qemu only supports a name of "qemu", but multiple types including
    "raw", "bochs", "qcow2", and "qed".

    :param is_block_dev:
    :returns: driver_name or None
    """
    if FLAGS.libvirt_type == "xen":
        if is_block_dev:
            return "phy"
        else:
            return "tap"
    elif FLAGS.libvirt_type in ('kvm', 'qemu'):
        return "qemu"
    else:
        # UML doesn't want a driver_name set
        return None


def get_disk_size(path):
    """Get the (virtual) size of a disk image

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    # qemu-img reports e.g. "40G (42949672960 bytes)"; take the byte count
    # from inside the parentheses.
    size = images.qemu_img_info(path)['virtual size']
    size = size.split('(')[1].split()[0]
    return int(size)


def get_disk_backing_file(path):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :returns: a path to the image's backing store
    """
    backing_file = images.qemu_img_info(path).get('backing file')

    if backing_file:
        if 'actual path: ' in backing_file:
            backing_file = backing_file.split('actual path: ')[1][:-1]
        backing_file = os.path.basename(backing_file)

    return backing_file


def copy_image(src, dest, host=None):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host
    """

    if not host:
        # We shell out to cp because that will intelligently copy
        # sparse files.  I.E. holes will not be written to DEST,
        # rather recreated efficiently.  In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
    else:
        dest = "%s:%s" % (host, dest)
        # Try rsync first as that can compress and create sparse dest files.
        # Note however that rsync currently doesn't read sparse files
        # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
        # At least network traffic is mitigated with compression.
        try:
            # Do a relatively light weight test first, so that we
            # can fall back to scp, without having run out of space
            # on the destination for example.
            execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
        except exception.ProcessExecutionError:
            execute('scp', src, dest)
        else:
            execute('rsync', '--sparse', '--compress', src, dest)


def mkfs(fs, path, label=None):
    """Format a file or block device

    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    """
    if fs == 'swap':
        execute('mkswap', path)
    else:
        args = ['mkfs', '-t', fs]
        #add -F to force no interactive excute on non-block device.
        if fs in ['ext3', 'ext4']:
            args.extend(['-F'])
        if label:
            # NOTE(review): '-n' is the vfat/ntfs label flag; ext* uses
            # '-L' -- confirm which filesystems this path is used with.
            args.extend(['-n', label])
        args.append(path)
        execute(*args)


def write_to_file(path, contents, umask=None):
    """Write the given contents to a file

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set when creating this file (will be reset)
    """
    if umask:
        saved_umask = os.umask(umask)

    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        # Restore the process umask even if the write fails.
        if umask:
            os.umask(saved_umask)


def chown(path, owner):
    """Change ownership of file or directory

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    execute('chown', owner, path, run_as_root=True)


def create_snapshot(disk_path, snapshot_name):
    """Create a snapshot in a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    """
    qemu_img_cmd = ('qemu-img',
                    'snapshot',
                    '-c',
                    snapshot_name,
                    disk_path)
    # NOTE(vish): libvirt changes ownership of images
    execute(*qemu_img_cmd, run_as_root=True)


def delete_snapshot(disk_path, snapshot_name):
    """Create a snapshot in a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    """
    qemu_img_cmd = ('qemu-img',
                    'snapshot',
                    '-d',
                    snapshot_name,
                    disk_path)
    # NOTE(vish): libvirt changes ownership of images
    execute(*qemu_img_cmd, run_as_root=True)


def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
    """Extract a named snapshot from a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    :param out_path: Desired path of extracted snapshot
    """
    # NOTE(markmc): ISO is just raw to qemu-img
    if dest_fmt == 'iso':
        dest_fmt = 'raw'
    qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt,
                    '-O', dest_fmt, '-s', snapshot_name,
                    disk_path, out_path)
    execute(*qemu_img_cmd)


def load_file(path):
    """Read contents of file

    :param path: File to read
    """
    with open(path, 'r') as fp:
        return fp.read()


def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    return file(*args, **kwargs)


def file_delete(path):
    """Delete (unlink) file

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    return os.unlink(path)


def get_fs_info(path):
    """Get free/used/total space info for a filesystem

    :param path: Any dirent on the filesystem
    :returns: A dict containing:

             :free: How much space is free (in bytes)
             :used: How much space is used (in bytes)
             :total: How big the filesystem is (in bytes)
    """
    hddinfo = os.statvfs(path)
    total = hddinfo.f_frsize * hddinfo.f_blocks
    free = hddinfo.f_frsize * hddinfo.f_bavail
    used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree)
    return {'total': total,
            'free': free,
            'used': used}


def fetch_image(context, target, image_id, user_id, project_id):
    """Grab image"""
    images.fetch_to_raw(context, image_id, target, user_id, project_id)


def get_info_filename(base_path):
    """Construct a filename for storing addtional information
    about a base image.

    Returns a filename.
    """
    base_file = os.path.basename(base_path)
    return (FLAGS.image_info_filename_pattern
            % {'image': base_file})


def is_valid_info_file(path):
    """Test if a given path matches the pattern for info files."""
    # sha1 hex digests are digestsize * 2 characters long.
    digest_size = hashlib.sha1().digestsize * 2
    regexp = (FLAGS.image_info_filename_pattern
              % {'image': ('([0-9a-f]{%(digest_size)d}|'
                           '[0-9a-f]{%(digest_size)d}_sm|'
                           '[0-9a-f]{%(digest_size)d}_[0-9]+)'
                           % {'digest_size': digest_size})})
    m = re.match(regexp, path)
    if m:
        return True
    return False


def read_stored_info(base_path, field=None):
    """Read information about an image.
Returns an empty dictionary if there is no info, just the field value if a field is requested, or the entire dictionary otherwise. """ info_file = get_info_filename(base_path) if not os.path.exists(info_file): # Special case to handle essex checksums being converted old_filename = base_path + '.sha1' if field == 'sha1' and os.path.exists(old_filename): hash_file = open(old_filename) hash_value = hash_file.read() hash_file.close() write_stored_info(base_path, field=field, value=hash_value) os.remove(old_filename) d = {field: hash_value} else: d = {} else: LOG.info(_('Reading image info file: %s'), info_file) f = open(info_file, 'r') serialized = f.read().rstrip() f.close() LOG.info(_('Read: %s'), serialized) try: d = jsonutils.loads(serialized) except ValueError, e: LOG.error(_('Error reading image info file %(filename)s: ' '%(error)s'), {'filename': info_file, 'error': e}) d = {} if field: return d.get(field, None) return d def write_stored_info(target, field=None, value=None): """Write information about an image.""" if not field: return info_file = get_info_filename(target) utils.ensure_tree(os.path.dirname(info_file)) d = read_stored_info(info_file) d[field] = value serialized = jsonutils.dumps(d) LOG.info(_('Writing image info file: %s'), info_file) LOG.info(_('Wrote: %s'), serialized) f = open(info_file, 'w') f.write(serialized) f.close()
./CrossVul/dataset_final_sorted/CWE-200/py/good_3808_0
crossvul-python_data_bad_3808_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack LLC
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import errno
import hashlib
import os
import re

from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images

LOG = logging.getLogger(__name__)

util_opts = [
    cfg.StrOpt('image_info_filename_pattern',
               default='$instances_path/$base_dir_name/%(image)s.info',
               help='Allows image information files to be stored in '
                    'non-standard locations')
    ]

flags.DECLARE('instances_path', 'nova.compute.manager')
flags.DECLARE('base_dir_name', 'nova.compute.manager')
FLAGS = flags.FLAGS
FLAGS.register_opts(util_opts)


def execute(*args, **kwargs):
    # Thin indirection over utils.execute so tests can stub out process
    # execution for this whole module in one place.
    return utils.execute(*args, **kwargs)


def get_iscsi_initiator():
    """Get iscsi initiator name for this machine"""
    # NOTE(vish) openiscsi stores initiator name in a file that
    #            needs root permission to read.
    contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
    for l in contents.split('\n'):
        if l.startswith('InitiatorName='):
            return l[l.index('=') + 1:].strip()
    # Falls through (returns None) if the file has no InitiatorName line.


def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    execute('qemu-img', 'create', '-f', disk_format, path, size)


def create_cow_image(backing_file, path):
    """Create COW image

    Creates a COW image with the given backing file

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    """
    execute('qemu-img', 'create', '-f', 'qcow2', '-o',
            'backing_file=%s' % backing_file, path)


def create_lvm_image(vg, lv, size, sparse=False):
    """Create LVM image.

    Creates a LVM image with given size.

    :param vg: existing volume group which should hold this image
    :param lv: name for this image (logical volume)
    :size: size of image in bytes
    :sparse: create sparse logical volume
    """
    free_space = volume_group_free_space(vg)

    def check_size(size):
        # Fail early rather than letting lvcreate fail on a full group.
        if size > free_space:
            raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
                                 ' Only %(free_space)db available,'
                                 ' but %(size)db required'
                                 ' by volume %(lv)s.') % locals())

    if sparse:
        # A sparse volume only preallocates a small amount of space up
        # front; warn if the group could not hold it when fully written.
        preallocated_space = 64 * 1024 * 1024
        check_size(preallocated_space)
        if free_space < size:
            LOG.warning(_('Volume group %(vg)s will not be able'
                          ' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)db,' ' but free space on volume group is' ' only %(free_space)db.') % locals()) cmd = ('lvcreate', '-L', '%db' % preallocated_space, '--virtualsize', '%db' % size, '-n', lv, vg) else: check_size(size) cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg) execute(*cmd, run_as_root=True, attempts=3) def volume_group_free_space(vg): """Return available space on volume group in bytes. :param vg: volume group name """ out, err = execute('vgs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'vg_free', vg, run_as_root=True) return int(out.strip()) def list_logical_volumes(vg): """List logical volumes paths for given volume group. :param vg: volume group name """ out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg, run_as_root=True) return [line.strip() for line in out.splitlines()] def remove_logical_volumes(*paths): """Remove one or more logical volume.""" if paths: lvremove = ('lvremove', '-f') + paths execute(*lvremove, attempts=3, run_as_root=True) def pick_disk_driver_name(is_block_dev=False): """Pick the libvirt primary backend driver name If the hypervisor supports multiple backend drivers, then the name attribute selects the primary backend driver name, while the optional type attribute provides the sub-type. For example, xen supports a name of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2", while qemu only supports a name of "qemu", but multiple types including "raw", "bochs", "qcow2", and "qed". :param is_block_dev: :returns: driver_name or None """ if FLAGS.libvirt_type == "xen": if is_block_dev: return "phy" else: return "tap" elif FLAGS.libvirt_type in ('kvm', 'qemu'): return "qemu" else: # UML doesn't want a driver_name set return None def get_disk_size(path): """Get the (virtual) size of a disk image :param path: Path to the disk image :returns: Size (in bytes) of the given disk image as it would be seen by a virtual machine. 
    """
    # qemu-img reports e.g. "40G (42949672960 bytes)"; take the byte count
    # from inside the parentheses.
    size = images.qemu_img_info(path)['virtual size']
    size = size.split('(')[1].split()[0]
    return int(size)


def get_disk_backing_file(path):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :returns: a path to the image's backing store
    """
    backing_file = images.qemu_img_info(path).get('backing file')

    if backing_file:
        if 'actual path: ' in backing_file:
            backing_file = backing_file.split('actual path: ')[1][:-1]
        backing_file = os.path.basename(backing_file)

    return backing_file


def copy_image(src, dest, host=None):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host
    """

    if not host:
        # We shell out to cp because that will intelligently copy
        # sparse files.  I.E. holes will not be written to DEST,
        # rather recreated efficiently.  In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
    else:
        dest = "%s:%s" % (host, dest)
        # Try rsync first as that can compress and create sparse dest files.
        # Note however that rsync currently doesn't read sparse files
        # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
        # At least network traffic is mitigated with compression.
        try:
            # Do a relatively light weight test first, so that we
            # can fall back to scp, without having run out of space
            # on the destination for example.
            execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
        except exception.ProcessExecutionError:
            execute('scp', src, dest)
        else:
            execute('rsync', '--sparse', '--compress', src, dest)


def mkfs(fs, path, label=None):
    """Format a file or block device

    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    """
    if fs == 'swap':
        execute('mkswap', path)
    else:
        args = ['mkfs', '-t', fs]
        #add -F to force no interactive excute on non-block device.
        if fs in ['ext3', 'ext4']:
            args.extend(['-F'])
        if label:
            # NOTE(review): '-n' is the vfat/ntfs label flag; ext* uses
            # '-L' -- confirm which filesystems this path is used with.
            args.extend(['-n', label])
        args.append(path)
        execute(*args)


def write_to_file(path, contents, umask=None):
    """Write the given contents to a file

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set when creating this file (will be reset)
    """
    if umask:
        saved_umask = os.umask(umask)

    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        # Restore the process umask even if the write fails.
        if umask:
            os.umask(saved_umask)


def chown(path, owner):
    """Change ownership of file or directory

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    execute('chown', owner, path, run_as_root=True)


def create_snapshot(disk_path, snapshot_name):
    """Create a snapshot in a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    """
    qemu_img_cmd = ('qemu-img',
                    'snapshot',
                    '-c',
                    snapshot_name,
                    disk_path)
    # NOTE(vish): libvirt changes ownership of images
    execute(*qemu_img_cmd, run_as_root=True)


def delete_snapshot(disk_path, snapshot_name):
    """Create a snapshot in a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    """
    qemu_img_cmd = ('qemu-img',
                    'snapshot',
                    '-d',
                    snapshot_name,
                    disk_path)
    # NOTE(vish): libvirt changes ownership of images
    execute(*qemu_img_cmd, run_as_root=True)


def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
    """Extract a named snapshot from a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    :param out_path: Desired path of extracted snapshot
    """
    # NOTE(markmc): ISO is just raw to qemu-img
    if dest_fmt == 'iso':
        dest_fmt = 'raw'
    qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt,
                    '-O', dest_fmt, '-s', snapshot_name,
                    disk_path, out_path)
    execute(*qemu_img_cmd)


def load_file(path):
    """Read contents of file

    :param path: File to read
    """
    # NOTE(review): the source chunk is truncated here, mid-statement; the
    # trailing `return` below reproduces the visible content faithfully.
    with open(path, 'r') as fp:
        return
fp.read() def file_open(*args, **kwargs): """Open file see built-in file() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return file(*args, **kwargs) def file_delete(path): """Delete (unlink) file Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return os.unlink(path) def get_fs_info(path): """Get free/used/total space info for a filesystem :param path: Any dirent on the filesystem :returns: A dict containing: :free: How much space is free (in bytes) :used: How much space is used (in bytes) :total: How big the filesystem is (in bytes) """ hddinfo = os.statvfs(path) total = hddinfo.f_frsize * hddinfo.f_blocks free = hddinfo.f_frsize * hddinfo.f_bavail used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree) return {'total': total, 'free': free, 'used': used} def fetch_image(context, target, image_id, user_id, project_id): """Grab image""" images.fetch_to_raw(context, image_id, target, user_id, project_id) def get_info_filename(base_path): """Construct a filename for storing addtional information about a base image. Returns a filename. """ base_file = os.path.basename(base_path) return (FLAGS.image_info_filename_pattern % {'image': base_file}) def is_valid_info_file(path): """Test if a given path matches the pattern for info files.""" digest_size = hashlib.sha1().digestsize * 2 regexp = (FLAGS.image_info_filename_pattern % {'image': ('([0-9a-f]{%(digest_size)d}|' '[0-9a-f]{%(digest_size)d}_sm|' '[0-9a-f]{%(digest_size)d}_[0-9]+)' % {'digest_size': digest_size})}) m = re.match(regexp, path) if m: return True return False def read_stored_info(base_path, field=None): """Read information about an image. 
Returns an empty dictionary if there is no info, just the field value if a field is requested, or the entire dictionary otherwise. """ info_file = get_info_filename(base_path) if not os.path.exists(info_file): # Special case to handle essex checksums being converted old_filename = base_path + '.sha1' if field == 'sha1' and os.path.exists(old_filename): hash_file = open(old_filename) hash_value = hash_file.read() hash_file.close() write_stored_info(base_path, field=field, value=hash_value) os.remove(old_filename) d = {field: hash_value} else: d = {} else: LOG.info(_('Reading image info file: %s'), info_file) f = open(info_file, 'r') serialized = f.read().rstrip() f.close() LOG.info(_('Read: %s'), serialized) try: d = jsonutils.loads(serialized) except ValueError, e: LOG.error(_('Error reading image info file %(filename)s: ' '%(error)s'), {'filename': info_file, 'error': e}) d = {} if field: return d.get(field, None) return d def write_stored_info(target, field=None, value=None): """Write information about an image.""" if not field: return info_file = get_info_filename(target) utils.ensure_tree(os.path.dirname(info_file)) d = read_stored_info(info_file) d[field] = value serialized = jsonutils.dumps(d) LOG.info(_('Writing image info file: %s'), info_file) LOG.info(_('Wrote: %s'), serialized) f = open(info_file, 'w') f.write(serialized) f.close()
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3808_0
crossvul-python_data_bad_4954_0
from __future__ import unicode_literals import base64 import binascii import hashlib import importlib from collections import OrderedDict from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.signals import setting_changed from django.dispatch import receiver from django.utils import lru_cache from django.utils.crypto import ( constant_time_compare, get_random_string, pbkdf2, ) from django.utils.encoding import force_bytes, force_str, force_text from django.utils.module_loading import import_string from django.utils.translation import ugettext_noop as _ UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX def is_password_usable(encoded): if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX): return False try: identify_hasher(encoded) except ValueError: return False return True def check_password(password, encoded, setter=None, preferred='default'): """ Returns a boolean of whether the raw password matches the three part encoded digest. If setter is specified, it'll be called when you need to regenerate the password. """ if password is None or not is_password_usable(encoded): return False preferred = get_hasher(preferred) hasher = identify_hasher(encoded) must_update = hasher.algorithm != preferred.algorithm if not must_update: must_update = preferred.must_update(encoded) is_correct = hasher.verify(password, encoded) if setter and is_correct and must_update: setter(password) return is_correct def make_password(password, salt=None, hasher='default'): """ Turn a plain-text password into a hash for database storage Same as encode() but generates a new random salt. If password is None then a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string will be returned which disallows logins. Additional random string reduces chances of gaining access to staff or superuser accounts. 
See ticket #20079 for more info. """ if password is None: return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH) hasher = get_hasher(hasher) if not salt: salt = hasher.salt() return hasher.encode(password, salt) @lru_cache.lru_cache() def get_hashers(): hashers = [] for hasher_path in settings.PASSWORD_HASHERS: hasher_cls = import_string(hasher_path) hasher = hasher_cls() if not getattr(hasher, 'algorithm'): raise ImproperlyConfigured("hasher doesn't specify an " "algorithm name: %s" % hasher_path) hashers.append(hasher) return hashers @lru_cache.lru_cache() def get_hashers_by_algorithm(): return {hasher.algorithm: hasher for hasher in get_hashers()} @receiver(setting_changed) def reset_hashers(**kwargs): if kwargs['setting'] == 'PASSWORD_HASHERS': get_hashers.cache_clear() get_hashers_by_algorithm.cache_clear() def get_hasher(algorithm='default'): """ Returns an instance of a loaded password hasher. If algorithm is 'default', the default hasher will be returned. This function will also lazy import hashers specified in your settings file if needed. """ if hasattr(algorithm, 'algorithm'): return algorithm elif algorithm == 'default': return get_hashers()[0] else: hashers = get_hashers_by_algorithm() try: return hashers[algorithm] except KeyError: raise ValueError("Unknown password hashing algorithm '%s'. " "Did you specify it in the PASSWORD_HASHERS " "setting?" % algorithm) def identify_hasher(encoded): """ Returns an instance of a loaded password hasher. Identifies hasher algorithm by examining encoded hash, and calls get_hasher() to return hasher. Raises ValueError if algorithm cannot be identified, or if hasher is not loaded. """ # Ancient versions of Django created plain MD5 passwords and accepted # MD5 passwords with an empty salt. 
if ((len(encoded) == 32 and '$' not in encoded) or (len(encoded) == 37 and encoded.startswith('md5$$'))): algorithm = 'unsalted_md5' # Ancient versions of Django accepted SHA1 passwords with an empty salt. elif len(encoded) == 46 and encoded.startswith('sha1$$'): algorithm = 'unsalted_sha1' else: algorithm = encoded.split('$', 1)[0] return get_hasher(algorithm) def mask_hash(hash, show=6, char="*"): """ Returns the given hash, with only the first ``show`` number shown. The rest are masked with ``char`` for security reasons. """ masked = hash[:show] masked += char * len(hash[show:]) return masked class BasePasswordHasher(object): """ Abstract base class for password hashers When creating your own hasher, you need to override algorithm, verify(), encode() and safe_summary(). PasswordHasher objects are immutable. """ algorithm = None library = None def _load_library(self): if self.library is not None: if isinstance(self.library, (tuple, list)): name, mod_path = self.library else: mod_path = self.library try: module = importlib.import_module(mod_path) except ImportError as e: raise ValueError("Couldn't load %r algorithm library: %s" % (self.__class__.__name__, e)) return module raise ValueError("Hasher %r doesn't specify a library attribute" % self.__class__.__name__) def salt(self): """ Generates a cryptographically secure nonce salt in ASCII """ return get_random_string() def verify(self, password, encoded): """ Checks if the given password is correct """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method') def encode(self, password, salt): """ Creates an encoded database value The result is normally formatted as "algorithm$salt$hash" and must be fewer than 128 characters. 
""" raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method') def safe_summary(self, encoded): """ Returns a summary of safe values The result is a dictionary and will be used where the password field must be displayed to construct a safe representation of the password. """ raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method') def must_update(self, encoded): return False class PBKDF2PasswordHasher(BasePasswordHasher): """ Secure password hashing using the PBKDF2 algorithm (recommended) Configured to use PBKDF2 + HMAC + SHA256. The result is a 64 byte binary string. Iterations may be changed safely but you must rename the algorithm if you change SHA256. """ algorithm = "pbkdf2_sha256" iterations = 30000 digest = hashlib.sha256 def encode(self, password, salt, iterations=None): assert password is not None assert salt and '$' not in salt if not iterations: iterations = self.iterations hash = pbkdf2(password, salt, iterations, digest=self.digest) hash = base64.b64encode(hash).decode('ascii').strip() return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash) def verify(self, password, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt, int(iterations)) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('iterations'), iterations), (_('salt'), mask_hash(salt)), (_('hash'), mask_hash(hash)), ]) def must_update(self, encoded): algorithm, iterations, salt, hash = encoded.split('$', 3) return int(iterations) != self.iterations class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher): """ Alternate PBKDF2 hasher which uses SHA1, the default PRF recommended by PKCS #5. 
This is compatible with other implementations of PBKDF2, such as openssl's PKCS5_PBKDF2_HMAC_SHA1(). """ algorithm = "pbkdf2_sha1" digest = hashlib.sha1 class BCryptSHA256PasswordHasher(BasePasswordHasher): """ Secure password hashing using the bcrypt algorithm (recommended) This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. """ algorithm = "bcrypt_sha256" digest = hashlib.sha256 library = ("bcrypt", "bcrypt") rounds = 12 def salt(self): bcrypt = self._load_library() return bcrypt.gensalt(self.rounds) def encode(self, password, salt): bcrypt = self._load_library() # Hash the password prior to using bcrypt to prevent password # truncation as described in #20138. if self.digest is not None: # Use binascii.hexlify() because a hex encoded bytestring is # Unicode on Python 3. password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) data = bcrypt.hashpw(password, salt) return "%s$%s" % (self.algorithm, force_text(data)) def verify(self, password, encoded): algorithm, data = encoded.split('$', 1) assert algorithm == self.algorithm bcrypt = self._load_library() # Hash the password prior to using bcrypt to prevent password # truncation as described in #20138. if self.digest is not None: # Use binascii.hexlify() because a hex encoded bytestring is # Unicode on Python 3. 
password = binascii.hexlify(self.digest(force_bytes(password)).digest()) else: password = force_bytes(password) # Ensure that our data is a bytestring data = force_bytes(data) # force_bytes() necessary for py-bcrypt compatibility hashpw = force_bytes(bcrypt.hashpw(password, data)) return constant_time_compare(data, hashpw) def safe_summary(self, encoded): algorithm, empty, algostr, work_factor, data = encoded.split('$', 4) assert algorithm == self.algorithm salt, checksum = data[:22], data[22:] return OrderedDict([ (_('algorithm'), algorithm), (_('work factor'), work_factor), (_('salt'), mask_hash(salt)), (_('checksum'), mask_hash(checksum)), ]) def must_update(self, encoded): algorithm, empty, algostr, rounds, data = encoded.split('$', 4) return int(rounds) != self.rounds class BCryptPasswordHasher(BCryptSHA256PasswordHasher): """ Secure password hashing using the bcrypt algorithm This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. This hasher does not first hash the password which means it is subject to the 72 character bcrypt password truncation, most use cases should prefer the BCryptSHA256PasswordHasher. 
See: https://code.djangoproject.com/ticket/20138 """ algorithm = "bcrypt" digest = None class SHA1PasswordHasher(BasePasswordHasher): """ The SHA1 password hashing algorithm (not recommended) """ algorithm = "sha1" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.sha1(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) class MD5PasswordHasher(BasePasswordHasher): """ The Salted MD5 password hashing algorithm (not recommended) """ algorithm = "md5" def encode(self, password, salt): assert password is not None assert salt and '$' not in salt hash = hashlib.md5(force_bytes(salt + password)).hexdigest() return "%s$%s$%s" % (self.algorithm, salt, hash) def verify(self, password, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm encoded_2 = self.encode(password, salt) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): algorithm, salt, hash = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), mask_hash(salt, show=2)), (_('hash'), mask_hash(hash)), ]) class UnsaltedSHA1PasswordHasher(BasePasswordHasher): """ Very insecure algorithm that you should *never* use; stores SHA1 hashes with an empty salt. This class is implemented because Django used to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. 
""" algorithm = "unsalted_sha1" def salt(self): return '' def encode(self, password, salt): assert salt == '' hash = hashlib.sha1(force_bytes(password)).hexdigest() return 'sha1$$%s' % hash def verify(self, password, encoded): encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): assert encoded.startswith('sha1$$') hash = encoded[6:] return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(hash)), ]) class UnsaltedMD5PasswordHasher(BasePasswordHasher): """ Incredibly insecure algorithm that you should *never* use; stores unsalted MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an empty salt. This class is implemented because Django used to store passwords this way and to accept such password hashes. Some older Django installs still have these values lingering around so we need to handle and upgrade them properly. """ algorithm = "unsalted_md5" def salt(self): return '' def encode(self, password, salt): assert salt == '' return hashlib.md5(force_bytes(password)).hexdigest() def verify(self, password, encoded): if len(encoded) == 37 and encoded.startswith('md5$$'): encoded = encoded[5:] encoded_2 = self.encode(password, '') return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): return OrderedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(encoded, show=3)), ]) class CryptPasswordHasher(BasePasswordHasher): """ Password hashing using UNIX crypt (not recommended) The crypt module is not supported on all platforms. 
""" algorithm = "crypt" library = "crypt" def salt(self): return get_random_string(2) def encode(self, password, salt): crypt = self._load_library() assert len(salt) == 2 data = crypt.crypt(force_str(password), salt) # we don't need to store the salt, but Django used to do this return "%s$%s$%s" % (self.algorithm, '', data) def verify(self, password, encoded): crypt = self._load_library() algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return constant_time_compare(data, crypt.crypt(force_str(password), data)) def safe_summary(self, encoded): algorithm, salt, data = encoded.split('$', 2) assert algorithm == self.algorithm return OrderedDict([ (_('algorithm'), algorithm), (_('salt'), salt), (_('hash'), mask_hash(data, show=3)), ])
./CrossVul/dataset_final_sorted/CWE-200/py/bad_4954_0
crossvul-python_data_good_4177_1
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
from rest_framework.views import APIView

from backend.response import FormattedResponse
from config import config
from backend.permissions import AdminOrAnonymousReadOnly


class ConfigView(APIView):
    """Read and write runtime configuration values.

    Reads: anonymous/regular users get only non-sensitive keys; superusers
    get everything.  Writes are gated by ``AdminOrAnonymousReadOnly``.
    """

    throttle_scope = "config"
    permission_classes = (AdminOrAnonymousReadOnly,)

    def get(self, request, name=None):
        """Return all config (filtered by privilege) or a single key."""
        if name is None:
            if request.user.is_superuser:
                return FormattedResponse(config.get_all())
            return FormattedResponse(config.get_all_non_sensitive())
        # BUG FIX: was `request.is_superuser` — the superuser flag lives on
        # request.user, so the old check could never grant access to
        # sensitive keys (sensitive values were denied even to admins).
        if not config.is_sensitive(name) or request.user.is_superuser:
            return FormattedResponse(config.get(name))
        return FormattedResponse(status=HTTP_403_FORBIDDEN)

    def post(self, request, name):
        """Create/overwrite a config key from the request body."""
        if "value" not in request.data:
            return FormattedResponse(status=HTTP_400_BAD_REQUEST)
        config.set(name, request.data.get("value"))
        return FormattedResponse()

    def patch(self, request, name):
        """Append to a list-valued key, otherwise overwrite the value."""
        if "value" not in request.data:
            return FormattedResponse(status=HTTP_400_BAD_REQUEST)
        current = config.get(name)
        if current is not None and isinstance(current, list):
            # BUG FIX: the old code did
            #   config.set("name", config.get(name).append(...))
            # which stored None (list.append returns None) under the literal
            # key "name" instead of appending under the requested key.
            current.append(request.data["value"])
            config.set(name, current)
            return FormattedResponse()
        config.set(name, request.data.get("value"))
        return FormattedResponse()
./CrossVul/dataset_final_sorted/CWE-200/py/good_4177_1
crossvul-python_data_good_3112_1
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Account related views: profile editing, registration, login/logout,
# password management and contact/hosting forms.

from __future__ import unicode_literals

from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import logout
from django.conf import settings
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.core.mail.message import EmailMultiAlternatives
from django.utils import translation
from django.utils.cache import patch_response_headers
from django.utils.crypto import get_random_string
from django.utils.translation import get_language
from django.contrib.auth.models import User
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from django.contrib.auth import update_session_auth_hash
from django.core.urlresolvers import reverse

from rest_framework.authtoken.models import Token

from six.moves.urllib.parse import urlencode

from social.backends.utils import load_backends
from social.apps.django_app.utils import BACKENDS
from social.apps.django_app.views import complete

from weblate.accounts.forms import (
    RegistrationForm, PasswordForm, PasswordChangeForm, EmailForm, ResetForm,
    LoginForm, HostingForm,
    CaptchaRegistrationForm
)
from weblate.logger import LOGGER
from weblate.accounts.avatar import get_avatar_image, get_fallback_avatar_url
from weblate.accounts.models import set_lang, remove_user, Profile
from weblate.trans import messages
from weblate.trans.models import Change, Project, SubProject
from weblate.trans.views.helper import get_project
from weblate.accounts.forms import (
    ProfileForm, SubscriptionForm, UserForm, ContactForm,
    SubscriptionSettingsForm, UserSettingsForm, DashboardSettingsForm
)
from weblate import appsettings

# Body of the mail sent by the contact form to site admins.
CONTACT_TEMPLATE = '''
Message from %(name)s <%(email)s):

%(message)s
'''

# Body of the mail sent by the hosting-request form to site admins.
HOSTING_TEMPLATE = '''
%(name)s <%(email)s> wants to host %(project)s

Project:    %(project)s
Website:    %(url)s
Repository: %(repo)s
Filemask:   %(mask)s
Username:   %(username)s

Additional message:

%(message)s
'''


class RegistrationTemplateView(TemplateView):
    '''
    Class for rendering registration pages.
    '''
    def get_context_data(self, **kwargs):
        '''
        Creates context for rendering page.
        '''
        context = super(RegistrationTemplateView, self).get_context_data(
            **kwargs
        )
        context['title'] = _('User registration')
        return context


def mail_admins_contact(request, subject, message, context, sender):
    '''
    Sends a message to the admins, as defined by the ADMINS setting.

    :param subject: %-format template for the mail subject
    :param message: %-format template for the mail body
    :param context: dict interpolated into both templates
    :param sender: address put into the Reply-To header
    '''
    LOGGER.info(
        'contact form from %s',
        sender,
    )
    if not settings.ADMINS:
        # No recipients configured; tell the user instead of failing silently.
        messages.error(
            request,
            _('Message could not be sent to administrator!')
        )
        LOGGER.error(
            'ADMINS not configured, can not send message!'
        )
        return

    mail = EmailMultiAlternatives(
        '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject % context),
        message % context,
        to=[a[1] for a in settings.ADMINS],
        headers={'Reply-To': sender},
    )

    mail.send(fail_silently=False)

    messages.success(
        request,
        _('Message has been sent to administrator.')
    )


def deny_demo(request):
    """
    Denies editing of demo account on demo server.
    """
    messages.warning(
        request,
        _('You cannot change demo account on the demo server.')
    )
    return redirect_profile(request.POST.get('activetab'))


def redirect_profile(page=''):
    # Redirect to the profile page, optionally jumping to a tab anchor
    # (only fragments starting with '#' are honored).
    url = reverse('profile')
    if page and page.startswith('#'):
        url = url + page
    return HttpResponseRedirect(url)


@login_required
def user_profile(request):
    '''
    Profile editing view: handles five settings forms plus the user form,
    and applies the (possibly changed) interface language on save.
    '''
    profile = request.user.profile

    if not profile.language:
        # First visit with no stored language: remember the current one.
        profile.language = get_language()
        profile.save()

    form_classes = [
        ProfileForm,
        SubscriptionForm,
        SubscriptionSettingsForm,
        UserSettingsForm,
        DashboardSettingsForm,
    ]

    if request.method == 'POST':
        # Parse POST params
        forms = [form(request.POST, instance=profile)
                 for form in form_classes]
        forms.append(UserForm(request.POST, instance=request.user))

        if appsettings.DEMO_SERVER and request.user.username == 'demo':
            return deny_demo(request)

        if all([form.is_valid() for form in forms]):
            # Save changes
            for form in forms:
                form.save()

            # Change language
            set_lang(request, request.user.profile)

            # Redirect after saving (and possibly changing language)
            response = redirect_profile(request.POST.get('activetab'))

            # Set language cookie and activate new language (for message below)
            lang_code = profile.language
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
            translation.activate(lang_code)

            messages.success(request, _('Your profile has been updated.'))

            return response
    else:
        forms = [form(instance=profile) for form in form_classes]
        forms.append(UserForm(instance=request.user))

    social = request.user.social_auth.all()
    social_names = [assoc.provider for assoc in social]
    all_backends = set(load_backends(BACKENDS).keys())
    # Backends the user can still associate ('email' is always offered).
    new_backends = [
        x for x in all_backends
        if x == 'email' or x not in social_names
    ]
    license_projects = SubProject.objects.filter(
        project__in=Project.objects.all_acl(request.user)
    ).exclude(
        license=''
    )

    result = render(
        request,
        'accounts/profile.html',
        {
            'form': forms[0],
            'subscriptionform': forms[1],
            'subscriptionsettingsform': forms[2],
            'usersettingsform': forms[3],
            'dashboardsettingsform': forms[4],
            'userform': forms[5],
            'profile': profile,
            'title': _('User profile'),
            'licenses': license_projects,
            'associated': social,
            'new_backends': new_backends,
        }
    )
    result.set_cookie(
        settings.LANGUAGE_COOKIE_NAME,
        profile.language
    )
    return result


@login_required
def user_remove(request):
    # Account deletion; requires POST confirmation, otherwise shows the
    # confirmation page.
    if appsettings.DEMO_SERVER and request.user.username == 'demo':
        return deny_demo(request)

    if request.method == 'POST':
        remove_user(request.user)
        logout(request)
        messages.success(
            request,
            _('Your account has been removed.')
        )
        return redirect('home')

    return render(
        request,
        'accounts/removal.html',
    )


def get_initial_contact(request):
    '''
    Fills in initial contact form fields from request.
    '''
    initial = {}
    if request.user.is_authenticated():
        initial['name'] = request.user.first_name
        initial['email'] = request.user.email
    return initial


def contact(request):
    # Public contact form; mails admins via mail_admins_contact().
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            mail_admins_contact(
                request,
                '%(subject)s',
                CONTACT_TEMPLATE,
                form.cleaned_data,
                form.cleaned_data['email'],
            )
            return redirect('home')
    else:
        initial = get_initial_contact(request)
        if 'subject' in request.GET:
            initial['subject'] = request.GET['subject']
        form = ContactForm(initial=initial)

    return render(
        request,
        'accounts/contact.html',
        {
            'form': form,
            'title': _('Contact'),
        }
    )


@login_required
def hosting(request):
    '''
    Form for hosting request.
    '''
    if not appsettings.OFFER_HOSTING:
        return redirect('home')

    if request.method == 'POST':
        form = HostingForm(request.POST)
        if form.is_valid():
            context = form.cleaned_data
            context['username'] = request.user.username
            mail_admins_contact(
                request,
                'Hosting request for %(project)s',
                HOSTING_TEMPLATE,
                context,
                form.cleaned_data['email'],
            )
            return redirect('home')
    else:
        initial = get_initial_contact(request)
        form = HostingForm(initial=initial)

    return render(
        request,
        'accounts/hosting.html',
        {
            'form': form,
            'title': _('Hosting'),
        }
    )


def user_page(request, user):
    '''
    User details page.
    '''
    user = get_object_or_404(User, username=user)
    profile = Profile.objects.get_or_create(user=user)[0]

    # Filter all user activity
    all_changes = Change.objects.last_changes(request.user).filter(
        user=user,
    )

    # Last user activity
    last_changes = all_changes[:10]

    # Filter where project is active
    user_projects_ids = set(all_changes.values_list(
        'translation__subproject__project', flat=True
    ))
    user_projects = Project.objects.filter(id__in=user_projects_ids)

    return render(
        request,
        'accounts/user.html',
        {
            'page_profile': profile,
            'page_user': user,
            'last_changes': last_changes,
            'last_changes_url': urlencode(
                {'user': user.username.encode('utf-8')}
            ),
            'user_projects': user_projects,
        }
    )


def user_avatar(request, user, size):
    '''
    User avatar page.
    '''
    user = get_object_or_404(User, username=user)

    if user.email == 'noreply@weblate.org':
        # Service account: serve the static fallback instead of a lookup.
        return redirect(get_fallback_avatar_url(size))

    response = HttpResponse(
        content_type='image/png',
        content=get_avatar_image(request, user, size)
    )
    # Avatars change rarely; cache for a week.
    patch_response_headers(response, 3600 * 24 * 7)
    return response


def weblate_login(request):
    '''
    Login handler, just wrapper around login.
    '''

    # Redirect logged in users to profile
    if request.user.is_authenticated():
        return redirect_profile()

    # Redirect if there is only one backend
    auth_backends = list(load_backends(BACKENDS).keys())
    if len(auth_backends) == 1 and auth_backends[0] != 'email':
        return redirect('social:begin', auth_backends[0])

    return auth_views.login(
        request,
        template_name='accounts/login.html',
        authentication_form=LoginForm,
        extra_context={
            'login_backends': [
                x for x in auth_backends if x != 'email'
            ],
            'can_reset': 'email' in auth_backends,
            'title': _('Login'),
        }
    )


@login_required
def weblate_logout(request):
    '''
    Logout handler, just wrapper around standard logout.
    '''
    messages.info(request, _('Thanks for using Weblate!'))

    return auth_views.logout(
        request,
        next_page=reverse('home'),
    )


def register(request):
    '''
    Registration form.
    '''
    if appsettings.REGISTRATION_CAPTCHA:
        form_class = CaptchaRegistrationForm
    else:
        form_class = RegistrationForm

    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid() and appsettings.REGISTRATION_OPEN:
            # Ensure we do registration in separate session
            # not sent to client
            request.session.create()
            result = complete(request, 'email')
            request.session.save()
            request.session = None
            return result
    else:
        form = form_class()

    backends = set(load_backends(BACKENDS).keys())

    # Redirect if there is only one backend
    if len(backends) == 1 and 'email' not in backends:
        return redirect('social:begin', backends.pop())

    return render(
        request,
        'accounts/register.html',
        {
            'registration_email': 'email' in backends,
            'registration_backends': backends - set(['email']),
            'title': _('User registration'),
            'form': form,
        }
    )


@login_required
def email_login(request):
    '''
    Connect email.
    '''
    if request.method == 'POST':
        form = EmailForm(request.POST)
        if form.is_valid():
            return complete(request, 'email')
    else:
        form = EmailForm()

    return render(
        request,
        'accounts/email.html',
        {
            'title': _('Register email'),
            'form': form,
        }
    )


@login_required
def password(request):
    '''
    Password change / set form.
    '''
    if appsettings.DEMO_SERVER and request.user.username == 'demo':
        return deny_demo(request)

    do_change = False

    if not request.user.has_usable_password():
        # No current password to verify (e.g. social-auth account).
        do_change = True
        change_form = None
    elif request.method == 'POST':
        change_form = PasswordChangeForm(request.POST)
        if change_form.is_valid():
            cur_password = change_form.cleaned_data['password']
            do_change = request.user.check_password(cur_password)
            if not do_change:
                messages.error(
                    request,
                    _('You have entered an invalid password.')
                )
    else:
        change_form = PasswordChangeForm()

    if request.method == 'POST':
        form = PasswordForm(request.POST)
        if form.is_valid() and do_change:
            # Clear flag forcing user to set password
            redirect_page = '#auth'
            if 'show_set_password' in request.session:
                del request.session['show_set_password']
                redirect_page = ''

            request.user.set_password(
                form.cleaned_data['password1']
            )
            request.user.save()

            # Update session hash
            update_session_auth_hash(request, request.user)

            messages.success(
                request,
                _('Your password has been changed.')
            )
            return redirect_profile(redirect_page)
    else:
        form = PasswordForm()

    return render(
        request,
        'accounts/password.html',
        {
            'title': _('Change password'),
            'change_form': change_form,
            'form': form,
        }
    )


def reset_password(request):
    '''
    Password reset handling.
    '''
    if 'email' not in load_backends(BACKENDS).keys():
        messages.error(
            request,
            _('Can not reset password, email authentication is disabled!')
        )
        return redirect('login')

    if request.method == 'POST':
        form = ResetForm(request.POST)
        if form.is_valid():
            # Force creating new session
            request.session.create()
            if request.user.is_authenticated():
                logout(request)
            request.session['password_reset'] = True
            return complete(request, 'email')
        else:
            return redirect('email-sent')
    else:
        form = ResetForm()

    return render(
        request,
        'accounts/reset.html',
        {
            'title': _('Password reset'),
            'form': form,
        }
    )


@login_required
def reset_api_key(request):
    """Resets user API key"""
    request.user.auth_token.delete()
    Token.objects.create(
        user=request.user,
        key=get_random_string(40)
    )

    return redirect_profile('#api')


@login_required
def watch(request, project):
    # Subscribe current user to the project's notifications.
    obj = get_project(request, project)
    request.user.profile.subscriptions.add(obj)
    return redirect(obj)


@login_required
def unwatch(request, project):
    # Unsubscribe current user from the project's notifications.
    obj = get_project(request, project)
    request.user.profile.subscriptions.remove(obj)
    return redirect(obj)
./CrossVul/dataset_final_sorted/CWE-200/py/good_3112_1
crossvul-python_data_bad_1559_1
import errno
import logging
import os
import uuid
import struct
import time
import base64
import socket

from ceph_deploy.cliutil import priority
from ceph_deploy import conf, hosts, exc
from ceph_deploy.util import arg_validators, ssh, net
from ceph_deploy.misc import mon_hosts
from ceph_deploy.lib import remoto
from ceph_deploy.connection import get_local_connection


LOG = logging.getLogger(__name__)


def generate_auth_key():
    """Return a base64-encoded cephx secret for a monitor keyring.

    The binary layout matches what the ceph tooling expects: a
    little-endian header (key type, creation time, key length) followed
    by 16 random bytes.
    """
    key = os.urandom(16)
    header = struct.pack(
        '<hiih',
        1,                 # le16 type: CEPH_CRYPTO_AES
        int(time.time()),  # le32 created: seconds
        0,                 # le32 created: nanoseconds
        len(key),          # le16: len(key)
    )
    return base64.b64encode(header + key)


def ssh_copy_keys(hostname, username=None):
    """Ensure passwordless SSH to ``hostname``, pushing our public key if needed.

    If passwordless SSH already works this is a no-op.  Otherwise a local
    ``~/.ssh/id_rsa`` key pair is created when missing, and the public key is
    appended to the remote user's ``.ssh/authorized_keys`` (skipped with a
    warning when that file does not exist on the remote host).

    :param hostname: remote host to make reachable without a password
    :param username: optional remote user name to connect as
    """
    LOG.info('making sure passwordless SSH succeeds')
    if ssh.can_connect_passwordless(hostname):
        return

    LOG.warning('could not connect via SSH')

    # Create the key if it doesn't exist:
    id_rsa_pub_file = os.path.expanduser(u'~/.ssh/id_rsa.pub')
    id_rsa_file = id_rsa_pub_file.split('.pub')[0]
    if not os.path.exists(id_rsa_file):
        LOG.info('creating a passwordless id_rsa.pub key file')
        with get_local_connection(LOG) as conn:
            # -N "" makes the generated key passwordless
            remoto.process.run(
                conn,
                [
                    'ssh-keygen',
                    '-t', 'rsa',
                    '-N', "",
                    '-f', id_rsa_file,
                ]
            )

    # Get the contents of id_rsa.pub and push it to the host
    LOG.info('will connect again with password prompt')
    distro = hosts.get(hostname, username, detect_sudo=False)
    auth_keys_path = '.ssh/authorized_keys'
    if not distro.conn.remote_module.path_exists(auth_keys_path):
        distro.conn.logger.warning(
            '.ssh/authorized_keys does not exist, will skip adding keys'
        )
    else:
        LOG.info('adding public keys to authorized_keys')
        with open(os.path.expanduser('~/.ssh/id_rsa.pub'), 'r') as id_rsa:
            contents = id_rsa.read()
        distro.conn.remote_module.append_to_file(
            auth_keys_path,
            contents
        )
    distro.conn.exit()


def validate_host_ip(ips, subnets):
    """
    Make sure that a given host all subnets specified will have at least
    one IP in that range.

    :param ips: iterable of IP address strings found on the host
    :param subnets: iterable of subnet strings (``None`` entries are ignored)
    :raises RuntimeError: when some subnet matches none of the host's IPs
    """
    # Make sure we prune ``None`` arguments
    subnets = [s for s in subnets if s is not None]
    validate_one_subnet = len(subnets) == 1

    def ip_in_one_subnet(ips, subnet):
        """ ensure an ip exists in at least one subnet """
        for ip in ips:
            if net.ip_in_subnet(ip, subnet):
                return True
        return False

    for subnet in subnets:
        if ip_in_one_subnet(ips, subnet):
            if validate_one_subnet:
                return
            else:  # keep going to make sure the other subnets are ok
                continue
        else:
            msg = "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
            raise RuntimeError(msg)


def get_public_network_ip(ips, public_subnet):
    """
    Given a public subnet, chose the one IP from the remote host that exists
    within the subnet range.

    :raises RuntimeError: when none of ``ips`` falls inside ``public_subnet``
    """
    for ip in ips:
        if net.ip_in_subnet(ip, public_subnet):
            return ip
    msg = "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
    raise RuntimeError(msg)


def new(args):
    """Create a new cluster: write ``{cluster}.conf`` and the mon keyring.

    Resolves every initial monitor host, validates its IPs against the
    requested public/cluster networks, and records the monitor membership in
    a freshly generated ceph.conf.  The config is written atomically via a
    ``.tmp`` file and ``os.rename``.

    :raises RuntimeError: when ``--ceph-conf`` was given (re-use conflicts
        with creating a new conf file)
    :raises exc.ClusterExistsError: when the target conf file already exists
    """
    if args.ceph_conf:
        raise RuntimeError('will not create a ceph conf file if attempting to re-use with `--ceph-conf` flag')
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')

    fsid = args.fsid or uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    # if networks were passed in, lets set them in the
    # global section
    if args.public_network:
        cfg.set('global', 'public network', str(args.public_network))

    if args.cluster_network:
        cfg.set('global', 'cluster network', str(args.cluster_network))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        # Try to ensure we can ssh in properly before anything else
        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

        # Now get the non-local IPs from the remote node
        distro = hosts.get(host, username=args.username)
        remote_ips = net.ip_addresses(distro.conn)
        distro.conn.exit()

        # Validate subnets if we received any
        if args.public_network or args.cluster_network:
            validate_host_ip(remote_ips, [args.public_network, args.cluster_network])

        # Pick the IP that matches the public cluster (if we were told to do
        # so) otherwise pick the first, non-local IP
        LOG.debug('Resolving host %s', host)
        if args.public_network:
            ip = get_public_network_ip(remote_ips, args.public_network)
        else:
            ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        try:
            # inet_pton raises socket.error for a non-IPv6 address, so
            # success here means the monitor address is IPv6
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    # http://tracker.newdream.net/issues/3138
    cfg.set('global', 'filestore xattr use omap', 'true')

    path = '{name}.conf'.format(
        name=args.cluster,
    )

    new_mon_keyring(args)

    LOG.debug('Writing initial config to %s...', path)
    tmp = '%s.tmp' % path
    with open(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise


def new_mon_keyring(args):
    """Write ``{cluster}.mon.keyring`` holding a freshly generated mon key.

    The keyring contains secret key material, so the file is created with
    mode 0600 via ``os.open`` instead of relying on the process umask, which
    would typically leave it world-readable.

    :raises exc.ClusterExistsError: when the keyring file already exists
    """
    LOG.debug('Creating a random mon key...')
    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()

    keypath = '{name}.mon.keyring'.format(
        name=args.cluster,
    )

    LOG.debug('Writing monitor keyring to %s...', keypath)
    tmp = '%s.tmp' % keypath
    # Create the temp file owner-read/write only so the secret is never
    # exposed to other local users, then atomically move it into place.
    oflags = os.O_CREAT | os.O_WRONLY
    with os.fdopen(os.open(tmp, oflags, 0o600), 'w') as f:
        f.write(mon_keyring)
    try:
        os.rename(tmp, keypath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(keypath)
        else:
            raise


@priority(10)
def make(parser):
    """
    Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
    """
    parser.add_argument(
        'mon',
        metavar='MON',
        nargs='+',
        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
        type=arg_validators.Hostname(),
    )
    parser.add_argument(
        '--no-ssh-copykey',
        dest='ssh_copykey',
        action='store_false',
        default=True,
        help='do not attempt to copy SSH keys',
    )
    parser.add_argument(
        '--fsid',
        dest='fsid',
        help='provide an alternate FSID for ceph.conf generation',
    )
    parser.add_argument(
        '--cluster-network',
        help='specify the (internal) cluster network',
        type=arg_validators.Subnet(),
    )
    parser.add_argument(
        '--public-network',
        help='specify the public network for a cluster',
        type=arg_validators.Subnet(),
    )
    parser.set_defaults(
        func=new,
    )
./CrossVul/dataset_final_sorted/CWE-200/py/bad_1559_1
crossvul-python_data_good_2837_2
from __future__ import print_function import argparse import json from oauthlib.oauth2 import LegacyApplicationClient import logging import logging.handlers from requests_oauthlib import OAuth2Session import os import requests import six import sys import traceback from six.moves.urllib.parse import quote as urlquote from six.moves.urllib.parse import urlparse # ------------------------------------------------------------------------------ logger = None prog_name = os.path.basename(sys.argv[0]) AUTH_ROLES = ['root-admin', 'realm-admin', 'anonymous'] LOG_FILE_ROTATION_COUNT = 3 TOKEN_URL_TEMPLATE = ( '{server}/auth/realms/{realm}/protocol/openid-connect/token') GET_SERVER_INFO_TEMPLATE = ( '{server}/auth/admin/serverinfo/') GET_REALMS_URL_TEMPLATE = ( '{server}/auth/admin/realms') CREATE_REALM_URL_TEMPLATE = ( '{server}/auth/admin/realms') DELETE_REALM_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}') GET_REALM_METADATA_TEMPLATE = ( '{server}/auth/realms/{realm}/protocol/saml/descriptor') CLIENT_REPRESENTATION_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}') GET_CLIENTS_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients') CLIENT_DESCRIPTOR_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/client-description-converter') CREATE_CLIENT_URL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients') GET_INITIAL_ACCESS_TOKEN_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients-initial-access') SAML2_CLIENT_REGISTRATION_TEMPLATE = ( '{server}/auth/realms/{realm}/clients-registrations/saml2-entity-descriptor') GET_CLIENT_PROTOCOL_MAPPERS_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}/protocol-mappers/models') GET_CLIENT_PROTOCOL_MAPPERS_BY_PROTOCOL_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}/protocol-mappers/protocol/{protocol}') POST_CLIENT_PROTOCOL_MAPPER_TEMPLATE = ( '{server}/auth/admin/realms/{realm}/clients/{id}/protocol-mappers/models') ADMIN_CLIENT_ID = 'admin-cli' # 
------------------------------------------------------------------------------ class RESTError(Exception): def __init__(self, status_code, status_reason, response_json, response_text, cmd): self.status_code = status_code self.status_reason = status_reason self.error_description = None self.error = None self.response_json = response_json self.response_text = response_text self.cmd = cmd self.message = '{status_reason}({status_code}): '.format( status_reason=self.status_reason, status_code=self.status_code) if response_json: self.error_description = response_json.get('error_description') if self.error_description is None: self.error_description = response_json.get('errorMessage') self.error = response_json.get('error') self.message += '"{error_description}" [{error}]'.format( error_description=self.error_description, error=self.error) else: self.message += '"{response_text}"'.format( response_text=self.response_text) self.args = (self.message,) def __str__(self): return self.message # ------------------------------------------------------------------------------ def configure_logging(options): global logger # pylint: disable=W0603 log_dir = os.path.dirname(options.log_file) if os.path.exists(log_dir): if not os.path.isdir(log_dir): raise ValueError('logging directory "{log_dir}" exists but is not ' 'directory'.format(log_dir=log_dir)) else: os.makedirs(log_dir) log_level = logging.ERROR if options.verbose: log_level = logging.INFO if options.debug: log_level = logging.DEBUG # These two lines enable debugging at httplib level # (requests->urllib3->http.client) You will see the REQUEST, # including HEADERS and DATA, and RESPONSE with HEADERS but # without DATA. The only thing missing will be the # response.body which is not logged. 
try: import http.client as http_client # Python 3 except ImportError: import httplib as http_client # Python 2 http_client.HTTPConnection.debuglevel = 1 # Turn on cookielib debugging if False: try: import http.cookiejar as cookiejar except ImportError: import cookielib as cookiejar # Python 2 cookiejar.debug = True logger = logging.getLogger(prog_name) try: file_handler = logging.handlers.RotatingFileHandler( options.log_file, backupCount=LOG_FILE_ROTATION_COUNT) except IOError as e: print('Unable to open log file %s (%s)' % (options.log_file, e), file=sys.stderr) else: formatter = logging.Formatter( '%(asctime)s %(name)s %(levelname)s: %(message)s') file_handler.setFormatter(formatter) file_handler.setLevel(logging.DEBUG) logger.addHandler(file_handler) console_handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter('%(message)s') console_handler.setFormatter(formatter) console_handler.setLevel(log_level) logger.addHandler(console_handler) # Set the log level on the logger to the lowest level # possible. This allows the message to be emitted from the logger # to it's handlers where the level will be filtered on a per # handler basis. 
logger.setLevel(1) # ------------------------------------------------------------------------------ def json_pretty(text): return json.dumps(json.loads(text), indent=4, sort_keys=True) def py_json_pretty(py_json): return json_pretty(json.dumps(py_json)) def server_name_from_url(url): return urlparse(url).netloc def get_realm_names_from_realms(realms): return [x['realm'] for x in realms] def get_client_client_ids_from_clients(clients): return [x['clientId'] for x in clients] def find_client_by_name(clients, client_id): for client in clients: if client.get('clientId') == client_id: return client raise KeyError('{item} not found'.format(item=client_id)) # ------------------------------------------------------------------------------ class KeycloakREST(object): def __init__(self, server, auth_role=None, session=None): self.server = server self.auth_role = auth_role self.session = session def get_initial_access_token(self, realm_name): cmd_name = "get initial access token for realm '{realm}'".format( realm=realm_name) url = GET_INITIAL_ACCESS_TOKEN_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) params = {"expiration": 60, # seconds "count": 1} response = self.session.post(url, json=params) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json # ClientInitialAccessPresentation def get_server_info(self): cmd_name = "get server info" url = GET_SERVER_INFO_TEMPLATE.format(server=self.server) logger.debug("%s on server %s", 
cmd_name, self.server) response = self.session.get(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def get_realms(self): cmd_name = "get realms" url = GET_REALMS_URL_TEMPLATE.format(server=self.server) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def create_realm(self, realm_name): cmd_name = "create realm '{realm}'".format(realm=realm_name) url = CREATE_REALM_URL_TEMPLATE.format(server=self.server) logger.debug("%s on server %s", cmd_name, self.server) params = {"enabled": True, "id": realm_name, "realm": realm_name, } response = self.session.post(url, json=params) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.created: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise 
RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def delete_realm(self, realm_name): cmd_name = "delete realm '{realm}'".format(realm=realm_name) url = DELETE_REALM_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.delete(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.no_content: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def get_realm_metadata(self, realm_name): cmd_name = "get metadata for realm '{realm}'".format(realm=realm_name) url = GET_REALM_METADATA_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.ok: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) return response.text def get_clients(self, realm_name): cmd_name = "get clients in realm '{realm}'".format(realm=realm_name) url = GET_CLIENTS_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url) 
logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def get_client_by_id(self, realm_name, id): cmd_name = "get client id {id} in realm '{realm}'".format( id=id, realm=realm_name) url = GET_CLIENTS_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) params = {'clientID': id} logger.debug("%s on server %s", cmd_name, self.server) response = self.session.get(url, params=params) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def get_client_by_name(self, realm_name, client_name): clients = self.get_clients(realm_name) client = find_client_by_name(clients, client_name) id = client.get('id') logger.debug("client name '%s' mapped to id '%s'", client_name, id) logger.debug("client %s\n%s", client_name, py_json_pretty(client)) return client def get_client_id_by_name(self, realm_name, client_name): client = self.get_client_by_name(realm_name, client_name) id = client.get('id') return id def get_client_descriptor(self, realm_name, metadata): cmd_name = "get client descriptor realm 
'{realm}'".format( realm=realm_name) url = CLIENT_DESCRIPTOR_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) headers = {'Content-Type': 'application/xml;charset=utf-8'} response = self.session.post(url, headers=headers, data=metadata) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.ok): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json def create_client_from_descriptor(self, realm_name, descriptor): cmd_name = "create client from descriptor " "'{client_id}'in realm '{realm}'".format( client_id=descriptor['clientId'], realm=realm_name) url = CREATE_CLIENT_URL_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.post(url, json=descriptor) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.created: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def create_client(self, realm_name, metadata): logger.debug("create client in realm %s on server %s", realm_name, self.server) descriptor = self.get_client_descriptor(realm_name, metadata) self.create_client_from_descriptor(realm_name, descriptor) return 
descriptor def register_client(self, initial_access_token, realm_name, metadata): cmd_name = "register_client realm '{realm}'".format( realm=realm_name) url = SAML2_CLIENT_REGISTRATION_TEMPLATE.format( server=self.server, realm=urlquote(realm_name)) logger.debug("%s on server %s", cmd_name, self.server) headers = {'Content-Type': 'application/xml;charset=utf-8'} if initial_access_token: headers['Authorization'] = 'Bearer {token}'.format( token=initial_access_token) response = self.session.post(url, headers=headers, data=metadata) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if (not response_json or response.status_code != requests.codes.created): logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, json_pretty(response.text)) return response_json # ClientRepresentation def delete_client_by_name(self, realm_name, client_name): id = self.get_client_id_by_name(realm_name, client_name) self.delete_client_by_id(realm_name, id) def delete_client_by_id(self, realm_name, id): cmd_name = "delete client id '{id}'in realm '{realm}'".format( id=id, realm=realm_name) url = CLIENT_REPRESENTATION_TEMPLATE.format( server=self.server, realm=urlquote(realm_name), id=urlquote(id)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.delete(url) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.no_content: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, 
response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def update_client(self, realm_name, client): id = client['id'] cmd_name = "update client {id} in realm '{realm}'".format( id=client['clientId'], realm=realm_name) url = CLIENT_REPRESENTATION_TEMPLATE.format( server=self.server, realm=urlquote(realm_name), id=urlquote(id)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.put(url, json=client) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.no_content: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def update_client_attributes(self, realm_name, client, update_attrs): client_id = client['clientId'] logger.debug("update client attrs: client_id=%s " "current attrs=%s update=%s" % (client_id, client['attributes'], update_attrs)) client['attributes'].update(update_attrs) logger.debug("update client attrs: client_id=%s " "new attrs=%s" % (client_id, client['attributes'])) self.update_client(realm_name, client); def update_client_by_name_attributes(self, realm_name, client_name, update_attrs): client = self.get_client_by_name(realm_name, client_name) self.update_client_attributes(realm_name, client, update_attrs) def new_saml_group_protocol_mapper(self, mapper_name, attribute_name, friendly_name=None, single_attribute=True): mapper = { 'protocol': 'saml', 'name': mapper_name, 'protocolMapper': 'saml-group-membership-mapper', 'config': { 'attribute.name': attribute_name, 'attribute.nameformat': 'Basic', 'single': single_attribute, 'full.path': False, }, } if friendly_name: mapper['config']['friendly.name'] = friendly_name 
return mapper def create_client_protocol_mapper(self, realm_name, client, mapper): id = client['id'] cmd_name = ("create protocol-mapper '{mapper_name}' for client {id} " "in realm '{realm}'".format( mapper_name=mapper['name'],id=client['clientId'], realm=realm_name)) url = POST_CLIENT_PROTOCOL_MAPPER_TEMPLATE.format( server=self.server, realm=urlquote(realm_name), id=urlquote(id)) logger.debug("%s on server %s", cmd_name, self.server) response = self.session.post(url, json=mapper) logger.debug("%s response code: %s %s", cmd_name, response.status_code, response.reason) try: response_json = response.json() except ValueError as e: response_json = None if response.status_code != requests.codes.created: logger.error("%s error: status=%s (%s) text=%s", cmd_name, response.status_code, response.reason, response.text) raise RESTError(response.status_code, response.reason, response_json, response.text, cmd_name) logger.debug("%s response = %s", cmd_name, response.text) def create_client_by_name_protocol_mapper(self, realm_name, client_name, mapper): client = self.get_client_by_name(realm_name, client_name) self.create_client_protocol_mapper(realm_name, client, mapper) def add_client_by_name_redirect_uris(self, realm_name, client_name, uris): client = self.get_client_by_name(realm_name, client_name) uris = set(uris) redirect_uris = set(client['redirectUris']) redirect_uris |= uris client['redirectUris'] = list(redirect_uris) self.update_client(realm_name, client); def remove_client_by_name_redirect_uris(self, realm_name, client_name, uris): client = self.get_client_by_name(realm_name, client_name) uris = set(uris) redirect_uris = set(client['redirectUris']) redirect_uris -= uris client['redirectUris'] = list(redirect_uris) self.update_client(realm_name, client); # ------------------------------------------------------------------------------ class KeycloakAdminConnection(KeycloakREST): def __init__(self, server, auth_role, realm, client_id, username, password, tls_verify): 
super(KeycloakAdminConnection, self).__init__(server, auth_role) self.realm = realm self.client_id = client_id self.username = username self.password = password self.session = self._create_session(tls_verify) def _create_session(self, tls_verify): token_url = TOKEN_URL_TEMPLATE.format( server=self.server, realm=urlquote(self.realm)) refresh_url = token_url client = LegacyApplicationClient(client_id=self.client_id) session = OAuth2Session(client=client, auto_refresh_url=refresh_url, auto_refresh_kwargs={ 'client_id': self.client_id}) session.verify = tls_verify token = session.fetch_token(token_url=token_url, username=self.username, password=self.password, client_id=self.client_id, verify=session.verify) return session class KeycloakAnonymousConnection(KeycloakREST): def __init__(self, server, tls_verify): super(KeycloakAnonymousConnection, self).__init__(server, 'anonymous') self.session = self._create_session(tls_verify) def _create_session(self, tls_verify): session = requests.Session() session.verify = tls_verify return session # ------------------------------------------------------------------------------ def do_server_info(options, conn): server_info = conn.get_server_info() print(json_pretty(server_info)) def do_list_realms(options, conn): realms = conn.get_realms() realm_names = get_realm_names_from_realms(realms) print('\n'.join(sorted(realm_names))) def do_create_realm(options, conn): conn.create_realm(options.realm_name) def do_delete_realm(options, conn): conn.delete_realm(options.realm_name) def do_get_realm_metadata(options, conn): metadata = conn.get_realm_metadata(options.realm_name) print(metadata) def do_list_clients(options, conn): clients = conn.get_clients(options.realm_name) client_ids = get_client_client_ids_from_clients(clients) print('\n'.join(sorted(client_ids))) def do_create_client(options, conn): metadata = options.metadata.read() descriptor = conn.create_client(options.realm_name, metadata) def do_register_client(options, conn): 
metadata = options.metadata.read() client_representation = conn.register_client( options.initial_access_token, options.realm_name, metadata) def do_delete_client(options, conn): conn.delete_client_by_name(options.realm_name, options.client_name) def do_client_test(options, conn): 'experimental test code used during development' uri = 'https://openstack.jdennis.oslab.test:5000/v3/mellon/fooResponse' conn.remove_client_by_name_redirect_uri(options.realm_name, options.client_name, uri) # ------------------------------------------------------------------------------ verbose_help = ''' The structure of the command line arguments is "noun verb" where noun is one of Keycloak's data items (e.g. realm, client, etc.) and the verb is an action to perform on the item. Each of the nouns and verbs may have their own set of arguments which must follow the noun or verb. For example to delete the client XYZ in the realm ABC: echo password | {prog_name} -s http://example.com:8080 -P - client delete -r ABC -c XYZ where 'client' is the noun, 'delete' is the verb and -r ABC -c XYZ are arguments to the delete action. If the command completes successfully the exit status is 0. The exit status is 1 if an authenticated connection with the server cannont be successfully established. The exit status is 2 if the REST operation fails. The server should be a scheme://hostname:port URL. 
''' class TlsVerifyAction(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") super(TlsVerifyAction, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): if values.lower() in ['true', 'yes', 'on']: verify = True elif values.lower() in ['false', 'no', 'off']: verify = False else: verify = values setattr(namespace, self.dest, verify) def main(): global logger result = 0 parser = argparse.ArgumentParser(description='Keycloak REST client', prog=prog_name, epilog=verbose_help.format(prog_name=prog_name), formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='be chatty') parser.add_argument('-d', '--debug', action='store_true', help='turn on debug info') parser.add_argument('--show-traceback', action='store_true', help='exceptions print traceback in addition to ' 'error message') parser.add_argument('--log-file', default='/tmp/{prog_name}.log'.format( prog_name=prog_name), help='log file pathname') parser.add_argument('--permit-insecure-transport', action='store_true', help='Normally secure transport such as TLS ' 'is required, defeat this check') parser.add_argument('--tls-verify', action=TlsVerifyAction, default=True, help='TLS certificate verification for requests to' ' the server. May be one of case insenstive ' '[true, yes, on] to enable,' '[false, no, off] to disable.' 'Or the pathname to a OpenSSL CA bundle to use.' 
' Default is True.') group = parser.add_argument_group('Server') group.add_argument('-s', '--server', required=True, help='DNS name or IP address of Keycloak server') group.add_argument('-a', '--auth-role', choices=AUTH_ROLES, default='root-admin', help='authenticating as what type of user (default: root-admin)') group.add_argument('-u', '--admin-username', default='admin', help='admin user name (default: admin)') group.add_argument('-P', '--admin-password-file', type=argparse.FileType('rb'), help=('file containing admin password ' '(or use a hyphen "-" to read the password ' 'from stdin)')) group.add_argument('--admin-realm', default='master', help='realm admin belongs to') cmd_parsers = parser.add_subparsers(help='available commands') # --- realm commands --- realm_parser = cmd_parsers.add_parser('realm', help='realm operations') sub_parser = realm_parser.add_subparsers(help='realm commands') cmd_parser = sub_parser.add_parser('server_info', help='dump server info') cmd_parser.set_defaults(func=do_server_info) cmd_parser = sub_parser.add_parser('list', help='list realm names') cmd_parser.set_defaults(func=do_list_realms) cmd_parser = sub_parser.add_parser('create', help='create new realm') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_create_realm) cmd_parser = sub_parser.add_parser('delete', help='delete existing realm') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_delete_realm) cmd_parser = sub_parser.add_parser('metadata', help='retrieve realm metadata') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_get_realm_metadata) # --- client commands --- client_parser = cmd_parsers.add_parser('client', help='client operations') sub_parser = client_parser.add_subparsers(help='client commands') cmd_parser = sub_parser.add_parser('list', help='list client names') 
cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.set_defaults(func=do_list_clients) cmd_parser = sub_parser.add_parser('create', help='create new client') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-m', '--metadata', type=argparse.FileType('rb'), required=True, help='SP metadata file or stdin') cmd_parser.set_defaults(func=do_create_client) cmd_parser = sub_parser.add_parser('register', help='register new client') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-m', '--metadata', type=argparse.FileType('rb'), required=True, help='SP metadata file or stdin') cmd_parser.add_argument('--initial-access-token', required=True, help='realm initial access token for ' 'client registeration') cmd_parser.set_defaults(func=do_register_client) cmd_parser = sub_parser.add_parser('delete', help='delete existing client') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-c', '--client-name', required=True, help='client name') cmd_parser.set_defaults(func=do_delete_client) cmd_parser = sub_parser.add_parser('test', help='experimental test used during ' 'development') cmd_parser.add_argument('-r', '--realm-name', required=True, help='realm name') cmd_parser.add_argument('-c', '--client-name', required=True, help='client name') cmd_parser.set_defaults(func=do_client_test) # Process command line arguments options = parser.parse_args() configure_logging(options) if options.permit_insecure_transport: os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' # Get admin password options.admin_password = None # 1. Try password file if options.admin_password_file is not None: options.admin_password = options.keycloak_admin_password_file.readline().strip() options.keycloak_admin_password_file.close() # 2. 
Try KEYCLOAK_ADMIN_PASSWORD environment variable if options.admin_password is None: if (('KEYCLOAK_ADMIN_PASSWORD' in os.environ) and (os.environ['KEYCLOAK_ADMIN_PASSWORD'])): options.admin_password = os.environ['KEYCLOAK_ADMIN_PASSWORD'] try: anonymous_conn = KeycloakAnonymousConnection(options.server, options.tls_verify) admin_conn = KeycloakAdminConnection(options.server, options.auth_role, options.admin_realm, ADMIN_CLIENT_ID, options.admin_username, options.admin_password, options.tls_verify) except Exception as e: if options.show_traceback: traceback.print_exc() print(six.text_type(e), file=sys.stderr) result = 1 return result try: if options.func == do_register_client: conn = admin_conn else: conn = admin_conn result = options.func(options, conn) except Exception as e: if options.show_traceback: traceback.print_exc() print(six.text_type(e), file=sys.stderr) result = 2 return result return result # ------------------------------------------------------------------------------ if __name__ == '__main__': sys.exit(main()) else: logger = logging.getLogger('keycloak-cli')
./CrossVul/dataset_final_sorted/CWE-200/py/good_2837_2
crossvul-python_data_good_4177_0
import time
from pydoc import locate

from django.conf import settings

# Default value for every supported configuration key.  Backends fall back
# to these when a key has never been set explicitly.
DEFAULT_CONFIG = {
    'config_version': 4,
    'flag_prefix': 'ractf',
    'graph_members': 10,
    'register_end_time': -1,
    'end_time': time.time() + 7 * 24 * 60 * 60,
    'start_time': time.time(),
    'register_start_time': time.time(),
    'team_size': -1,
    'email_regex': '',
    'email_domain': '',
    'login_provider': 'basic_auth',
    'registration_provider': 'basic_auth',
    'token_provider': 'basic_auth',
    'enable_bot_users': True,
    'enable_ctftime': True,
    'enable_flag_submission': True,
    'enable_flag_submission_after_competition': True,
    'enable_force_admin_2fa': False,
    'enable_track_incorrect_submissions': True,
    'enable_login': True,
    'enable_prelogin': True,
    'enable_maintenance_mode': False,
    'enable_registration': True,
    'enable_scoreboard': True,
    'enable_scoring': True,
    'enable_solve_broadcast': True,
    'enable_teams': True,
    'enable_team_join': True,
    'enable_view_challenges_after_competion': True,
    'enable_team_leave': False,
    'invite_required': False,
    'hide_scoreboard_at': -1,
    'setup_wizard_complete': False,
    'sensitive_fields': ['sensitive_fields', 'enable_force_admin_2fa']
}

# The concrete backend class is named in Django settings, resolved once at
# import time, and shared by every accessor below.
backend = locate(settings.CONFIG['BACKEND'])()
backend.load(defaults=DEFAULT_CONFIG)


def get(key):
    """Return the value currently stored for ``key``."""
    return backend.get(key)


def set(key, value):  # noqa: A001 - deliberate module-level API name
    """Store ``value`` under ``key`` in the active backend."""
    backend.set(key, value)


def get_all():
    """Return every configuration key/value pair, sensitive ones included."""
    return backend.get_all()


def get_all_non_sensitive():
    """Return every configuration value except those flagged as sensitive.

    NOTE: mirrors the original behaviour exactly — a sensitive key that is
    missing from the backend's full dump raises KeyError on ``del``.
    """
    hidden = backend.get('sensitive_fields')
    visible = backend.get_all()
    for field in hidden:
        del visible[field]
    return visible


def is_sensitive(key):
    """Return True when ``key`` is flagged as sensitive."""
    return key in backend.get('sensitive_fields')


def set_bulk(values: dict):
    """Apply several key/value pairs in one call (insertion order)."""
    for key in values:
        set(key, values[key])


def add_plugin_config(name, config):
    """Register a plugin's default configuration under ``name``."""
    DEFAULT_CONFIG[name] = config
./CrossVul/dataset_final_sorted/CWE-200/py/good_4177_0
crossvul-python_data_good_3332_0
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2016 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale.  If not, see <http://www.gnu.org/licenses/>.

"""
Authentication management.

Default is htpasswd authentication.

Apache's htpasswd command (httpd.apache.org/docs/programs/htpasswd.html)
manages a file for storing user credentials. It can encrypt passwords using
different methods, e.g. BCRYPT, MD5-APR1 (a version of MD5 modified for
Apache), SHA1, or by using the system's CRYPT routine.

The CRYPT and SHA1 encryption methods implemented by htpasswd are considered
as insecure. MD5-APR1 provides medium security as of 2015. Only BCRYPT can
be considered secure by current standards.

MD5-APR1-encrypted credentials can be written by all versions of htpasswd
(it is the default, in fact), whereas BCRYPT requires htpasswd 2.4.x or
newer.

The `is_authenticated(user, password)` function provided by this module
verifies the user-given credentials by parsing the htpasswd credential file
pointed to by the ``htpasswd_filename`` configuration value while assuming
the password encryption method specified via the ``htpasswd_encryption``
configuration value.

The following htpasswd password encryption methods are supported by Radicale
out-of-the-box:

    - plain-text (created by htpasswd -p...) -- INSECURE
    - CRYPT      (created by htpasswd -d...) -- INSECURE
    - SHA1       (created by htpasswd -s...) -- INSECURE

When passlib (https://pypi.python.org/pypi/passlib) is importable, the
following significantly more secure schemes are parsable by Radicale:

    - MD5-APR1  (htpasswd -m...) -- htpasswd's default method
    - BCRYPT    (htpasswd -B...) -- Requires htpasswd 2.4.x

"""

import base64
import functools
import hashlib
import os
import random
import time
from importlib import import_module


def load(configuration, logger):
    """Load the authentication manager chosen in configuration.

    ``auth.type`` selects the class: "None" disables authentication,
    "htpasswd" uses the built-in file-based manager, anything else is
    treated as a dotted module path exporting an ``Auth`` class.
    """
    auth_type = configuration.get("auth", "type")
    logger.debug("Authentication type is %s", auth_type)
    if auth_type == "None":
        class_ = NoneAuth
    elif auth_type == "htpasswd":
        class_ = Auth
    else:
        class_ = import_module(auth_type).Auth
    return class_(configuration, logger)


class BaseAuth:
    """Interface every authentication backend must implement."""

    def __init__(self, configuration, logger):
        self.configuration = configuration
        self.logger = logger

    def is_authenticated(self, user, password):
        """Validate credentials.

        Iterate through htpasswd credential file until user matches, extract
        hash (encrypted password) and check hash against user-given password,
        using the method specified in the Radicale config.

        """
        raise NotImplementedError

    def map_login_to_user(self, login):
        """Map login to internal username."""
        return login


class NoneAuth(BaseAuth):
    """Backend that accepts every credential pair."""

    def is_authenticated(self, user, password):
        return True


class Auth(BaseAuth):
    """htpasswd-file backed authentication.

    The verification method is selected once at construction time from the
    ``auth.htpasswd_encryption`` configuration value and bound to
    ``self.verify``.
    """

    def __init__(self, configuration, logger):
        super().__init__(configuration, logger)
        self.filename = os.path.expanduser(
            configuration.get("auth", "htpasswd_filename"))
        self.encryption = configuration.get("auth", "htpasswd_encryption")

        if self.encryption == "ssha":
            self.verify = self._ssha
        elif self.encryption == "sha1":
            self.verify = self._sha1
        elif self.encryption == "plain":
            self.verify = self._plain
        elif self.encryption == "md5":
            try:
                from passlib.hash import apr_md5_crypt
            except ImportError:
                raise RuntimeError(
                    "The htpasswd encryption method 'md5' requires "
                    "the passlib module.")
            self.verify = functools.partial(self._md5apr1, apr_md5_crypt)
        elif self.encryption == "bcrypt":
            try:
                from passlib.hash import bcrypt
            except ImportError:
                raise RuntimeError(
                    "The htpasswd encryption method 'bcrypt' requires "
                    "the passlib module with bcrypt support.")
            # A call to `encrypt` raises passlib.exc.MissingBackendError with a
            # good error message if bcrypt backend is not available. Trigger
            # this here.
            bcrypt.encrypt("test-bcrypt-backend")
            self.verify = functools.partial(self._bcrypt, bcrypt)
        elif self.encryption == "crypt":
            try:
                import crypt
            except ImportError:
                raise RuntimeError(
                    "The htpasswd encryption method 'crypt' requires "
                    "the crypt() system support.")
            self.verify = functools.partial(self._crypt, crypt)
        else:
            raise RuntimeError(
                "The htpasswd encryption method '%s' is not "
                "supported." % self.encryption)

    def _plain(self, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, plain method."""
        return hash_value == password

    def _crypt(self, crypt, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, crypt method."""
        return crypt.crypt(password, hash_value) == hash_value

    def _sha1(self, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, sha1 method."""
        hash_value = hash_value.replace("{SHA}", "").encode("ascii")
        password = password.encode(self.configuration.get("encoding", "stock"))
        sha1 = hashlib.sha1()
        sha1.update(password)
        return sha1.digest() == base64.b64decode(hash_value)

    def _ssha(self, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, salted sha1
        method. This method is not directly supported by htpasswd, but
        it can be written with e.g. openssl, and nginx can parse it.

        The stored value is ``"{SSHA}" + base64(sha1_digest + salt)``.
        """
        # BUGFIX: the previous code called bytes.decode("base64") (a
        # Python 2 idiom that raises LookupError under Python 3) and
        # reassigned hash_value to its first 20 bytes *before* slicing
        # out the salt, so the salt was always empty.  Decode with
        # base64.b64decode and split the decoded blob afterwards.
        hash_salt = base64.b64decode(
            hash_value.replace("{SSHA}", "").encode("ascii"))
        password = password.encode(self.configuration.get("encoding", "stock"))
        digest = hash_salt[:20]   # SHA-1 digests are 20 bytes
        salt = hash_salt[20:]     # everything after the digest is the salt
        sha1 = hashlib.sha1()
        sha1.update(password)
        sha1.update(salt)
        return sha1.digest() == digest

    def _bcrypt(self, bcrypt, hash_value, password):
        """Check via passlib's bcrypt backend (bound in __init__)."""
        return bcrypt.verify(password, hash_value)

    def _md5apr1(self, md5_apr1, hash_value, password):
        """Check via passlib's apr_md5_crypt backend (bound in __init__)."""
        return md5_apr1.verify(password, hash_value)

    def is_authenticated(self, user, password):
        """Return True when ``user``/``password`` match a line of the file.

        On any failure a random delay is inserted so the response time does
        not reveal whether the user exists.
        """
        # The content of the file is not cached because reading is generally a
        # very cheap operation, and it's useful to get live updates of the
        # htpasswd file.
        with open(self.filename) as fd:
            for line in fd:
                line = line.strip()
                if line:
                    login, hash_value = line.split(":")
                    if login == user and self.verify(hash_value, password):
                        return True
        # Random timer to avoid timing oracles and simple bruteforce attacks
        time.sleep(1 + random.random())
        return False
./CrossVul/dataset_final_sorted/CWE-362/py/good_3332_0
crossvul-python_data_bad_3332_0
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2016 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale.  If not, see <http://www.gnu.org/licenses/>.

"""
Authentication management.

Default is htpasswd authentication.

Apache's htpasswd command (httpd.apache.org/docs/programs/htpasswd.html)
manages a file for storing user credentials. It can encrypt passwords using
different methods, e.g. BCRYPT, MD5-APR1 (a version of MD5 modified for
Apache), SHA1, or by using the system's CRYPT routine.

The CRYPT and SHA1 encryption methods implemented by htpasswd are considered
as insecure. MD5-APR1 provides medium security as of 2015. Only BCRYPT can
be considered secure by current standards.

MD5-APR1-encrypted credentials can be written by all versions of htpasswd
(it is the default, in fact), whereas BCRYPT requires htpasswd 2.4.x or
newer.

The `is_authenticated(user, password)` function provided by this module
verifies the user-given credentials by parsing the htpasswd credential file
pointed to by the ``htpasswd_filename`` configuration value while assuming
the password encryption method specified via the ``htpasswd_encryption``
configuration value.

The following htpasswd password encryption methods are supported by Radicale
out-of-the-box:

    - plain-text (created by htpasswd -p...) -- INSECURE
    - CRYPT      (created by htpasswd -d...) -- INSECURE
    - SHA1       (created by htpasswd -s...) -- INSECURE

When passlib (https://pypi.python.org/pypi/passlib) is importable, the
following significantly more secure schemes are parsable by Radicale:

    - MD5-APR1  (htpasswd -m...) -- htpasswd's default method
    - BCRYPT    (htpasswd -B...) -- Requires htpasswd 2.4.x

"""

import base64
import functools
import hashlib
import os
# random/time are needed by the anti-timing-oracle delay in
# Auth.is_authenticated (see SECURITY FIX below).
import random
import time
from importlib import import_module


def load(configuration, logger):
    """Load the authentication manager chosen in configuration.

    ``auth.type`` selects the class: "None" disables authentication,
    "htpasswd" uses the built-in file-based manager, anything else is
    treated as a dotted module path exporting an ``Auth`` class.
    """
    auth_type = configuration.get("auth", "type")
    logger.debug("Authentication type is %s", auth_type)
    if auth_type == "None":
        class_ = NoneAuth
    elif auth_type == "htpasswd":
        class_ = Auth
    else:
        class_ = import_module(auth_type).Auth
    return class_(configuration, logger)


class BaseAuth:
    """Interface every authentication backend must implement."""

    def __init__(self, configuration, logger):
        self.configuration = configuration
        self.logger = logger

    def is_authenticated(self, user, password):
        """Validate credentials.

        Iterate through htpasswd credential file until user matches, extract
        hash (encrypted password) and check hash against user-given password,
        using the method specified in the Radicale config.

        """
        raise NotImplementedError

    def map_login_to_user(self, login):
        """Map login to internal username."""
        return login


class NoneAuth(BaseAuth):
    """Backend that accepts every credential pair."""

    def is_authenticated(self, user, password):
        return True


class Auth(BaseAuth):
    """htpasswd-file backed authentication.

    The verification method is selected once at construction time from the
    ``auth.htpasswd_encryption`` configuration value and bound to
    ``self.verify``.
    """

    def __init__(self, configuration, logger):
        super().__init__(configuration, logger)
        self.filename = os.path.expanduser(
            configuration.get("auth", "htpasswd_filename"))
        self.encryption = configuration.get("auth", "htpasswd_encryption")

        if self.encryption == "ssha":
            self.verify = self._ssha
        elif self.encryption == "sha1":
            self.verify = self._sha1
        elif self.encryption == "plain":
            self.verify = self._plain
        elif self.encryption == "md5":
            try:
                from passlib.hash import apr_md5_crypt
            except ImportError:
                raise RuntimeError(
                    "The htpasswd encryption method 'md5' requires "
                    "the passlib module.")
            self.verify = functools.partial(self._md5apr1, apr_md5_crypt)
        elif self.encryption == "bcrypt":
            try:
                from passlib.hash import bcrypt
            except ImportError:
                raise RuntimeError(
                    "The htpasswd encryption method 'bcrypt' requires "
                    "the passlib module with bcrypt support.")
            # A call to `encrypt` raises passlib.exc.MissingBackendError with a
            # good error message if bcrypt backend is not available. Trigger
            # this here.
            bcrypt.encrypt("test-bcrypt-backend")
            self.verify = functools.partial(self._bcrypt, bcrypt)
        elif self.encryption == "crypt":
            try:
                import crypt
            except ImportError:
                raise RuntimeError(
                    "The htpasswd encryption method 'crypt' requires "
                    "the crypt() system support.")
            self.verify = functools.partial(self._crypt, crypt)
        else:
            raise RuntimeError(
                "The htpasswd encryption method '%s' is not "
                "supported." % self.encryption)

    def _plain(self, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, plain method."""
        return hash_value == password

    def _crypt(self, crypt, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, crypt method."""
        return crypt.crypt(password, hash_value) == hash_value

    def _sha1(self, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, sha1 method."""
        hash_value = hash_value.replace("{SHA}", "").encode("ascii")
        password = password.encode(self.configuration.get("encoding", "stock"))
        sha1 = hashlib.sha1()
        sha1.update(password)
        return sha1.digest() == base64.b64decode(hash_value)

    def _ssha(self, hash_value, password):
        """Check if ``hash_value`` and ``password`` match, salted sha1
        method. This method is not directly supported by htpasswd, but
        it can be written with e.g. openssl, and nginx can parse it.

        The stored value is ``"{SSHA}" + base64(sha1_digest + salt)``.
        """
        # BUGFIX: the previous code called bytes.decode("base64") (a
        # Python 2 idiom that raises LookupError under Python 3) and
        # reassigned hash_value to its first 20 bytes *before* slicing
        # out the salt, so the salt was always empty.  Decode with
        # base64.b64decode and split the decoded blob afterwards.
        hash_salt = base64.b64decode(
            hash_value.replace("{SSHA}", "").encode("ascii"))
        password = password.encode(self.configuration.get("encoding", "stock"))
        digest = hash_salt[:20]   # SHA-1 digests are 20 bytes
        salt = hash_salt[20:]     # everything after the digest is the salt
        sha1 = hashlib.sha1()
        sha1.update(password)
        sha1.update(salt)
        return sha1.digest() == digest

    def _bcrypt(self, bcrypt, hash_value, password):
        """Check via passlib's bcrypt backend (bound in __init__)."""
        return bcrypt.verify(password, hash_value)

    def _md5apr1(self, md5_apr1, hash_value, password):
        """Check via passlib's apr_md5_crypt backend (bound in __init__)."""
        return md5_apr1.verify(password, hash_value)

    def is_authenticated(self, user, password):
        """Return True when ``user``/``password`` match a line of the file."""
        # The content of the file is not cached because reading is generally a
        # very cheap operation, and it's useful to get live updates of the
        # htpasswd file.
        with open(self.filename) as fd:
            for line in fd:
                line = line.strip()
                if line:
                    login, hash_value = line.split(":")
                    # SECURITY FIX: the previous code returned the verify
                    # result as soon as the username matched, so a request
                    # for an existing user returned measurably later (or
                    # earlier) than one for a missing user — a timing
                    # oracle enabling user enumeration.  Keep scanning and
                    # only return early on a full credential match.
                    if login == user and self.verify(hash_value, password):
                        return True
        # Random delay on every failure path to blunt timing oracles and
        # simple brute-force attempts.
        time.sleep(1 + random.random())
        return False
./CrossVul/dataset_final_sorted/CWE-362/py/bad_3332_0
crossvul-python_data_good_3333_0
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale.  If not, see <http://www.gnu.org/licenses/>.

"""
Implement htpasswd authentication.

Apache's htpasswd command (httpd.apache.org/docs/programs/htpasswd.html)
manages a credential file whose passwords may be encrypted with BCRYPT,
MD5-APR1 (an Apache-modified MD5), SHA1, or the system CRYPT routine.
CRYPT and SHA1 are insecure; MD5-APR1 is medium security as of 2015; only
BCRYPT meets current standards.  MD5-APR1 entries can be written by every
htpasswd version (it is the default), BCRYPT needs htpasswd 2.4.x+.

``is_authenticated(user, password)`` checks user-given credentials against
the file named by the ``htpasswd_filename`` configuration value, assuming
the scheme named by ``htpasswd_encryption``.

Supported without extra dependencies:

    - plain-text (htpasswd -p...) -- INSECURE
    - CRYPT      (htpasswd -d...) -- INSECURE
    - SHA1       (htpasswd -s...) -- INSECURE

Supported when passlib (https://pypi.python.org/pypi/passlib) is available:

    - MD5-APR1  (htpasswd -m...) -- htpasswd's default method
    - BCRYPT    (htpasswd -B...) -- Requires htpasswd 2.4.x

"""

import base64
import hashlib
import os
import random
import time

from .. import config

# Resolved once at import time from the Radicale configuration.
FILENAME = os.path.expanduser(config.get("auth", "htpasswd_filename"))
ENCRYPTION = config.get("auth", "htpasswd_encryption")


def _plain(hash_value, password):
    """Plain-text scheme: the stored value is the password itself."""
    return hash_value == password


def _crypt(hash_value, password):
    """System crypt() scheme (``crypt`` is imported below only when this
    scheme is configured)."""
    return crypt.crypt(password, hash_value) == hash_value


def _sha1(hash_value, password):
    """Unsalted SHA-1 scheme: stored value is ``"{SHA}" + base64(digest)``."""
    expected = base64.b64decode(hash_value.replace("{SHA}", "").encode("ascii"))
    checksum = hashlib.sha1()  # pylint: disable=E1101
    checksum.update(password.encode(config.get("encoding", "stock")))
    return checksum.digest() == expected


def _ssha(hash_salt_value, password):
    """Salted SHA-1 scheme: stored value is
    ``"{SSHA}" + base64(digest + salt)``.

    Not written by htpasswd itself, but producible with e.g. openssl and
    parsable by nginx.
    """
    decoded = base64.b64decode(hash_salt_value.replace("{SSHA}", ""))
    checksum = hashlib.sha1()  # pylint: disable=E1101
    checksum.update(password.encode(config.get("encoding", "stock")))
    checksum.update(decoded[20:])          # salt follows the 20-byte digest
    return checksum.digest() == decoded[:20]


def _bcrypt(hash_value, password):
    """BCRYPT scheme via passlib (imported below when configured)."""
    return _passlib_bcrypt.verify(password, hash_value)


def _md5apr1(hash_value, password):
    """MD5-APR1 scheme via passlib (imported below when configured)."""
    return _passlib_md5apr1.verify(password, hash_value)


# Verification functions with no external dependencies are always available.
_verifuncs = {
    "ssha": _ssha,
    "sha1": _sha1,
    "plain": _plain}

# The remaining schemes need optional backends; import them only when the
# configured scheme actually requires it, failing loudly otherwise.
if ENCRYPTION == "md5":
    try:
        from passlib.hash import apr_md5_crypt as _passlib_md5apr1
    except ImportError:
        raise RuntimeError(("The htpasswd_encryption method 'md5' requires "
                            "availability of the passlib module."))
    _verifuncs["md5"] = _md5apr1
elif ENCRYPTION == "bcrypt":
    try:
        from passlib.hash import bcrypt as _passlib_bcrypt
    except ImportError:
        raise RuntimeError(("The htpasswd_encryption method 'bcrypt' requires "
                            "availability of the passlib module with bcrypt support."))
    # A call to `encrypt` raises passlib.exc.MissingBackendError with a good
    # error message if bcrypt backend is not available. Trigger this here.
    _passlib_bcrypt.encrypt("test-bcrypt-backend")
    _verifuncs["bcrypt"] = _bcrypt
elif ENCRYPTION == "crypt":
    try:
        import crypt
    except ImportError:
        raise RuntimeError(("The htpasswd_encryption method 'crypt' requires "
                            "crypt() system support."))
    _verifuncs["crypt"] = _crypt

# Reject an unknown scheme at import time instead of failing per-request.
if ENCRYPTION not in _verifuncs:
    raise RuntimeError(("The htpasswd encryption method '%s' is not "
                        "supported." % ENCRYPTION))


def is_authenticated(user, password):
    """Validate credentials.

    Scan the htpasswd credential file for a line whose login equals ``user``
    and whose stored hash verifies against ``password`` under the configured
    scheme.  Every failure path is padded with a random delay so response
    time does not leak information.
    """
    with open(FILENAME) as credfile:
        for raw_line in credfile:
            entry = raw_line.strip()
            if not entry:
                continue
            login, hash_value = entry.split(":")
            if login == user and _verifuncs[ENCRYPTION](hash_value, password):
                return True
    # Random timer to avoid timing oracles and simple bruteforce attacks
    time.sleep(1 + random.random())
    return False
./CrossVul/dataset_final_sorted/CWE-362/py/good_3333_0
crossvul-python_data_good_3284_0
# (from BackInTime) # Copyright (C) 2015-2017 Germar Reitze # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # (from jockey) # (c) 2008 Canonical Ltd. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # (from python-dbus-docs) # Copyright (C) 2004-2006 Red Hat Inc. <http://www.redhat.com/> # Copyright (C) 2005-2007 Collabora Ltd. 
<http://www.collabora.co.uk/> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # This file was modified by David D. Lowe in 2009. # To the extent possible under law, David D. 
Lowe has waived all # copyright and related or neighboring rights to his modifications to # this file under this license: http://creativecommons.org/publicdomain/zero/1.0/ import os import re from subprocess import Popen, PIPE try: import pwd except ImportError: pwd = None import dbus import dbus.service import dbus.mainloop.pyqt5 from PyQt5.QtCore import QCoreApplication UDEV_RULES_PATH = '/etc/udev/rules.d/99-backintime-%s.rules' class InvalidChar(dbus.DBusException): _dbus_error_name = 'net.launchpad.backintime.InvalidChar' class InvalidCmd(dbus.DBusException): _dbus_error_name = 'net.launchpad.backintime.InvalidCmd' class LimitExceeded(dbus.DBusException): _dbus_error_name = 'net.launchpad.backintime.LimitExceeded' class PermissionDeniedByPolicy(dbus.DBusException): _dbus_error_name = 'com.ubuntu.DeviceDriver.PermissionDeniedByPolicy' class UdevRules(dbus.service.Object): def __init__(self, conn=None, object_path=None, bus_name=None): super(UdevRules, self).__init__(conn, object_path, bus_name) # the following variables are used by _checkPolkitPrivilege self.polkit = None self.enforce_polkit = True self.tmpDict = {} #find su path self.su = self._which('su', '/bin/su') self.backintime = self._which('backintime', '/usr/bin/backintime') self.nice = self._which('nice', '/usr/bin/nice') self.ionice = self._which('ionice', '/usr/bin/ionice') self.max_rules = 100 self.max_users = 20 self.max_cmd_len = 100 def _which(self, exe, fallback): proc = Popen(['which', exe], stdout = PIPE) ret = proc.communicate()[0].strip().decode() if proc.returncode or not ret: return fallback return ret def _validateCmd(self, cmd): if cmd.find("&&") != -1: raise InvalidCmd("Parameter 'cmd' contains '&&' concatenation") # make sure it starts with an absolute path elif not cmd.startswith(os.path.sep): raise InvalidCmd("Parameter 'cmd' does not start with '/'") parts = cmd.split() # make sure only well known commands and switches are used whitelist = ( (self.nice, ("-n")), (self.ionice, 
("-c", "-n")), ) for c, switches in whitelist: if parts and parts[0] == c: parts.pop(0) for sw in switches: while parts and parts[0].startswith(sw): parts.pop(0) if not parts: raise InvalidCmd("Parameter 'cmd' does not contain the backintime command") elif parts[0] != self.backintime: raise InvalidCmd("Parameter 'cmd' contains non-whitelisted cmd/parameter (%s)" % parts[0]) def _checkLimits(self, owner, cmd): if len(self.tmpDict.get(owner, [])) >= self.max_rules: raise LimitExceeded("Maximum number of cached rules reached (%d)" % self.max_rules) elif len(self.tmpDict) >= self.max_users: raise LimitExceeded("Maximum number of cached users reached (%d)" % self.max_users) elif len(cmd) > self.max_cmd_len: raise LimitExceeded("Maximum length of command line reached (%d)" % self.max_cmd_len) @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='ss', out_signature='', sender_keyword='sender', connection_keyword='conn') def addRule(self, cmd, uuid, sender=None, conn=None): """ Receive command and uuid and create an Udev rule out of this. This is done on the service side to prevent malicious code to run as root. 
""" #prevent breaking out of su command chars = re.findall(r'[^a-zA-Z0-9-/\.>& ]', cmd) if chars: raise InvalidChar("Parameter 'cmd' contains invalid character(s) %s" % '|'.join(set(chars))) #only allow relevant chars in uuid chars = re.findall(r'[^a-zA-Z0-9-]', uuid) if chars: raise InvalidChar("Parameter 'uuid' contains invalid character(s) %s" % '|'.join(set(chars))) self._validateCmd(cmd) info = SenderInfo(sender, conn) user = info.connectionUnixUser() owner = info.nameOwner() self._checkLimits(owner, cmd) #create su command sucmd = "%s - '%s' -c '%s'" %(self.su, user, cmd) #create Udev rule rule = 'ACTION=="add|change", ENV{ID_FS_UUID}=="%s", RUN+="%s"\n' %(uuid, sucmd) #store rule if not owner in self.tmpDict: self.tmpDict[owner] = [] self.tmpDict[owner].append(rule) @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='', out_signature='b', sender_keyword='sender', connection_keyword='conn') def save(self, sender=None, conn=None): """ Save rules to destiantion file after user authenticated as admin. This will first check if there are any changes between temporary added rules and current rules in destiantion file. Returns False if files are identical or no rules to be installed. """ info = SenderInfo(sender, conn) user = info.connectionUnixUser() owner = info.nameOwner() #delete rule if no rules in tmp if not owner in self.tmpDict or not self.tmpDict[owner]: self.delete(sender, conn) return False #return False if rule already exist. 
if os.path.exists(UDEV_RULES_PATH % user): with open(UDEV_RULES_PATH % user, 'r') as f: if self.tmpDict[owner] == f.readlines(): self._clean(owner) return False #auth to save changes self._checkPolkitPrivilege(sender, conn, 'net.launchpad.backintime.UdevRuleSave') with open(UDEV_RULES_PATH % user, 'w') as f: f.writelines(self.tmpDict[owner]) self._clean(owner) return True @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='', out_signature='', sender_keyword='sender', connection_keyword='conn') def delete(self, sender=None, conn=None): """ Delete existing Udev rule """ info = SenderInfo(sender, conn) user = info.connectionUnixUser() owner = info.nameOwner() self._clean(owner) if os.path.exists(UDEV_RULES_PATH % user): #auth to delete rule self._checkPolkitPrivilege(sender, conn, 'net.launchpad.backintime.UdevRuleDelete') os.remove(UDEV_RULES_PATH % user) @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='', out_signature='', sender_keyword='sender', connection_keyword='conn') def clean(self, sender=None, conn=None): """ clean up previous cached rules """ info = SenderInfo(sender, conn) self._clean(info.nameOwner()) def _clean(self, owner): if owner in self.tmpDict: del self.tmpDict[owner] def _initPolkit(self): if self.polkit is None: self.polkit = dbus.Interface(dbus.SystemBus().get_object( 'org.freedesktop.PolicyKit1', '/org/freedesktop/PolicyKit1/Authority', False), 'org.freedesktop.PolicyKit1.Authority') def _checkPolkitPrivilege(self, sender, conn, privilege): # from jockey """ Verify that sender has a given PolicyKit privilege. sender is the sender's (private) D-BUS name, such as ":1:42" (sender_keyword in @dbus.service.methods). conn is the dbus.Connection object (connection_keyword in @dbus.service.methods). privilege is the PolicyKit privilege string. This method returns if the caller is privileged, and otherwise throws a PermissionDeniedByPolicy exception. 
""" if sender is None and conn is None: # called locally, not through D-BUS return if not self.enforce_polkit: # that happens for testing purposes when running on the session # bus, and it does not make sense to restrict operations here return # query PolicyKit self._initPolkit() try: # we don't need is_challenge return here, since we call with AllowUserInteraction (is_auth, _, details) = self.polkit.CheckAuthorization( ('system-bus-name', {'name': dbus.String(sender, variant_level=1)}), privilege, {'': ''}, dbus.UInt32(1), '', timeout=3000) except dbus.DBusException as e: if e._dbus_error_name == 'org.freedesktop.DBus.Error.ServiceUnknown': # polkitd timed out, connect again self.polkit = None return self._checkPolkitPrivilege(sender, conn, privilege) else: raise if not is_auth: raise PermissionDeniedByPolicy(privilege) class SenderInfo(object): def __init__(self, sender, conn): self.sender = sender self.dbus_info = dbus.Interface(conn.get_object('org.freedesktop.DBus', '/org/freedesktop/DBus/Bus', False), 'org.freedesktop.DBus') def connectionUnixUser(self): uid = self.dbus_info.GetConnectionUnixUser(self.sender) if pwd: return pwd.getpwuid(uid).pw_name else: return uid def nameOwner(self): return self.dbus_info.GetNameOwner(self.sender) def connectionPid(self): return self.dbus_info.GetConnectionUnixProcessID(self.sender) if __name__ == '__main__': dbus.mainloop.pyqt5.DBusQtMainLoop(set_as_default=True) app = QCoreApplication([]) bus = dbus.SystemBus() name = dbus.service.BusName("net.launchpad.backintime.serviceHelper", bus) object = UdevRules(bus, '/UdevRules') print("Running BIT service.") app.exec_()
./CrossVul/dataset_final_sorted/CWE-362/py/good_3284_0
crossvul-python_data_bad_3284_0
# (from BackInTime) # Copyright (C) 2015-2017 Germar Reitze # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # (from jockey) # (c) 2008 Canonical Ltd. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # (from python-dbus-docs) # Copyright (C) 2004-2006 Red Hat Inc. <http://www.redhat.com/> # Copyright (C) 2005-2007 Collabora Ltd. 
<http://www.collabora.co.uk/> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # This file was modified by David D. Lowe in 2009. # To the extent possible under law, David D. 
Lowe has waived all # copyright and related or neighboring rights to his modifications to # this file under this license: http://creativecommons.org/publicdomain/zero/1.0/ import os import re from subprocess import Popen, PIPE try: import pwd except ImportError: pwd = None import dbus import dbus.service import dbus.mainloop.pyqt5 from PyQt5.QtCore import QCoreApplication UDEV_RULES_PATH = '/etc/udev/rules.d/99-backintime-%s.rules' class InvalidChar(dbus.DBusException): _dbus_error_name = 'net.launchpad.backintime.InvalidChar' class InvalidCmd(dbus.DBusException): _dbus_error_name = 'net.launchpad.backintime.InvalidCmd' class LimitExceeded(dbus.DBusException): _dbus_error_name = 'net.launchpad.backintime.LimitExceeded' class PermissionDeniedByPolicy(dbus.DBusException): _dbus_error_name = 'com.ubuntu.DeviceDriver.PermissionDeniedByPolicy' class UdevRules(dbus.service.Object): def __init__(self, conn=None, object_path=None, bus_name=None): super(UdevRules, self).__init__(conn, object_path, bus_name) # the following variables are used by _checkPolkitPrivilege self.polkit = None self.enforce_polkit = True self.tmpDict = {} #find su path self.su = self._which('su', '/bin/su') self.backintime = self._which('backintime', '/usr/bin/backintime') self.nice = self._which('nice', '/usr/bin/nice') self.ionice = self._which('ionice', '/usr/bin/ionice') self.max_rules = 100 self.max_users = 20 self.max_cmd_len = 100 def _which(self, exe, fallback): proc = Popen(['which', exe], stdout = PIPE) ret = proc.communicate()[0].strip().decode() if proc.returncode or not ret: return fallback return ret def _validateCmd(self, cmd): if cmd.find("&&") != -1: raise InvalidCmd("Parameter 'cmd' contains '&&' concatenation") # make sure it starts with an absolute path elif not cmd.startswith(os.path.sep): raise InvalidCmd("Parameter 'cmd' does not start with '/'") parts = cmd.split() # make sure only well known commands and switches are used whitelist = ( (self.nice, ("-n")), (self.ionice, 
("-c", "-n")), ) for c, switches in whitelist: if parts and parts[0] == c: parts.pop(0) for sw in switches: while parts and parts[0].startswith(sw): parts.pop(0) if not parts: raise InvalidCmd("Parameter 'cmd' does not contain the backintime command") elif parts[0] != self.backintime: raise InvalidCmd("Parameter 'cmd' contains non-whitelisted cmd/parameter (%s)" % parts[0]) def _checkLimits(self, owner, cmd): if len(self.tmpDict.get(owner, [])) >= self.max_rules: raise LimitExceeded("Maximum number of cached rules reached (%d)" % self.max_rules) elif len(self.tmpDict) >= self.max_users: raise LimitExceeded("Maximum number of cached users reached (%d)" % self.max_users) elif len(cmd) > self.max_cmd_len: raise LimitExceeded("Maximum length of command line reached (%d)" % self.max_cmd_len) @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='ss', out_signature='', sender_keyword='sender', connection_keyword='conn') def addRule(self, cmd, uuid, sender=None, conn=None): """ Receive command and uuid and create an Udev rule out of this. This is done on the service side to prevent malicious code to run as root. 
""" #prevent breaking out of su command chars = re.findall(r'[^a-zA-Z0-9-/\.>& ]', cmd) if chars: raise InvalidChar("Parameter 'cmd' contains invalid character(s) %s" % '|'.join(set(chars))) #only allow relevant chars in uuid chars = re.findall(r'[^a-zA-Z0-9-]', uuid) if chars: raise InvalidChar("Parameter 'uuid' contains invalid character(s) %s" % '|'.join(set(chars))) self._validateCmd(cmd) info = SenderInfo(sender, conn) user = info.connectionUnixUser() owner = info.nameOwner() self._checkLimits(owner, cmd) #create su command sucmd = "%s - '%s' -c '%s'" %(self.su, user, cmd) #create Udev rule rule = 'ACTION=="add|change", ENV{ID_FS_UUID}=="%s", RUN+="%s"\n' %(uuid, sucmd) #store rule if not owner in self.tmpDict: self.tmpDict[owner] = [] self.tmpDict[owner].append(rule) @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='', out_signature='b', sender_keyword='sender', connection_keyword='conn') def save(self, sender=None, conn=None): """ Save rules to destiantion file after user authenticated as admin. This will first check if there are any changes between temporary added rules and current rules in destiantion file. Returns False if files are identical or no rules to be installed. """ info = SenderInfo(sender, conn) user = info.connectionUnixUser() owner = info.nameOwner() #delete rule if no rules in tmp if not owner in self.tmpDict or not self.tmpDict[owner]: self.delete(sender, conn) return False #return False if rule already exist. 
if os.path.exists(UDEV_RULES_PATH % user): with open(UDEV_RULES_PATH % user, 'r') as f: if self.tmpDict[owner] == f.readlines(): self._clean(owner) return False #auth to save changes self._checkPolkitPrivilege(sender, conn, 'net.launchpad.backintime.UdevRuleSave') with open(UDEV_RULES_PATH % user, 'w') as f: f.writelines(self.tmpDict[owner]) self._clean(owner) return True @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='', out_signature='', sender_keyword='sender', connection_keyword='conn') def delete(self, sender=None, conn=None): """ Delete existing Udev rule """ info = SenderInfo(sender, conn) user = info.connectionUnixUser() owner = info.nameOwner() self._clean(owner) if os.path.exists(UDEV_RULES_PATH % user): #auth to delete rule self._checkPolkitPrivilege(sender, conn, 'net.launchpad.backintime.UdevRuleDelete') os.remove(UDEV_RULES_PATH % user) @dbus.service.method("net.launchpad.backintime.serviceHelper.UdevRules", in_signature='', out_signature='', sender_keyword='sender', connection_keyword='conn') def clean(self, sender=None, conn=None): """ clean up previous cached rules """ info = SenderInfo(sender, conn) self._clean(info.nameOwner()) def _clean(self, owner): if owner in self.tmpDict: del self.tmpDict[owner] def _initPolkit(self): if self.polkit is None: self.polkit = dbus.Interface(dbus.SystemBus().get_object( 'org.freedesktop.PolicyKit1', '/org/freedesktop/PolicyKit1/Authority', False), 'org.freedesktop.PolicyKit1.Authority') def _checkPolkitPrivilege(self, sender, conn, privilege): # from jockey """ Verify that sender has a given PolicyKit privilege. sender is the sender's (private) D-BUS name, such as ":1:42" (sender_keyword in @dbus.service.methods). conn is the dbus.Connection object (connection_keyword in @dbus.service.methods). privilege is the PolicyKit privilege string. This method returns if the caller is privileged, and otherwise throws a PermissionDeniedByPolicy exception. 
""" if sender is None and conn is None: # called locally, not through D-BUS return if not self.enforce_polkit: # that happens for testing purposes when running on the session # bus, and it does not make sense to restrict operations here return info = SenderInfo(sender, conn) # get peer PID pid = info.connectionPid() # query PolicyKit self._initPolkit() try: # we don't need is_challenge return here, since we call with AllowUserInteraction (is_auth, _, details) = self.polkit.CheckAuthorization( ('unix-process', {'pid': dbus.UInt32(pid, variant_level=1), 'start-time': dbus.UInt64(0, variant_level=1)}), privilege, {'': ''}, dbus.UInt32(1), '', timeout=3000) except dbus.DBusException as e: if e._dbus_error_name == 'org.freedesktop.DBus.Error.ServiceUnknown': # polkitd timed out, connect again self.polkit = None return self._checkPolkitPrivilege(sender, conn, privilege) else: raise if not is_auth: raise PermissionDeniedByPolicy(privilege) class SenderInfo(object): def __init__(self, sender, conn): self.sender = sender self.dbus_info = dbus.Interface(conn.get_object('org.freedesktop.DBus', '/org/freedesktop/DBus/Bus', False), 'org.freedesktop.DBus') def connectionUnixUser(self): uid = self.dbus_info.GetConnectionUnixUser(self.sender) if pwd: return pwd.getpwuid(uid).pw_name else: return uid def nameOwner(self): return self.dbus_info.GetNameOwner(self.sender) def connectionPid(self): return self.dbus_info.GetConnectionUnixProcessID(self.sender) if __name__ == '__main__': dbus.mainloop.pyqt5.DBusQtMainLoop(set_as_default=True) app = QCoreApplication([]) bus = dbus.SystemBus() name = dbus.service.BusName("net.launchpad.backintime.serviceHelper", bus) object = UdevRules(bus, '/UdevRules') print("Running BIT service.") app.exec_()
./CrossVul/dataset_final_sorted/CWE-362/py/bad_3284_0
crossvul-python_data_bad_3333_0
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale.  If not, see <http://www.gnu.org/licenses/>.

"""
Implement htpasswd authentication.

Apache's htpasswd command (httpd.apache.org/docs/programs/htpasswd.html)
manages a file for storing user credentials. It can encrypt passwords using
different methods, e.g. BCRYPT, MD5-APR1 (a version of MD5 modified for
Apache), SHA1, or by using the system's CRYPT routine. The CRYPT and SHA1
encryption methods implemented by htpasswd are considered as insecure.
MD5-APR1 provides medium security as of 2015. Only BCRYPT can be considered
secure by current standards.

MD5-APR1-encrypted credentials can be written by all versions of htpasswd
(it's the default, in fact), whereas BCRYPT requires htpasswd 2.4.x or
newer.

The `is_authenticated(user, password)` function provided by this module
verifies the user-given credentials by parsing the htpasswd credential file
pointed to by the ``htpasswd_filename`` configuration value while assuming
the password encryption method specified via the ``htpasswd_encryption``
configuration value.

The following htpasswd password encryption methods are supported by Radicale
out-of-the-box:

    - plain-text (created by htpasswd -p...) -- INSECURE
    - CRYPT      (created by htpasswd -d...) -- INSECURE
    - SHA1       (created by htpasswd -s...) -- INSECURE

When passlib (https://pypi.python.org/pypi/passlib) is importable, the
following significantly more secure schemes are parsable by Radicale:

    - MD5-APR1   (htpasswd -m...) -- htpasswd's default method
    - BCRYPT     (htpasswd -B...) -- Requires htpasswd 2.4.x

"""


import base64
import hashlib
import hmac
import os

from .. import config


FILENAME = os.path.expanduser(config.get("auth", "htpasswd_filename"))
ENCRYPTION = config.get("auth", "htpasswd_encryption")


def _plain(hash_value, password):
    """Check if ``hash_value`` and ``password`` match, using plain method.

    ``hmac.compare_digest`` is used instead of ``==`` so the comparison
    takes constant time and does not leak information to timing attacks.
    """
    return hmac.compare_digest(hash_value.encode("utf-8"),
                               password.encode("utf-8"))


def _crypt(hash_value, password):
    """Check if ``hash_value`` and ``password`` match, using crypt method.

    The ``crypt`` module is imported by the module setup code below only
    when the configured method is "crypt".
    """
    crypted = crypt.crypt(password, hash_value)
    # crypt() can return None for malformed/unsupported salts; treat
    # that as a failed match instead of crashing on encode().
    if crypted is None:
        return False
    return hmac.compare_digest(crypted.encode("utf-8"),
                               hash_value.encode("utf-8"))


def _sha1(hash_value, password):
    """Check if ``hash_value`` and ``password`` match, using sha1 method."""
    hash_value = hash_value.replace("{SHA}", "").encode("ascii")
    password = password.encode(config.get("encoding", "stock"))
    sha1 = hashlib.sha1()  # pylint: disable=E1101
    sha1.update(password)
    # constant-time digest comparison
    return hmac.compare_digest(sha1.digest(), base64.b64decode(hash_value))


def _ssha(hash_salt_value, password):
    """Check if ``hash_salt_value`` and ``password`` match, using salted
    sha1 method.

    This method is not directly supported by htpasswd, but it can be
    written with e.g. openssl, and nginx can parse it.
    """
    hash_salt_value = base64.b64decode(hash_salt_value.replace("{SSHA}", ""))
    password = password.encode(config.get("encoding", "stock"))
    # first 20 bytes are the SHA1 digest, the remainder is the salt
    hash_value = hash_salt_value[:20]
    salt_value = hash_salt_value[20:]
    sha1 = hashlib.sha1()  # pylint: disable=E1101
    sha1.update(password)
    sha1.update(salt_value)
    # constant-time digest comparison
    return hmac.compare_digest(sha1.digest(), hash_value)


def _bcrypt(hash_value, password):
    """Check bcrypt credentials via passlib (imported by setup code)."""
    return _passlib_bcrypt.verify(password, hash_value)


def _md5apr1(hash_value, password):
    """Check MD5-APR1 credentials via passlib (imported by setup code)."""
    return _passlib_md5apr1.verify(password, hash_value)


# Prepare mapping between encryption names and verification functions.
# Pre-fill with methods that do not have external dependencies.
_verifuncs = {
    "ssha": _ssha,
    "sha1": _sha1,
    "plain": _plain}


# Conditionally attempt to import external dependencies.
if ENCRYPTION == "md5":
    try:
        from passlib.hash import apr_md5_crypt as _passlib_md5apr1
    except ImportError:
        raise RuntimeError(("The htpasswd_encryption method 'md5' requires "
                            "availability of the passlib module."))
    _verifuncs["md5"] = _md5apr1
elif ENCRYPTION == "bcrypt":
    try:
        from passlib.hash import bcrypt as _passlib_bcrypt
    except ImportError:
        raise RuntimeError(("The htpasswd_encryption method 'bcrypt' requires "
                            "availability of the passlib module with bcrypt support."))
    # A call to `encrypt` raises passlib.exc.MissingBackendError with a good
    # error message if bcrypt backend is not available. Trigger this here.
    _passlib_bcrypt.encrypt("test-bcrypt-backend")
    _verifuncs["bcrypt"] = _bcrypt
elif ENCRYPTION == "crypt":
    try:
        import crypt
    except ImportError:
        raise RuntimeError(("The htpasswd_encryption method 'crypt' requires "
                            "crypt() system support."))
    _verifuncs["crypt"] = _crypt


# Validate initial configuration.
if ENCRYPTION not in _verifuncs:
    raise RuntimeError(("The htpasswd encryption method '%s' is not "
                        "supported." % ENCRYPTION))


def is_authenticated(user, password):
    """Validate credentials.

    Iterate through htpasswd credential file until user matches, extract
    hash (encrypted password) and check hash against user-given password,
    using the method specified in the Radicale config.
    """
    with open(FILENAME) as f:
        for line in f:
            strippedline = line.strip()
            if strippedline:
                # Split on the FIRST colon only: the hash part may itself
                # contain colons, and an unlimited split used to raise
                # ValueError on such lines, aborting authentication for
                # every user.
                login, hash_value = strippedline.split(":", 1)
                if login == user:
                    # Allow encryption method to be overridden at runtime.
                    return _verifuncs[ENCRYPTION](hash_value, password)
    return False
./CrossVul/dataset_final_sorted/CWE-362/py/bad_3333_0
crossvul-python_data_bad_487_2
from k5test import *

realm = K5Realm(create_host=False, get_creds=False)
usercache = 'FILE:' + os.path.join(realm.testdir, 'usercache')
storagecache = 'FILE:' + os.path.join(realm.testdir, 'save')

# Create two service principals with keys in the default keytab.
service1 = 'service/1@%s' % realm.realm
realm.addprinc(service1)
realm.extract_keytab(service1, realm.keytab)
service2 = 'service/2@%s' % realm.realm
realm.addprinc(service2)
realm.extract_keytab(service2, realm.keytab)

puser = 'p:' + realm.user_princ
pservice1 = 'p:' + service1
pservice2 = 'p:' + service2

# Get forwardable creds for service1 in the default cache.
realm.kinit(service1, None, ['-f', '-k'])

# Try S4U2Self for a user with a restricted password.  Regression test:
# the KDC must issue the S4U2Self ticket (not assert or reject) even when
# the subject principal could not get an initial ticket itself because of
# a password-change requirement or an expired password.
realm.run([kadminl, 'modprinc', '+needchange', realm.user_princ])
realm.run(['./t_s4u', 'e:user', '-'])
realm.run([kadminl, 'modprinc', '-needchange', '-pwexpire', '1/1/2000',
           realm.user_princ])
realm.run(['./t_s4u', 'e:user', '-'])
realm.run([kadminl, 'modprinc', '-pwexpire', 'never', realm.user_princ])

# Try krb5 -> S4U2Proxy with forwardable user creds.  This should fail
# at the S4U2Proxy step since the DB2 back end currently has no
# support for allowing it.
realm.kinit(realm.user_princ, password('user'), ['-f', '-c', usercache])
output = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
                    pservice1, pservice2], expected_code=1)
if ('auth1: ' + realm.user_princ not in output or
    'NOT_ALLOWED_TO_DELEGATE' not in output):
    fail('krb5 -> s4u2proxy')

# Again with SPNEGO.
output = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache,
                    storagecache, '-', pservice1, pservice2],
                   expected_code=1)
if ('auth1: ' + realm.user_princ not in output or
    'NOT_ALLOWED_TO_DELEGATE' not in output):
    fail('krb5 -> s4u2proxy (SPNEGO)')

# Try krb5 -> S4U2Proxy without forwardable user creds.  This should
# result in no delegated credential being created by
# accept_sec_context.
realm.kinit(realm.user_princ, password('user'), ['-c', usercache])
realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, pservice1,
           pservice1, pservice2], expected_msg='no credential delegated')

# Try S4U2Self.  Ask for an S4U2Proxy step; this won't happen because
# service/1 isn't allowed to get a forwardable S4U2Self ticket.
output = realm.run(['./t_s4u', puser, pservice2])
if ('Warning: no delegated cred handle' not in output or
    'Source name:\t' + realm.user_princ not in output):
    fail('s4u2self')
output = realm.run(['./t_s4u', '--spnego', puser, pservice2])
if ('Warning: no delegated cred handle' not in output or
    'Source name:\t' + realm.user_princ not in output):
    fail('s4u2self (SPNEGO)')

# Correct that problem and try again.  As above, the S4U2Proxy step
# won't actually succeed since we don't support that in DB2.
realm.run([kadminl, 'modprinc', '+ok_to_auth_as_delegate', service1])
realm.run(['./t_s4u', puser, pservice2], expected_code=1,
          expected_msg='NOT_ALLOWED_TO_DELEGATE')

# Again with SPNEGO.  This uses SPNEGO for the initial authentication,
# but still uses krb5 for S4U2Proxy--the delegated cred is returned as
# a krb5 cred, not a SPNEGO cred, and t_s4u uses the delegated cred
# directly rather than saving and reacquiring it.
realm.run(['./t_s4u', '--spnego', puser, pservice2], expected_code=1,
          expected_msg='NOT_ALLOWED_TO_DELEGATE')

realm.stop()

# Set up a realm using the test KDB module so that we can do
# successful S4U2Proxy delegations.
testprincs = {'krbtgt/KRBTEST.COM': {'keys': 'aes128-cts'},
              'user': {'keys': 'aes128-cts'},
              'service/1': {'flags': '+ok-to-auth-as-delegate',
                            'keys': 'aes128-cts'},
              'service/2': {'keys': 'aes128-cts'}}
conf = {'realms': {'$realm': {'database_module': 'test'}},
        'dbmodules': {'test': {'db_library': 'test',
                               'princs': testprincs,
                               'delegation': {'service/1': 'service/2'}}}}
realm = K5Realm(create_kdb=False, kdc_conf=conf)
userkeytab = 'FILE:' + os.path.join(realm.testdir, 'userkeytab')
realm.extract_keytab(realm.user_princ, userkeytab)
realm.extract_keytab(service1, realm.keytab)
realm.extract_keytab(service2, realm.keytab)
realm.start_kdc()

# Get forwardable creds for service1 in the default cache.
realm.kinit(service1, None, ['-f', '-k'])

# Successful krb5 -> S4U2Proxy, with krb5 and SPNEGO mechs.
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
                                     '-t', userkeytab])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
                 pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
    fail('krb5 -> s4u2proxy')
out = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache,
                 '-', pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
    fail('krb5 -> s4u2proxy')

# Successful S4U2Self -> S4U2Proxy.
out = realm.run(['./t_s4u', puser, pservice2])

# Regression test for #8139: get a user ticket directly for service1 and
# try krb5 -> S4U2Proxy.
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
                                     '-t', userkeytab, '-S', service1])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
                 pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
    fail('krb5 -> s4u2proxy')

# Simulate a krbtgt rollover and verify that the user ticket can still
# be validated.
realm.stop_kdc()
newtgt_keys = ['2 aes128-cts', '1 aes128-cts']
newtgt_princs = {'krbtgt/KRBTEST.COM': {'keys': newtgt_keys}}
newtgt_conf = {'dbmodules': {'test': {'princs': newtgt_princs}}}
newtgt_env = realm.special_env('newtgt', True, kdc_conf=newtgt_conf)
realm.start_kdc(env=newtgt_env)
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
                 pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
    fail('krb5 -> s4u2proxy')

# Get a user ticket after the krbtgt rollover and verify that
# S4U2Proxy delegation works (also a #8139 regression test).
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
                                     '-t', userkeytab])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
                 pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
    fail('krb5 -> s4u2proxy')

realm.stop()

# Test cross realm S4U2Self using server referrals.
mark('cross-realm S4U2Self')
testprincs = {'krbtgt/SREALM': {'keys': 'aes128-cts'},
              'krbtgt/UREALM': {'keys': 'aes128-cts'},
              'user': {'keys': 'aes128-cts', 'flags': '+preauth'}}
kdcconf1 = {'realms': {'$realm': {'database_module': 'test'}},
            'dbmodules': {'test': {'db_library': 'test',
                                   'princs': testprincs,
                                   'alias': {'enterprise@abc': '@UREALM'}}}}
kdcconf2 = {'realms': {'$realm': {'database_module': 'test'}},
            'dbmodules': {'test': {'db_library': 'test',
                                   'princs': testprincs,
                                   'alias': {'user@SREALM': '@SREALM',
                                             'enterprise@abc': 'user'}}}}
r1, r2 = cross_realms(2, xtgts=(),
                      args=({'realm': 'SREALM', 'kdc_conf': kdcconf1},
                            {'realm': 'UREALM', 'kdc_conf': kdcconf2}),
                      create_kdb=False)
r1.start_kdc()
r2.start_kdc()
r1.extract_keytab(r1.user_princ, r1.keytab)
r1.kinit(r1.user_princ, None, ['-k', '-t', r1.keytab])

# Include a regression test for #8741 by unsetting the default realm.
remove_default = {'libdefaults': {'default_realm': None}}
no_default = r1.special_env('no_default', False, krb5_conf=remove_default)
msgs = ('Getting credentials user@UREALM -> user@SREALM',
        '/Matching credential not found',
        'Getting credentials user@SREALM -> krbtgt/UREALM@SREALM',
        'Received creds for desired service krbtgt/UREALM@SREALM',
        'via TGT krbtgt/UREALM@SREALM after requesting user\\@SREALM@UREALM',
        'krbtgt/SREALM@UREALM differs from requested user\\@SREALM@UREALM',
        'via TGT krbtgt/SREALM@UREALM after requesting user@SREALM',
        'TGS reply is for user@UREALM -> user@SREALM')
r1.run(['./t_s4u', 'p:' + r2.user_princ, '-', r1.keytab], env=no_default,
       expected_trace=msgs)

# Test realm identification of enterprise principal names ([MS-S4U]
# 3.1.5.1.1.1).  Attach a bogus realm to the enterprise name to verify
# that we start at the server realm.
mark('cross-realm S4U2Self with enterprise name')
msgs = ('Getting initial credentials for enterprise\\@abc@SREALM',
        'Processing preauth types: PA-FOR-X509-USER (130)',
        'Sending unauthenticated request',
        '/Realm not local to KDC',
        'Following referral to realm UREALM',
        'Processing preauth types: PA-FOR-X509-USER (130)',
        'Sending unauthenticated request',
        '/Additional pre-authentication required',
        '/Generic preauthentication failure',
        'Getting credentials enterprise\\@abc@UREALM -> user@SREALM',
        'TGS reply is for enterprise\@abc@UREALM -> user@SREALM')
r1.run(['./t_s4u', 'e:enterprise@abc@NOREALM', '-', r1.keytab],
       expected_trace=msgs)

r1.stop()
r2.stop()

success('S4U test cases')
crossvul-python_data_good_487_2
from k5test import * realm = K5Realm(create_host=False, get_creds=False) usercache = 'FILE:' + os.path.join(realm.testdir, 'usercache') storagecache = 'FILE:' + os.path.join(realm.testdir, 'save') # Create two service principals with keys in the default keytab. service1 = 'service/1@%s' % realm.realm realm.addprinc(service1) realm.extract_keytab(service1, realm.keytab) service2 = 'service/2@%s' % realm.realm realm.addprinc(service2) realm.extract_keytab(service2, realm.keytab) puser = 'p:' + realm.user_princ pservice1 = 'p:' + service1 pservice2 = 'p:' + service2 # Get forwardable creds for service1 in the default cache. realm.kinit(service1, None, ['-f', '-k']) # Try S4U2Self for user with a restricted password. realm.run([kadminl, 'modprinc', '+needchange', realm.user_princ]) realm.run(['./t_s4u', 'e:user', '-']) realm.run([kadminl, 'modprinc', '-needchange', '-pwexpire', '1/1/2000', realm.user_princ]) realm.run(['./t_s4u', 'e:user', '-']) realm.run([kadminl, 'modprinc', '-pwexpire', 'never', realm.user_princ]) # Try krb5 -> S4U2Proxy with forwardable user creds. This should fail # at the S4U2Proxy step since the DB2 back end currently has no # support for allowing it. realm.kinit(realm.user_princ, password('user'), ['-f', '-c', usercache]) output = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-', pservice1, pservice2], expected_code=1) if ('auth1: ' + realm.user_princ not in output or 'NOT_ALLOWED_TO_DELEGATE' not in output): fail('krb5 -> s4u2proxy') # Again with SPNEGO. output = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache, '-', pservice1, pservice2], expected_code=1) if ('auth1: ' + realm.user_princ not in output or 'NOT_ALLOWED_TO_DELEGATE' not in output): fail('krb5 -> s4u2proxy (SPNEGO)') # Try krb5 -> S4U2Proxy without forwardable user creds. This should # result in no delegated credential being created by # accept_sec_context. 
realm.kinit(realm.user_princ, password('user'), ['-c', usercache]) realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, pservice1, pservice1, pservice2], expected_msg='no credential delegated') # Try S4U2Self. Ask for an S4U2Proxy step; this won't happen because # service/1 isn't allowed to get a forwardable S4U2Self ticket. output = realm.run(['./t_s4u', puser, pservice2]) if ('Warning: no delegated cred handle' not in output or 'Source name:\t' + realm.user_princ not in output): fail('s4u2self') output = realm.run(['./t_s4u', '--spnego', puser, pservice2]) if ('Warning: no delegated cred handle' not in output or 'Source name:\t' + realm.user_princ not in output): fail('s4u2self (SPNEGO)') # Correct that problem and try again. As above, the S4U2Proxy step # won't actually succeed since we don't support that in DB2. realm.run([kadminl, 'modprinc', '+ok_to_auth_as_delegate', service1]) realm.run(['./t_s4u', puser, pservice2], expected_code=1, expected_msg='NOT_ALLOWED_TO_DELEGATE') # Again with SPNEGO. This uses SPNEGO for the initial authentication, # but still uses krb5 for S4U2Proxy--the delegated cred is returned as # a krb5 cred, not a SPNEGO cred, and t_s4u uses the delegated cred # directly rather than saving and reacquiring it. realm.run(['./t_s4u', '--spnego', puser, pservice2], expected_code=1, expected_msg='NOT_ALLOWED_TO_DELEGATE') realm.stop() # Set up a realm using the test KDB module so that we can do # successful S4U2Proxy delegations. 
testprincs = {'krbtgt/KRBTEST.COM': {'keys': 'aes128-cts'}, 'user': {'keys': 'aes128-cts'}, 'service/1': {'flags': '+ok-to-auth-as-delegate', 'keys': 'aes128-cts'}, 'service/2': {'keys': 'aes128-cts'}} conf = {'realms': {'$realm': {'database_module': 'test'}}, 'dbmodules': {'test': {'db_library': 'test', 'princs': testprincs, 'delegation': {'service/1': 'service/2'}}}} realm = K5Realm(create_kdb=False, kdc_conf=conf) userkeytab = 'FILE:' + os.path.join(realm.testdir, 'userkeytab') realm.extract_keytab(realm.user_princ, userkeytab) realm.extract_keytab(service1, realm.keytab) realm.extract_keytab(service2, realm.keytab) realm.start_kdc() # Get forwardable creds for service1 in the default cache. realm.kinit(service1, None, ['-f', '-k']) # Successful krb5 -> S4U2Proxy, with krb5 and SPNEGO mechs. realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache, '-t', userkeytab]) out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-', pservice1, pservice2]) if 'auth1: user@' not in out or 'auth2: user@' not in out: fail('krb5 -> s4u2proxy') out = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache, '-', pservice1, pservice2]) if 'auth1: user@' not in out or 'auth2: user@' not in out: fail('krb5 -> s4u2proxy') # Successful S4U2Self -> S4U2Proxy. out = realm.run(['./t_s4u', puser, pservice2]) # Regression test for #8139: get a user ticket directly for service1 and # try krb5 -> S4U2Proxy. realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache, '-t', userkeytab, '-S', service1]) out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-', pservice1, pservice2]) if 'auth1: user@' not in out or 'auth2: user@' not in out: fail('krb5 -> s4u2proxy') # Simulate a krbtgt rollover and verify that the user ticket can still # be validated. 
realm.stop_kdc() newtgt_keys = ['2 aes128-cts', '1 aes128-cts'] newtgt_princs = {'krbtgt/KRBTEST.COM': {'keys': newtgt_keys}} newtgt_conf = {'dbmodules': {'test': {'princs': newtgt_princs}}} newtgt_env = realm.special_env('newtgt', True, kdc_conf=newtgt_conf) realm.start_kdc(env=newtgt_env) out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-', pservice1, pservice2]) if 'auth1: user@' not in out or 'auth2: user@' not in out: fail('krb5 -> s4u2proxy') # Get a user ticket after the krbtgt rollover and verify that # S4U2Proxy delegation works (also a #8139 regression test). realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache, '-t', userkeytab]) out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-', pservice1, pservice2]) if 'auth1: user@' not in out or 'auth2: user@' not in out: fail('krb5 -> s4u2proxy') realm.stop() # Test cross realm S4U2Self using server referrals. mark('cross-realm S4U2Self') testprincs = {'krbtgt/SREALM': {'keys': 'aes128-cts'}, 'krbtgt/UREALM': {'keys': 'aes128-cts'}, 'user': {'keys': 'aes128-cts', 'flags': '+preauth'}} kdcconf1 = {'realms': {'$realm': {'database_module': 'test'}}, 'dbmodules': {'test': {'db_library': 'test', 'princs': testprincs, 'alias': {'enterprise@abc': '@UREALM'}}}} kdcconf2 = {'realms': {'$realm': {'database_module': 'test'}}, 'dbmodules': {'test': {'db_library': 'test', 'princs': testprincs, 'alias': {'user@SREALM': '@SREALM', 'enterprise@abc': 'user'}}}} r1, r2 = cross_realms(2, xtgts=(), args=({'realm': 'SREALM', 'kdc_conf': kdcconf1}, {'realm': 'UREALM', 'kdc_conf': kdcconf2}), create_kdb=False) r1.start_kdc() r2.start_kdc() r1.extract_keytab(r1.user_princ, r1.keytab) r1.kinit(r1.user_princ, None, ['-k', '-t', r1.keytab]) # Include a regression test for #8741 by unsetting the default realm. 
remove_default = {'libdefaults': {'default_realm': None}} no_default = r1.special_env('no_default', False, krb5_conf=remove_default) msgs = ('Getting credentials user@UREALM -> user@SREALM', '/Matching credential not found', 'Getting credentials user@SREALM -> krbtgt/UREALM@SREALM', 'Received creds for desired service krbtgt/UREALM@SREALM', 'via TGT krbtgt/UREALM@SREALM after requesting user\\@SREALM@UREALM', 'krbtgt/SREALM@UREALM differs from requested user\\@SREALM@UREALM', 'via TGT krbtgt/SREALM@UREALM after requesting user@SREALM', 'TGS reply is for user@UREALM -> user@SREALM') r1.run(['./t_s4u', 'p:' + r2.user_princ, '-', r1.keytab], env=no_default, expected_trace=msgs) # Test realm identification of enterprise principal names ([MS-S4U] # 3.1.5.1.1.1). Attach a bogus realm to the enterprise name to verify # that we start at the server realm. mark('cross-realm S4U2Self with enterprise name') msgs = ('Getting initial credentials for enterprise\\@abc@SREALM', 'Processing preauth types: PA-FOR-X509-USER (130)', 'Sending unauthenticated request', '/Realm not local to KDC', 'Following referral to realm UREALM', 'Processing preauth types: PA-FOR-X509-USER (130)', 'Sending unauthenticated request', '/Additional pre-authentication required', '/Generic preauthentication failure', 'Getting credentials enterprise\\@abc@UREALM -> user@SREALM', 'TGS reply is for enterprise\@abc@UREALM -> user@SREALM') r1.run(['./t_s4u', 'e:enterprise@abc@NOREALM', '-', r1.keytab], expected_trace=msgs) r1.stop() r2.stop() success('S4U test cases')
./CrossVul/dataset_final_sorted/CWE-617/py/good_487_2
crossvul-python_data_good_5563_1
# packet.py # # Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net> # # A RADIUS packet as defined in RFC 2138 import struct import random try: import hashlib md5_constructor = hashlib.md5 except ImportError: # BBB for python 2.4 import md5 md5_constructor = md5.new import six from pyrad import tools # Packet codes AccessRequest = 1 AccessAccept = 2 AccessReject = 3 AccountingRequest = 4 AccountingResponse = 5 AccessChallenge = 11 StatusServer = 12 StatusClient = 13 DisconnectRequest = 40 DisconnectACK = 41 DisconnectNAK = 42 CoARequest = 43 CoAACK = 44 CoANAK = 45 # Use cryptographic-safe random generator as provided by the OS. random_generator = random.SystemRandom() # Current ID CurrentID = random_generator.randrange(1, 255) class PacketError(Exception): pass class Packet(dict): """Packet acts like a standard python map to provide simple access to the RADIUS attributes. Since RADIUS allows for repeated attributes the value will always be a sequence. pyrad makes sure to preserve the ordering when encoding and decoding packets. There are two ways to use the map intereface: if attribute names are used pyrad take care of en-/decoding data. If the attribute type number (or a vendor ID/attribute type tuple for vendor attributes) is used you work with the raw data. Normally you will not use this class directly, but one of the :obj:`AuthPacket` or :obj:`AcctPacket` classes. 
""" def __init__(self, code=0, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ dict.__init__(self) self.code = code if id is not None: self.id = id else: self.id = CreateID() if not isinstance(secret, six.binary_type): raise TypeError('secret must be a binary string') self.secret = secret if authenticator is not None and \ not isinstance(authenticator, six.binary_type): raise TypeError('authenticator must be a binary string') self.authenticator = authenticator if 'dict' in attributes: self.dict = attributes['dict'] if 'packet' in attributes: self.DecodePacket(attributes['packet']) for (key, value) in attributes.items(): if key in ['dict', 'fd', 'packet']: continue key = key.replace('_', '-') self.AddAttribute(key, value) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. 
""" return Packet(id=self.id, secret=self.secret, authenticator=self.authenticator, dict=self.dict, **attributes) def _DecodeValue(self, attr, value): if attr.values.HasBackward(value): return attr.values.GetBackward(value) else: return tools.DecodeAttr(attr.type, value) def _EncodeValue(self, attr, value): if attr.values.HasForward(value): return attr.values.GetForward(value) else: return tools.EncodeAttr(attr.type, value) def _EncodeKeyValues(self, key, values): if not isinstance(key, str): return (key, values) attr = self.dict.attributes[key] if attr.vendor: key = (self.dict.vendors.GetForward(attr.vendor), attr.code) else: key = attr.code return (key, [self._EncodeValue(attr, v) for v in values]) def _EncodeKey(self, key): if not isinstance(key, str): return key attr = self.dict.attributes[key] if attr.vendor: return (self.dict.vendors.GetForward(attr.vendor), attr.code) else: return attr.code def _DecodeKey(self, key): """Turn a key into a string if possible""" if self.dict.attrindex.HasBackward(key): return self.dict.attrindex.GetBackward(key) return key def AddAttribute(self, key, value): """Add an attribute to the packet. 
:param key: attribute name or identification :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute """ (key, value) = self._EncodeKeyValues(key, [value]) value = value[0] self.setdefault(key, []).append(value) def __getitem__(self, key): if not isinstance(key, six.string_types): return dict.__getitem__(self, key) values = dict.__getitem__(self, self._EncodeKey(key)) attr = self.dict.attributes[key] res = [] for v in values: res.append(self._DecodeValue(attr, v)) return res def __contains__(self, key): try: return dict.__contains__(self, self._EncodeKey(key)) except KeyError: return False has_key = __contains__ def __delitem__(self, key): dict.__delitem__(self, self._EncodeKey(key)) def __setitem__(self, key, item): if isinstance(key, six.string_types): (key, item) = self._EncodeKeyValues(key, [item]) dict.__setitem__(self, key, item) else: assert isinstance(item, list) dict.__setitem__(self, key, item) def keys(self): return [self._DecodeKey(key) for key in dict.keys(self)] @staticmethod def CreateAuthenticator(): """Create a packet autenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string """ data = [] for i in range(16): data.append(random_generator.randrange(0, 256)) if six.PY3: return bytes(data) else: return ''.join(chr(b) for b in data) def CreateID(self): """Create a packet ID. All RADIUS requests have a ID which is used to identify a request. This is used to detect retries and replay attacks. This function returns a suitable random number that can be used as ID. :return: ID number :rtype: integer """ return random_generator.randrange(0, 256) def ReplyPacket(self): """Create a ready-to-transmit authentication reply packet. 
Returns a RADIUS packet which can be directly transmitted to a RADIUS server. This differs with Packet() in how the authenticator is calculated. :return: raw packet :rtype: string """ assert(self.authenticator) assert(self.secret) attr = self._PktEncodeAttributes() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) authenticator = md5_constructor(header[0:4] + self.authenticator + attr + self.secret).digest() return header + authenticator + attr def VerifyReply(self, reply, rawreply=None): if reply.id != self.id: return False if rawreply is None: rawreply = reply.ReplyPacket() hash = md5_constructor(rawreply[0:4] + self.authenticator + rawreply[20:] + self.secret).digest() if hash != rawreply[4:20]: return False return True def _PktEncodeAttribute(self, key, value): if isinstance(key, tuple): value = struct.pack('!L', key[0]) + \ self._PktEncodeAttribute(key[1], value) key = 26 return struct.pack('!BB', key, (len(value) + 2)) + value def _PktEncodeAttributes(self): result = six.b('') for (code, datalst) in self.items(): for data in datalst: result += self._PktEncodeAttribute(code, data) return result def _PktDecodeVendorAttribute(self, data): # Check if this packet is long enough to be in the # RFC2865 recommended form if len(data) < 6: return (26, data) (vendor, type, length) = struct.unpack('!LBB', data[:6])[0:3] # Another sanity check if len(data) != length + 4: return (26, data) return ((vendor, type), data[6:]) def DecodePacket(self, packet): """Initialize the object from raw packet data. Decode a packet as received from the network and decode it. 
:param packet: raw packet :type packet: string""" try: (self.code, self.id, length, self.authenticator) = \ struct.unpack('!BBH16s', packet[0:20]) except struct.error: raise PacketError('Packet header is corrupt') if len(packet) != length: raise PacketError('Packet has invalid length') if length > 8192: raise PacketError('Packet length is too long (%d)' % length) self.clear() packet = packet[20:] while packet: try: (key, attrlen) = struct.unpack('!BB', packet[0:2]) except struct.error: raise PacketError('Attribute header is corrupt') if attrlen < 2: raise PacketError( 'Attribute length is too small (%d)' % attrlen) value = packet[2:attrlen] if key == 26: (key, value) = self._PktDecodeVendorAttribute(value) self.setdefault(key, []).append(value) packet = packet[attrlen:] class AuthPacket(Packet): def __init__(self, code=AccessRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param code: packet type code :type code: integer (8bits) :param id: packet identifaction number :type id: integer (8 bits) :param secret: secret needed to communicate with a RADIUS server :type secret: string :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AuthPacket(AccessAccept, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. 
:return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH16s', self.code, self.id, (20 + len(attr)), self.authenticator) return header + attr def PwDecrypt(self, password): """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string """ buf = password pw = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): pw += bytes((hash[i] ^ buf[i],)) else: for i in range(16): pw += chr(ord(hash[i]) ^ ord(buf[i])) (last, buf) = (buf[:16], buf[16:]) while pw.endswith(six.b('\x00')): pw = pw[:-1] return pw.decode('utf-8') def PwCrypt(self, password): """Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. 
:param password: plaintext password :type password: unicode stringn :return: obfuscated version of the password :rtype: binary string """ if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if isinstance(password, six.text_type): password = password.encode('utf-8') buf = password if len(password) % 16 != 0: buf += six.b('\x00') * (16 - (len(password) % 16)) hash = md5_constructor(self.secret + self.authenticator).digest() result = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): result += bytes((hash[i] ^ buf[i],)) else: for i in range(16): result += chr(ord(hash[i]) ^ ord(buf[i])) last = result[-16:] buf = buf[16:] return result class AcctPacket(Packet): """RADIUS accounting packets. This class is a specialization of the generic :obj:`Packet` class for accounting packets. """ def __init__(self, code=AccountingRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) if 'packet' in attributes: self.raw_packet = attributes['packet'] def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AcctPacket(AccountingResponse, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def VerifyAcctRequest(self): """Verify request authenticator. 
:return: True if verification failed else False :rtype: boolean """ assert(self.raw_packet) hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') + self.raw_packet[20:] + self.secret).digest() return hash == self.authenticator def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. :return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00') + attr + self.secret).digest() return header + self.authenticator + attr def CreateID(): """Generate a packet ID. :return: packet ID :rtype: 8 bit integer """ global CurrentID CurrentID = (CurrentID + 1) % 256 return CurrentID
./CrossVul/dataset_final_sorted/CWE-330/py/good_5563_1
crossvul-python_data_bad_5563_1
# packet.py # # Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net> # # A RADIUS packet as defined in RFC 2138 import struct import random try: import hashlib md5_constructor = hashlib.md5 except ImportError: # BBB for python 2.4 import md5 md5_constructor = md5.new import six from pyrad import tools # Packet codes AccessRequest = 1 AccessAccept = 2 AccessReject = 3 AccountingRequest = 4 AccountingResponse = 5 AccessChallenge = 11 StatusServer = 12 StatusClient = 13 DisconnectRequest = 40 DisconnectACK = 41 DisconnectNAK = 42 CoARequest = 43 CoAACK = 44 CoANAK = 45 # Current ID CurrentID = random.randrange(1, 255) class PacketError(Exception): pass class Packet(dict): """Packet acts like a standard python map to provide simple access to the RADIUS attributes. Since RADIUS allows for repeated attributes the value will always be a sequence. pyrad makes sure to preserve the ordering when encoding and decoding packets. There are two ways to use the map intereface: if attribute names are used pyrad take care of en-/decoding data. If the attribute type number (or a vendor ID/attribute type tuple for vendor attributes) is used you work with the raw data. Normally you will not use this class directly, but one of the :obj:`AuthPacket` or :obj:`AcctPacket` classes. 
""" def __init__(self, code=0, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ dict.__init__(self) self.code = code if id is not None: self.id = id else: self.id = CreateID() if not isinstance(secret, six.binary_type): raise TypeError('secret must be a binary string') self.secret = secret if authenticator is not None and \ not isinstance(authenticator, six.binary_type): raise TypeError('authenticator must be a binary string') self.authenticator = authenticator if 'dict' in attributes: self.dict = attributes['dict'] if 'packet' in attributes: self.DecodePacket(attributes['packet']) for (key, value) in attributes.items(): if key in ['dict', 'fd', 'packet']: continue key = key.replace('_', '-') self.AddAttribute(key, value) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. 
""" return Packet(id=self.id, secret=self.secret, authenticator=self.authenticator, dict=self.dict, **attributes) def _DecodeValue(self, attr, value): if attr.values.HasBackward(value): return attr.values.GetBackward(value) else: return tools.DecodeAttr(attr.type, value) def _EncodeValue(self, attr, value): if attr.values.HasForward(value): return attr.values.GetForward(value) else: return tools.EncodeAttr(attr.type, value) def _EncodeKeyValues(self, key, values): if not isinstance(key, str): return (key, values) attr = self.dict.attributes[key] if attr.vendor: key = (self.dict.vendors.GetForward(attr.vendor), attr.code) else: key = attr.code return (key, [self._EncodeValue(attr, v) for v in values]) def _EncodeKey(self, key): if not isinstance(key, str): return key attr = self.dict.attributes[key] if attr.vendor: return (self.dict.vendors.GetForward(attr.vendor), attr.code) else: return attr.code def _DecodeKey(self, key): """Turn a key into a string if possible""" if self.dict.attrindex.HasBackward(key): return self.dict.attrindex.GetBackward(key) return key def AddAttribute(self, key, value): """Add an attribute to the packet. 
:param key: attribute name or identification :type key: string, attribute code or (vendor code, attribute code) tuple :param value: value :type value: depends on type of attribute """ (key, value) = self._EncodeKeyValues(key, [value]) value = value[0] self.setdefault(key, []).append(value) def __getitem__(self, key): if not isinstance(key, six.string_types): return dict.__getitem__(self, key) values = dict.__getitem__(self, self._EncodeKey(key)) attr = self.dict.attributes[key] res = [] for v in values: res.append(self._DecodeValue(attr, v)) return res def __contains__(self, key): try: return dict.__contains__(self, self._EncodeKey(key)) except KeyError: return False has_key = __contains__ def __delitem__(self, key): dict.__delitem__(self, self._EncodeKey(key)) def __setitem__(self, key, item): if isinstance(key, six.string_types): (key, item) = self._EncodeKeyValues(key, [item]) dict.__setitem__(self, key, item) else: assert isinstance(item, list) dict.__setitem__(self, key, item) def keys(self): return [self._DecodeKey(key) for key in dict.keys(self)] @staticmethod def CreateAuthenticator(): """Create a packet autenticator. All RADIUS packets contain a sixteen byte authenticator which is used to authenticate replies from the RADIUS server and in the password hiding algorithm. This function returns a suitable random string that can be used as an authenticator. :return: valid packet authenticator :rtype: binary string """ data = [] for i in range(16): data.append(random.randrange(0, 256)) if six.PY3: return bytes(data) else: return ''.join(chr(b) for b in data) def CreateID(self): """Create a packet ID. All RADIUS requests have a ID which is used to identify a request. This is used to detect retries and replay attacks. This function returns a suitable random number that can be used as ID. :return: ID number :rtype: integer """ return random.randrange(0, 256) def ReplyPacket(self): """Create a ready-to-transmit authentication reply packet. 
Returns a RADIUS packet which can be directly transmitted to a RADIUS server. This differs with Packet() in how the authenticator is calculated. :return: raw packet :rtype: string """ assert(self.authenticator) assert(self.secret) attr = self._PktEncodeAttributes() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) authenticator = md5_constructor(header[0:4] + self.authenticator + attr + self.secret).digest() return header + authenticator + attr def VerifyReply(self, reply, rawreply=None): if reply.id != self.id: return False if rawreply is None: rawreply = reply.ReplyPacket() hash = md5_constructor(rawreply[0:4] + self.authenticator + rawreply[20:] + self.secret).digest() if hash != rawreply[4:20]: return False return True def _PktEncodeAttribute(self, key, value): if isinstance(key, tuple): value = struct.pack('!L', key[0]) + \ self._PktEncodeAttribute(key[1], value) key = 26 return struct.pack('!BB', key, (len(value) + 2)) + value def _PktEncodeAttributes(self): result = six.b('') for (code, datalst) in self.items(): for data in datalst: result += self._PktEncodeAttribute(code, data) return result def _PktDecodeVendorAttribute(self, data): # Check if this packet is long enough to be in the # RFC2865 recommended form if len(data) < 6: return (26, data) (vendor, type, length) = struct.unpack('!LBB', data[:6])[0:3] # Another sanity check if len(data) != length + 4: return (26, data) return ((vendor, type), data[6:]) def DecodePacket(self, packet): """Initialize the object from raw packet data. Decode a packet as received from the network and decode it. 
:param packet: raw packet :type packet: string""" try: (self.code, self.id, length, self.authenticator) = \ struct.unpack('!BBH16s', packet[0:20]) except struct.error: raise PacketError('Packet header is corrupt') if len(packet) != length: raise PacketError('Packet has invalid length') if length > 8192: raise PacketError('Packet length is too long (%d)' % length) self.clear() packet = packet[20:] while packet: try: (key, attrlen) = struct.unpack('!BB', packet[0:2]) except struct.error: raise PacketError('Attribute header is corrupt') if attrlen < 2: raise PacketError( 'Attribute length is too small (%d)' % attrlen) value = packet[2:attrlen] if key == 26: (key, value) = self._PktDecodeVendorAttribute(value) self.setdefault(key, []).append(value) packet = packet[attrlen:] class AuthPacket(Packet): def __init__(self, code=AccessRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param code: packet type code :type code: integer (8bits) :param id: packet identifaction number :type id: integer (8 bits) :param secret: secret needed to communicate with a RADIUS server :type secret: string :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AuthPacket(AccessAccept, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. 
:return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH16s', self.code, self.id, (20 + len(attr)), self.authenticator) return header + attr def PwDecrypt(self, password): """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string """ buf = password pw = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): pw += bytes((hash[i] ^ buf[i],)) else: for i in range(16): pw += chr(ord(hash[i]) ^ ord(buf[i])) (last, buf) = (buf[:16], buf[16:]) while pw.endswith(six.b('\x00')): pw = pw[:-1] return pw.decode('utf-8') def PwCrypt(self, password): """Obfuscate password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. If no authenticator has been set before calling PwCrypt one is created automatically. Changing the authenticator after setting a password that has been encrypted using this function will not work. 
:param password: plaintext password :type password: unicode stringn :return: obfuscated version of the password :rtype: binary string """ if self.authenticator is None: self.authenticator = self.CreateAuthenticator() if isinstance(password, six.text_type): password = password.encode('utf-8') buf = password if len(password) % 16 != 0: buf += six.b('\x00') * (16 - (len(password) % 16)) hash = md5_constructor(self.secret + self.authenticator).digest() result = six.b('') last = self.authenticator while buf: hash = md5_constructor(self.secret + last).digest() if six.PY3: for i in range(16): result += bytes((hash[i] ^ buf[i],)) else: for i in range(16): result += chr(ord(hash[i]) ^ ord(buf[i])) last = result[-16:] buf = buf[16:] return result class AcctPacket(Packet): """RADIUS accounting packets. This class is a specialization of the generic :obj:`Packet` class for accounting packets. """ def __init__(self, code=AccountingRequest, id=None, secret=six.b(''), authenticator=None, **attributes): """Constructor :param dict: RADIUS dictionary :type dict: pyrad.dictionary.Dictionary class :param secret: secret needed to communicate with a RADIUS server :type secret: string :param id: packet identifaction number :type id: integer (8 bits) :param code: packet type code :type code: integer (8bits) :param packet: raw packet to decode :type packet: string """ Packet.__init__(self, code, id, secret, authenticator, **attributes) if 'packet' in attributes: self.raw_packet = attributes['packet'] def CreateReply(self, **attributes): """Create a new packet as a reply to this one. This method makes sure the authenticator and secret are copied over to the new instance. """ return AcctPacket(AccountingResponse, self.id, self.secret, self.authenticator, dict=self.dict, **attributes) def VerifyAcctRequest(self): """Verify request authenticator. 
:return: True if verification failed else False :rtype: boolean """ assert(self.raw_packet) hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') + self.raw_packet[20:] + self.secret).digest() return hash == self.authenticator def RequestPacket(self): """Create a ready-to-transmit authentication request packet. Return a RADIUS packet which can be directly transmitted to a RADIUS server. :return: raw packet :rtype: string """ attr = self._PktEncodeAttributes() if self.id is None: self.id = self.CreateID() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00') + attr + self.secret).digest() return header + self.authenticator + attr def CreateID(): """Generate a packet ID. :return: packet ID :rtype: 8 bit integer """ global CurrentID CurrentID = (CurrentID + 1) % 256 return CurrentID
./CrossVul/dataset_final_sorted/CWE-330/py/bad_5563_1
crossvul-python_data_good_18_0
from django.apps import AppConfig from django.core import checks from .checks import check_deprecated_settings class AnymailBaseConfig(AppConfig): name = 'anymail' verbose_name = "Anymail" def ready(self): checks.register(check_deprecated_settings)
./CrossVul/dataset_final_sorted/CWE-532/py/good_18_0
crossvul-python_data_bad_18_1
404: Not Found
./CrossVul/dataset_final_sorted/CWE-532/py/bad_18_1
crossvul-python_data_good_18_2
import warnings import six from django.http import HttpResponse from django.utils.crypto import constant_time_compare from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View from ..exceptions import AnymailInsecureWebhookWarning, AnymailWebhookValidationFailure from ..utils import get_anymail_setting, collect_all_methods, get_request_basic_auth class AnymailBasicAuthMixin(object): """Implements webhook basic auth as mixin to AnymailBaseWebhookView.""" # Whether to warn if basic auth is not configured. # For most ESPs, basic auth is the only webhook security, # so the default is True. Subclasses can set False if # they enforce other security (like signed webhooks). warn_if_no_basic_auth = True # List of allowable HTTP basic-auth 'user:pass' strings. basic_auth = None # (Declaring class attr allows override by kwargs in View.as_view.) def __init__(self, **kwargs): self.basic_auth = get_anymail_setting('webhook_secret', default=[], kwargs=kwargs) # no esp_name -- auth is shared between ESPs if not self.basic_auth: # Temporarily allow deprecated WEBHOOK_AUTHORIZATION setting self.basic_auth = get_anymail_setting('webhook_authorization', default=[], kwargs=kwargs) # Allow a single string: if isinstance(self.basic_auth, six.string_types): self.basic_auth = [self.basic_auth] if self.warn_if_no_basic_auth and len(self.basic_auth) < 1: warnings.warn( "Your Anymail webhooks are insecure and open to anyone on the web. " "You should set WEBHOOK_SECRET in your ANYMAIL settings. " "See 'Securing webhooks' in the Anymail docs.", AnymailInsecureWebhookWarning) # noinspection PyArgumentList super(AnymailBasicAuthMixin, self).__init__(**kwargs) def validate_request(self, request): """If configured for webhook basic auth, validate request has correct auth.""" if self.basic_auth: request_auth = get_request_basic_auth(request) # Use constant_time_compare to avoid timing attack on basic auth. 
(It's OK that any() # can terminate early: we're not trying to protect how many auth strings are allowed, # just the contents of each individual auth string.) auth_ok = any(constant_time_compare(request_auth, allowed_auth) for allowed_auth in self.basic_auth) if not auth_ok: # noinspection PyUnresolvedReferences raise AnymailWebhookValidationFailure( "Missing or invalid basic auth in Anymail %s webhook" % self.esp_name) # Mixin note: Django's View.__init__ doesn't cooperate with chaining, # so all mixins that need __init__ must appear before View in MRO. class AnymailBaseWebhookView(AnymailBasicAuthMixin, View): """Base view for processing ESP event webhooks ESP-specific implementations should subclass and implement parse_events. They may also want to implement validate_request if additional security is available. """ def __init__(self, **kwargs): super(AnymailBaseWebhookView, self).__init__(**kwargs) self.validators = collect_all_methods(self.__class__, 'validate_request') # Subclass implementation: # Where to send events: either ..signals.inbound or ..signals.tracking signal = None def validate_request(self, request): """Check validity of webhook post, or raise AnymailWebhookValidationFailure. AnymailBaseWebhookView includes basic auth validation. Subclasses can implement (or provide via mixins) if the ESP supports additional validation (such as signature checking). *All* definitions of this method in the class chain (including mixins) will be called. There is no need to chain to the superclass. (See self.run_validators and collect_all_methods.) Security note: use django.utils.crypto.constant_time_compare for string comparisons, to avoid exposing your validation to a timing attack. """ # if not constant_time_compare(request.POST['signature'], expected_signature): # raise AnymailWebhookValidationFailure("...message...") # (else just do nothing) pass def parse_events(self, request): """Return a list of normalized AnymailWebhookEvent extracted from ESP post data. 
Subclasses must implement. """ raise NotImplementedError() # HTTP handlers (subclasses shouldn't need to override): http_method_names = ["post", "head", "options"] @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(AnymailBaseWebhookView, self).dispatch(request, *args, **kwargs) def head(self, request, *args, **kwargs): # Some ESPs verify the webhook with a HEAD request at configuration time return HttpResponse() def post(self, request, *args, **kwargs): # Normal Django exception handling will do the right thing: # - AnymailWebhookValidationFailure will turn into an HTTP 400 response # (via Django SuspiciousOperation handling) # - Any other errors (e.g., in signal dispatch) will turn into HTTP 500 # responses (via normal Django error handling). ESPs generally # treat that as "try again later". self.run_validators(request) events = self.parse_events(request) esp_name = self.esp_name for event in events: self.signal.send(sender=self.__class__, event=event, esp_name=esp_name) return HttpResponse() # Request validation (subclasses shouldn't need to override): def run_validators(self, request): for validator in self.validators: validator(self, request) @property def esp_name(self): """ Read-only name of the ESP for this webhook view. Subclasses must override with class attr. E.g.: esp_name = "Postmark" esp_name = "SendGrid" # (use ESP's preferred capitalization) """ raise NotImplementedError("%s.%s must declare esp_name class attr" % (self.__class__.__module__, self.__class__.__name__))
./CrossVul/dataset_final_sorted/CWE-532/py/good_18_2
crossvul-python_data_bad_3972_0
# Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from abc import ABC, abstractmethod import attr import collections import enum import fnmatch import itertools import logging import math import os import pathlib import platform from curtin import storage_config from curtin.util import human2bytes from probert.storage import StorageInfo log = logging.getLogger('subiquity.models.filesystem') def _set_backlinks(obj): for field in attr.fields(type(obj)): backlink = field.metadata.get('backlink') if backlink is None: continue v = getattr(obj, field.name) if v is None: continue if not isinstance(v, (list, set)): v = [v] for vv in v: b = getattr(vv, backlink, None) if isinstance(b, list): b.append(obj) elif isinstance(b, set): b.add(obj) else: setattr(vv, backlink, obj) def _remove_backlinks(obj): for field in attr.fields(type(obj)): backlink = field.metadata.get('backlink') if backlink is None: continue v = getattr(obj, field.name) if v is None: continue if not isinstance(v, (list, set)): v = [v] for vv in v: b = getattr(vv, backlink, None) if isinstance(b, list): b.remove(obj) elif isinstance(b, set): b.remove(obj) else: setattr(vv, backlink, None) _type_to_cls = {} def fsobj__repr(obj): args = [] for f in attr.fields(type(obj)): if f.name.startswith("_"): continue v = getattr(obj, f.name) if v is f.default: continue if f.metadata.get('ref', 
False): v = v.id elif f.metadata.get('reflist', False): if isinstance(v, set): delims = "{}" else: delims = "[]" v = delims[0] + ", ".join(vv.id for vv in v) + delims[1] elif f.metadata.get('redact', False): v = "<REDACTED>" else: v = repr(v) args.append("{}={}".format(f.name, v)) return "{}({})".format(type(obj).__name__, ", ".join(args)) def fsobj(typ): def wrapper(c): c.__attrs_post_init__ = _set_backlinks c.type = attributes.const(typ) c.id = attributes.idfield(typ) c._m = attr.ib(repr=None, default=None) c = attr.s(cmp=False, repr=False)(c) c.__repr__ = fsobj__repr _type_to_cls[typ] = c return c return wrapper def dependencies(obj): for f in attr.fields(type(obj)): v = getattr(obj, f.name) if not v: continue elif f.metadata.get('ref', False): yield v elif f.metadata.get('reflist', False): yield from v def reverse_dependencies(obj): for f in attr.fields(type(obj)): if not f.metadata.get('is_backlink', False): continue v = getattr(obj, f.name) if isinstance(v, (list, set)): yield from v elif v is not None: yield v @attr.s(cmp=False) class RaidLevel: name = attr.ib() value = attr.ib() min_devices = attr.ib() supports_spares = attr.ib(default=True) raidlevels = [ RaidLevel(_("0 (striped)"), "raid0", 2, False), RaidLevel(_("1 (mirrored)"), "raid1", 2), RaidLevel(_("5"), "raid5", 3), RaidLevel(_("6"), "raid6", 4), RaidLevel(_("10"), "raid10", 4), ] def _raidlevels_by_value(): r = {level.value: level for level in raidlevels} for n in 0, 1, 5, 6, 10: r[str(n)] = r[n] = r["raid"+str(n)] r["stripe"] = r["raid0"] r["mirror"] = r["raid1"] return r raidlevels_by_value = _raidlevels_by_value() HUMAN_UNITS = ['B', 'K', 'M', 'G', 'T', 'P'] def humanize_size(size): if size == 0: return "0B" p = int(math.floor(math.log(size, 2) / 10)) # We want to truncate the non-integral part, not round to nearest. 
s = "{:.17f}".format(size / 2 ** (10 * p)) i = s.index('.') s = s[:i + 4] return s + HUMAN_UNITS[int(p)] def dehumanize_size(size): # convert human 'size' to integer size_in = size if not size: raise ValueError("input cannot be empty") if not size[-1].isdigit(): suffix = size[-1].upper() size = size[:-1] else: suffix = None parts = size.split('.') if len(parts) > 2: raise ValueError(_("{!r} is not valid input").format(size_in)) elif len(parts) == 2: div = 10 ** len(parts[1]) size = parts[0] + parts[1] else: div = 1 try: num = int(size) except ValueError: raise ValueError(_("{!r} is not valid input").format(size_in)) if suffix is not None: if suffix not in HUMAN_UNITS: raise ValueError( "unrecognized suffix {!r} in {!r}".format(size_in[-1], size_in)) mult = 2 ** (10 * HUMAN_UNITS.index(suffix)) else: mult = 1 if num < 0: raise ValueError("{!r}: cannot be negative".format(size_in)) return num * mult // div DEFAULT_CHUNK = 512 # The calculation of how much of a device mdadm uses for raid is more than a # touch ridiculous. What follows is a translation of the code at: # https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/tree/super1.c, # specifically choose_bm_space and the end of validate_geometry1. Note that # that calculations are in terms of 512-byte sectors. # # We make some assumptions about the defaults mdadm uses but mostly that the # default metadata version is 1.2, and other formats use less space. # # Note that data_offset is computed for the first disk mdadm examines and then # used for all devices, so the order matters! (Well, if the size of the # devices vary, which is not normal but also not something we prevent). 
# # All this is tested against reality in ./scripts/get-raid-sizes.py def calculate_data_offset_bytes(devsize): # Convert to sectors to make it easier to compare this code to mdadm's (we # convert back at the end) devsize >>= 9 devsize = align_down(devsize, DEFAULT_CHUNK) # conversion of choose_bm_space: if devsize < 64*2: bmspace = 0 elif devsize - 64*2 >= 200*1024*1024*2: bmspace = 128*2 elif devsize - 4*2 > 8*1024*1024*2: bmspace = 64*2 else: bmspace = 4*2 # From the end of validate_geometry1, assuming metadata 1.2. headroom = 128*1024*2 while (headroom << 10) > devsize and headroom / 2 >= DEFAULT_CHUNK*2*2: headroom >>= 1 data_offset = 12*2 + bmspace + headroom log.debug( "get_raid_size: adjusting for %s sectors of overhead", data_offset) data_offset = align_up(data_offset, 2*1024) # convert back to bytes return data_offset << 9 def raid_device_sort(devices): # Because the device order matters to mdadm, we sort consistently but # arbitrarily when computing the size and when rendering the config (so # curtin passes the devices to mdadm in the order we calculate the size # for) return sorted(devices, key=lambda d: d.id) def get_raid_size(level, devices): if len(devices) == 0: return 0 devices = raid_device_sort(devices) data_offset = calculate_data_offset_bytes(devices[0].size) sizes = [align_down(dev.size - data_offset) for dev in devices] min_size = min(sizes) if min_size <= 0: return 0 if level == "raid0": return sum(sizes) elif level == "raid1": return min_size elif level == "raid5": return min_size * (len(devices) - 1) elif level == "raid6": return min_size * (len(devices) - 2) elif level == "raid10": return min_size * (len(devices) // 2) else: raise ValueError("unknown raid level %s" % level) # These are only defaults but curtin does not let you change/specify # them at this time. 
LVM_OVERHEAD = (1 << 20) LVM_CHUNK_SIZE = 4 * (1 << 20) def get_lvm_size(devices, size_overrides={}): r = 0 for d in devices: r += align_down( size_overrides.get(d, d.size) - LVM_OVERHEAD, LVM_CHUNK_SIZE) return r def _conv_size(s): if isinstance(s, str): if '%' in s: return s return int(human2bytes(s)) return s class attributes: # Just a namespace to hang our wrappers around attr.ib() off. @staticmethod def idfield(base): i = 0 def factory(): nonlocal i r = "%s-%s" % (base, i) i += 1 return r return attr.ib(default=attr.Factory(factory)) @staticmethod def ref(*, backlink=None): metadata = {'ref': True} if backlink: metadata['backlink'] = backlink return attr.ib(metadata=metadata) @staticmethod def reflist(*, backlink=None, default=attr.NOTHING): metadata = {'reflist': True} if backlink: metadata['backlink'] = backlink return attr.ib(metadata=metadata, default=default) @staticmethod def backlink(*, default=None): return attr.ib( init=False, default=default, metadata={'is_backlink': True}) @staticmethod def const(value): return attr.ib(default=value) @staticmethod def size(): return attr.ib(converter=_conv_size) @staticmethod def ptable(): def conv(val): if val == "dos": val = "msdos" return val return attr.ib(default=None, converter=conv) def asdict(inst): r = collections.OrderedDict() for field in attr.fields(type(inst)): if field.name.startswith('_'): continue m = getattr(inst, 'serialize_' + field.name, None) if m: r.update(m()) else: v = getattr(inst, field.name) if v is not None: if field.metadata.get('ref', False): r[field.name] = v.id elif field.metadata.get('reflist', False): r[field.name] = [elem.id for elem in v] else: r[field.name] = v return r # This code is not going to make much sense unless you have read # http://curtin.readthedocs.io/en/latest/topics/storage.html. The # Disk, Partition etc classes correspond to entries in curtin's # storage config list. 
They are mostly 'dumb data', all the logic is # in the FilesystemModel or FilesystemController classes. class DeviceAction(enum.Enum): INFO = _("Info") EDIT = _("Edit") REFORMAT = _("Reformat") PARTITION = _("Add Partition") CREATE_LV = _("Create Logical Volume") FORMAT = _("Format") REMOVE = _("Remove from RAID/LVM") DELETE = _("Delete") TOGGLE_BOOT = _("Make Boot Device") def _generic_can_EDIT(obj): cd = obj.constructed_device() if cd is None: return True return _( "Cannot edit {selflabel} as it is part of the {cdtype} " "{cdname}.").format( selflabel=obj.label, cdtype=cd.desc(), cdname=cd.label) def _generic_can_REMOVE(obj): cd = obj.constructed_device() if cd is None: return False if cd.preserve: return _("Cannot remove {selflabel} from pre-existing {cdtype} " "{cdlabel}.").format( selflabel=obj.label, cdtype=cd.desc(), cdlabel=cd.label) if isinstance(cd, Raid): if obj in cd.spare_devices: return True min_devices = raidlevels_by_value[cd.raidlevel].min_devices if len(cd.devices) == min_devices: return _( "Removing {selflabel} would leave the {cdtype} {cdlabel} with " "less than {min_devices} devices.").format( selflabel=obj.label, cdtype=cd.desc(), cdlabel=cd.label, min_devices=min_devices) elif isinstance(cd, LVM_VolGroup): if len(cd.devices) == 1: return _( "Removing {selflabel} would leave the {cdtype} {cdlabel} with " "no devices.").format( selflabel=obj.label, cdtype=cd.desc(), cdlabel=cd.label) return True def _generic_can_DELETE(obj): cd = obj.constructed_device() if cd is None: return True return _( "Cannot delete {selflabel} as it is part of the {cdtype} " "{cdname}.").format( selflabel=obj.label, cdtype=cd.desc(), cdname=cd.label) @attr.s(cmp=False) class _Formattable(ABC): # Base class for anything that can be formatted and mounted, # e.g. a disk or a RAID or a partition. 
@property @abstractmethod def label(self): pass @property def annotations(self): preserve = getattr(self, 'preserve', None) if preserve is None: return [] elif preserve: return [_("existing")] else: return [_("new")] # Filesystem _fs = attributes.backlink() # Raid or LVM_VolGroup for now, but one day ZPool, BCache... _constructed_device = attributes.backlink() def usage_labels(self): cd = self.constructed_device() if cd is not None: return [ _("{component_name} of {desc} {name}").format( component_name=cd.component_name, desc=cd.desc(), name=cd.name), ] fs = self.fs() if fs is not None: if fs.preserve: format_desc = _("already formatted as {fstype}") elif self.original_fstype() is not None: format_desc = _("to be reformatted as {fstype}") else: format_desc = _("to be formatted as {fstype}") r = [format_desc.format(fstype=fs.fstype)] if self._m.is_mounted_filesystem(fs.fstype): m = fs.mount() if m: r.append(_("mounted at {path}").format(path=m.path)) elif getattr(self, 'flag', None) != "boot": r.append(_("not mounted")) elif fs.preserve: if fs.mount() is None: r.append(_("unused")) else: r.append(_("used")) return r else: return [_("unused")] def _is_entirely_used(self): return self._fs is not None or self._constructed_device is not None def fs(self): return self._fs def original_fstype(self): for action in self._m._orig_config: if action['type'] == 'format' and action['volume'] == self.id: return action['fstype'] for action in self._m._orig_config: if action['id'] == self.id and action.get('flag') == 'swap': return 'swap' return None def constructed_device(self, skip_dm_crypt=True): cd = self._constructed_device if cd is None: return None elif cd.type == "dm_crypt" and skip_dm_crypt: return cd._constructed_device else: return cd @property @abstractmethod def supported_actions(self): pass def action_possible(self, action): assert action in self.supported_actions r = getattr(self, "_can_" + action.name) if isinstance(r, bool): return r, None elif isinstance(r, str): 
return False, r else: return r @property @abstractmethod def ok_for_raid(self): pass @property @abstractmethod def ok_for_lvm_vg(self): pass # Nothing is put in the first and last megabytes of the disk to allow # space for the GPT data. GPT_OVERHEAD = 2 * (1 << 20) @attr.s(cmp=False) class _Device(_Formattable, ABC): # Anything that can have partitions, e.g. a disk or a RAID. @property @abstractmethod def size(self): pass # [Partition] _partitions = attributes.backlink(default=attr.Factory(list)) def dasd(self): return None def ptable_for_new_partition(self): if self.ptable is not None: return self.ptable for action in self._m._orig_config: if action['id'] == self.id: if action.get('ptable') == 'vtoc': return action['ptable'] if self.dasd() is not None: return 'vtoc' return 'gpt' def partitions(self): return self._partitions @property def used(self): if self._is_entirely_used(): return self.size r = 0 for p in self._partitions: if p.flag == "extended": continue r += p.size return r @property def empty(self): return self.used == 0 @property def available_for_partitions(self): return self.size - GPT_OVERHEAD @property def free_for_partitions(self): return self.available_for_partitions - self.used def available(self): # A _Device is available if: # 1) it is not part of a device like a RAID or LVM or zpool or ... 
# 2) if it is formatted, it is available if it is formatted with fs # that needs to be mounted and is not mounted # 3) if it is not formatted, it is available if it has free # space OR at least one partition is not formatted or is formatted # with a fs that needs to be mounted and is not mounted if self._constructed_device is not None: return False if self._fs is not None: return self._fs._available() if self.free_for_partitions > 0: if not self._has_preexisting_partition(): return True for p in self._partitions: if p.available(): return True return False def has_unavailable_partition(self): for p in self._partitions: if not p.available(): return True return False def _has_preexisting_partition(self): for p in self._partitions: if p.preserve: return True else: return False @property def _can_DELETE(self): mounted_partitions = 0 for p in self._partitions: if p.fs() and p.fs().mount(): mounted_partitions += 1 elif p.constructed_device(): cd = p.constructed_device() return _( "Cannot delete {selflabel} as partition {partnum} is part " "of the {cdtype} {cdname}.").format( selflabel=self.label, partnum=p._number, cdtype=cd.desc(), cdname=cd.label, ) if mounted_partitions > 1: return _( "Cannot delete {selflabel} because it has {count} mounted " "partitions.").format( selflabel=self.label, count=mounted_partitions) elif mounted_partitions == 1: return _( "Cannot delete {selflabel} because it has 1 mounted partition." 
).format(selflabel=self.label) else: return _generic_can_DELETE(self) @fsobj("dasd") class Dasd: device_id = attr.ib() blocksize = attr.ib() disk_layout = attr.ib() label = attr.ib(default=None) mode = attr.ib(default=None) preserve = attr.ib(default=False) @fsobj("disk") class Disk(_Device): ptable = attributes.ptable() serial = attr.ib(default=None) wwn = attr.ib(default=None) multipath = attr.ib(default=None) path = attr.ib(default=None) model = attr.ib(default=None) wipe = attr.ib(default=None) preserve = attr.ib(default=False) name = attr.ib(default="") grub_device = attr.ib(default=False) device_id = attr.ib(default=None) _info = attr.ib(default=None) def info_for_display(self): bus = self._info.raw.get('ID_BUS', None) major = self._info.raw.get('MAJOR', None) if bus is None and major == '253': bus = 'virtio' devpath = self._info.raw.get('DEVPATH', self.path) # XXX probert should be doing this!! rotational = '1' try: dev = os.path.basename(devpath) rfile = '/sys/class/block/{}/queue/rotational'.format(dev) rotational = open(rfile, 'r').read().strip() except (PermissionError, FileNotFoundError, IOError): log.exception('WARNING: Failed to read file {}'.format(rfile)) pass dinfo = { 'bus': bus, 'devname': self.path, 'devpath': devpath, 'model': self.model or 'unknown', 'serial': self.serial or 'unknown', 'wwn': self.wwn or 'unknown', 'multipath': self.multipath or 'unknown', 'size': self.size, 'humansize': humanize_size(self.size), 'vendor': self._info.vendor or 'unknown', 'rotational': 'true' if rotational == '1' else 'false', } return dinfo @property def size(self): return align_down(self._info.size) @property def annotations(self): return [] def desc(self): if self.multipath: return _("multipath device") return _("local disk") @property def label(self): if self.multipath: return self.wwn return self.serial or self.path def dasd(self): return self._m._one(type='dasd', device_id=self.device_id) def _can_be_boot_disk(self): bl = self._m.bootloader if 
self._has_preexisting_partition(): if bl == Bootloader.BIOS: if self.ptable == "msdos": return True else: return self._partitions[0].flag == "bios_grub" else: flag = {Bootloader.UEFI: "boot", Bootloader.PREP: "prep"}[bl] for p in self._partitions: if p.flag == flag: return True return False else: return True @property def supported_actions(self): actions = [ DeviceAction.INFO, DeviceAction.REFORMAT, DeviceAction.PARTITION, DeviceAction.FORMAT, DeviceAction.REMOVE, ] if self._m.bootloader != Bootloader.NONE: actions.append(DeviceAction.TOGGLE_BOOT) return actions _can_INFO = True @property def _can_REFORMAT(self): if len(self._partitions) == 0: return False for p in self._partitions: if p._constructed_device is not None: return False return True @property def _can_PARTITION(self): if self._has_preexisting_partition(): return False if self.free_for_partitions <= 0: return False if self.ptable == 'vtoc' and len(self._partitions) >= 3: return False return True _can_FORMAT = property( lambda self: len(self._partitions) == 0 and self._constructed_device is None) _can_REMOVE = property(_generic_can_REMOVE) def _is_boot_device(self): bl = self._m.bootloader if bl == Bootloader.NONE: return False elif bl == Bootloader.BIOS: return self.grub_device elif bl in [Bootloader.PREP, Bootloader.UEFI]: for p in self._partitions: if p.grub_device: return True return False @property def _can_TOGGLE_BOOT(self): if self._is_boot_device(): for disk in self._m.all_disks(): if disk is not self and disk._is_boot_device(): return True return False elif self._fs is not None or self._constructed_device is not None: return False else: return self._can_be_boot_disk() @property def ok_for_raid(self): if self._fs is not None: if self._fs.preserve: return self._fs._mount is None return False if self._constructed_device is not None: return False if len(self._partitions) > 0: return False return True ok_for_lvm_vg = ok_for_raid @fsobj("partition") class Partition(_Formattable): device = 
attributes.ref(backlink="_partitions") # Disk size = attributes.size() wipe = attr.ib(default=None) flag = attr.ib(default=None) number = attr.ib(default=None) preserve = attr.ib(default=False) grub_device = attr.ib(default=False) @property def annotations(self): r = super().annotations if self.flag == "prep": r.append("PReP") if self.preserve: if self.grub_device: r.append(_("configured")) else: r.append(_("unconfigured")) elif self.flag == "boot": if self.fs() and self.fs().mount(): r.append(_("primary ESP")) elif self.grub_device: r.append(_("backup ESP")) else: r.append(_("unused ESP")) elif self.flag == "bios_grub": if self.preserve: if self.device.grub_device: r.append(_("configured")) else: r.append(_("unconfigured")) r.append("bios_grub") elif self.flag == "extended": r.append(_("extended")) elif self.flag == "logical": r.append(_("logical")) return r def usage_labels(self): if self.flag == "prep" or self.flag == "bios_grub": return [] return super().usage_labels() def desc(self): return _("partition of {device}").format(device=self.device.desc()) @property def label(self): return _("partition {number} of {device}").format( number=self._number, device=self.device.label) @property def short_label(self): return _("partition {number}").format(number=self._number) def available(self): if self.flag in ['bios_grub', 'prep'] or self.grub_device: return False if self._constructed_device is not None: return False if self._fs is None: return True return self._fs._available() def serialize_number(self): return {'number': self._number} @property def _number(self): if self.preserve: return self.number else: return self.device._partitions.index(self) + 1 supported_actions = [ DeviceAction.EDIT, DeviceAction.REMOVE, DeviceAction.DELETE, ] _can_EDIT = property(_generic_can_EDIT) _can_REMOVE = property(_generic_can_REMOVE) @property def _can_DELETE(self): if self.device._has_preexisting_partition(): return _("Cannot delete a single partition from a device that " "already 
has partitions.") if self.flag in ('boot', 'bios_grub', 'prep'): return _("Cannot delete required bootloader partition") return _generic_can_DELETE(self) @property def ok_for_raid(self): if self.flag in ('boot', 'bios_grub', 'prep'): return False if self._fs is not None: if self._fs.preserve: return self._fs._mount is None return False if self._constructed_device is not None: return False return True ok_for_lvm_vg = ok_for_raid @fsobj("raid") class Raid(_Device): name = attr.ib() raidlevel = attr.ib(converter=lambda x: raidlevels_by_value[x].value) devices = attributes.reflist(backlink="_constructed_device") def serialize_devices(self): # Surprisingly, the order of devices passed to mdadm --create # matters (see get_raid_size) so we sort devices here the same # way get_raid_size does. return {'devices': [d.id for d in raid_device_sort(self.devices)]} spare_devices = attributes.reflist( backlink="_constructed_device", default=attr.Factory(set)) preserve = attr.ib(default=False) ptable = attributes.ptable() @property def size(self): return get_raid_size(self.raidlevel, self.devices) @property def available_for_partitions(self): # For some reason, the overhead on RAID devices seems to be # higher (may be related to alignment of underlying # partitions) return self.size - 2*GPT_OVERHEAD @property def label(self): return self.name def desc(self): return _("software RAID {level}").format(level=self.raidlevel[4:]) supported_actions = [ DeviceAction.EDIT, DeviceAction.PARTITION, DeviceAction.FORMAT, DeviceAction.REMOVE, DeviceAction.DELETE, DeviceAction.REFORMAT, ] @property def _can_EDIT(self): if self.preserve: return _("Cannot edit pre-existing RAIDs.") elif len(self._partitions) > 0: return _( "Cannot edit {selflabel} because it has partitions.").format( selflabel=self.label) else: return _generic_can_EDIT(self) _can_PARTITION = Disk._can_PARTITION _can_REFORMAT = Disk._can_REFORMAT _can_FORMAT = property( lambda self: len(self._partitions) == 0 and 
self._constructed_device is None) _can_REMOVE = property(_generic_can_REMOVE) @property def ok_for_raid(self): if self._fs is not None: if self._fs.preserve: return self._fs._mount is None return False if self._constructed_device is not None: return False if len(self._partitions) > 0: return False return True ok_for_lvm_vg = ok_for_raid # What is a device that makes up this device referred to as? component_name = "component" @fsobj("lvm_volgroup") class LVM_VolGroup(_Device): name = attr.ib() devices = attributes.reflist(backlink="_constructed_device") preserve = attr.ib(default=False) @property def size(self): # Should probably query actual size somehow for an existing VG! return get_lvm_size(self.devices) @property def available_for_partitions(self): return self.size @property def annotations(self): r = super().annotations member = next(iter(self.devices)) if member.type == "dm_crypt": r.append(_("encrypted")) return r @property def label(self): return self.name def desc(self): return _("LVM volume group") supported_actions = [ DeviceAction.EDIT, DeviceAction.CREATE_LV, DeviceAction.DELETE, ] @property def _can_EDIT(self): if self.preserve: return _("Cannot edit pre-existing volume groups.") elif len(self._partitions) > 0: return _( "Cannot edit {selflabel} because it has logical " "volumes.").format( selflabel=self.label) else: return _generic_can_EDIT(self) _can_CREATE_LV = property( lambda self: not self.preserve and self.free_for_partitions > 0) ok_for_raid = False ok_for_lvm_vg = False # What is a device that makes up this device referred to as? 
    # What a member device of this VG is called in the UI.
    component_name = "PV"


@fsobj("lvm_partition")
class LVM_LogicalVolume(_Formattable):
    # A logical volume inside an LVM_VolGroup; behaves like a partition.
    name = attr.ib()
    volgroup = attributes.ref(backlink="_partitions")  # LVM_VolGroup
    size = attributes.size()
    preserve = attr.ib(default=False)

    def serialize_size(self):
        # LVs are serialized with an explicit byte suffix for curtin.
        return {'size': "{}B".format(self.size)}

    def available(self):
        # Available when nothing is stacked on top of it and any
        # filesystem on it is itself available.
        if self._constructed_device is not None:
            return False
        if self._fs is None:
            return True
        return self._fs._available()

    @property
    def flag(self):
        # LVs have no partition flag; returning None lets code that
        # checks .flag treat LVs like ordinary partitions.
        return None  # hack!

    def desc(self):
        return _("LVM logical volume")

    @property
    def short_label(self):
        return self.name

    label = short_label

    supported_actions = [
        DeviceAction.EDIT,
        DeviceAction.DELETE,
        ]

    _can_EDIT = True

    @property
    def _can_DELETE(self):
        # Deleting one LV from a pre-existing VG is not supported.
        if self.volgroup._has_preexisting_partition():
            return _("Cannot delete a single logical volume from a volume "
                     "group that already has logical volumes.")
        return True

    ok_for_raid = False
    ok_for_lvm_vg = False


# Space consumed by the LUKS header on an encrypted volume.
LUKS_OVERHEAD = 16*(2**20)


@fsobj("dm_crypt")
class DM_Crypt:
    # A LUKS encryption layer on top of a _Formattable volume.
    volume = attributes.ref(backlink="_constructed_device")  # _Formattable
    # The passphrase; marked redact so it is hidden in reprs.
    key = attr.ib(metadata={'redact': True})
    dm_name = attr.ib(default=None)
    preserve = attr.ib(default=False)

    _constructed_device = attributes.backlink()

    def constructed_device(self):
        return self._constructed_device

    @property
    def size(self):
        # Usable size is the backing volume minus the LUKS header.
        return self.volume.size - LUKS_OVERHEAD


@fsobj("format")
class Filesystem:
    # A filesystem on a _Formattable volume (curtin "format" action).
    fstype = attr.ib()
    volume = attributes.ref(backlink="_fs")  # _Formattable
    label = attr.ib(default=None)
    uuid = attr.ib(default=None)
    preserve = attr.ib(default=False)

    _mount = attributes.backlink()

    def mount(self):
        return self._mount

    def _available(self):
        # False if mounted or if fs does not require a mount, True otherwise.
        if self._mount is None:
            if self.preserve:
                return True
            else:
                return FilesystemModel.is_mounted_filesystem(self.fstype)
        else:
            return False


@fsobj("mount")
class Mount:
    # Mounts a Filesystem at a path (empty/None path means swap).
    device = attributes.ref(backlink="_mount")  # Filesystem
    path = attr.ib()

    def can_delete(self):
        # Can't delete mount of /boot/efi or swap, anything else is fine.
        if not self.path:
            # swap mount
            return False
        if not isinstance(self.device.volume, Partition):
            # Can't be /boot/efi if volume is not a partition
            return True
        if self.device.volume.flag == "boot":
            # /boot/efi
            return False
        return True


def align_up(size, block_size=1 << 20):
    # Round size up to a multiple of block_size (default 1MiB).
    return (size + block_size - 1) & ~(block_size - 1)


def align_down(size, block_size=1 << 20):
    # Round size down to a multiple of block_size (default 1MiB).
    return size & ~(block_size - 1)


class Bootloader(enum.Enum):
    NONE = "NONE"  # a system where the bootloader is external, e.g. s390x
    BIOS = "BIOS"  # BIOS, where the bootloader dd-ed to the start of a device
    UEFI = "UEFI"  # UEFI, ESPs and /boot/efi and all that (amd64 and arm64)
    PREP = "PREP"  # ppc64el, which puts grub on a PReP partition


class FilesystemModel(object):
    # In-memory model of the storage actions, rendered to curtin config.

    lower_size_limit = 128 * (1 << 20)

    # Prefix under which the target system is mounted; read in
    # _actions_from_config.  NOTE(review): assigned elsewhere (not visible
    # here) -- confirm it is set before probe data is loaded.
    target = None

    @classmethod
    def is_mounted_filesystem(self, fstype):
        # NOTE: classmethod whose first arg is (unconventionally) named self.
        if fstype in [None, 'swap']:
            return False
        else:
            return True

    def _probe_bootloader(self):
        # This will at some point change to return a list so that we can
        # configure BIOS _and_ UEFI on amd64 systems.
        if os.path.exists('/sys/firmware/efi'):
            return Bootloader.UEFI
        elif platform.machine().startswith("ppc64"):
            return Bootloader.PREP
        elif platform.machine() == "s390x":
            return Bootloader.NONE
        else:
            return Bootloader.BIOS

    def __init__(self):
        self.bootloader = self._probe_bootloader()
        self._probe_data = None
        self.reset()

    def reset(self):
        # Rebuild the action list from probe data (or empty, pre-probe).
        if self._probe_data is not None:
            self._orig_config = storage_config.extract_storage_config(
                self._probe_data)["storage"]["config"]
            self._actions = self._actions_from_config(
                self._orig_config, self._probe_data['blockdev'])
        else:
            self._orig_config = []
            self._actions = []
        self.swap = None
        self.grub = None

    def _make_matchers(self, match):
        # Build predicate functions for the autoinstall disk 'match' spec.
        matchers = []

        def match_serial(disk):
            if disk.serial is not None:
                return fnmatch.fnmatchcase(disk.serial, match['serial'])

        def match_model(disk):
            if disk.model is not None:
                return fnmatch.fnmatchcase(disk.model, match['model'])

        def match_path(disk):
            if disk.path is not None:
                return fnmatch.fnmatchcase(disk.path, match['path'])

        def match_ssd(disk):
            is_ssd = disk.info_for_display()['rotational'] == 'false'
            return is_ssd == match['ssd']

        if 'serial' in match:
            matchers.append(match_serial)
        if 'model' in match:
            matchers.append(match_model)
        if 'path' in match:
            matchers.append(match_path)
        if 'ssd' in match:
            matchers.append(match_ssd)

        return matchers

    def disk_for_match(self, disks, match):
        # Return the first disk satisfying every matcher (or the largest
        # such disk if size: largest was requested), else None.
        matchers = self._make_matchers(match)
        candidates = []
        for candidate in disks:
            for matcher in matchers:
                if not matcher(candidate):
                    break
            else:
                candidates.append(candidate)
        if match.get('size') == 'largest':
            candidates.sort(key=lambda d: d.size, reverse=True)
        if candidates:
            return candidates[0]
        return None

    def apply_autoinstall_config(self, ai_config):
        # Resolve disk matches in the autoinstall config, then rebuild the
        # actions and resolve relative/percentage partition sizes.
        disks = self.all_disks()
        for action in ai_config:
            if action['type'] == 'disk':
                disk = None
                if 'serial' in action:
                    disk = self._one(type='disk', serial=action['serial'])
                elif 'path' in action:
                    disk = self._one(type='disk', path=action['path'])
                else:
                    match = action.pop('match', {})
                    disk = self.disk_for_match(disks, match)
                    if disk is None:
                        action['match'] = match
                if disk is None:
                    raise Exception("{} matched no disk".format(action))
                if disk not in disks:
                    raise Exception(
                        "{} matched {} which was already used".format(
                            action, disk))
                disks.remove(disk)
                action['path'] = disk.path
                action['serial'] = disk.serial
        self._actions = self._actions_from_config(
            ai_config, self._probe_data['blockdev'], is_autoinstall=True)
        for p in self._all(type="partition") + self._all(type="lvm_partition"):
            [parent] = list(dependencies(p))
            if isinstance(p.size, int):
                if p.size < 0:
                    if p is not parent.partitions()[-1]:
                        raise Exception(
                            "{} has negative size but is not final partition "
                            "of {}".format(p, parent))
                    # Zero the size first so free_for_partitions is computed
                    # without this partition's placeholder size.
                    p.size = 0
                    p.size = parent.free_for_partitions
            elif isinstance(p.size, str):
                if p.size.endswith("%"):
                    percentage = int(p.size[:-1])
                    p.size = align_down(
                        parent.available_for_partitions*percentage//100)
                else:
                    p.size = dehumanize_size(p.size)

    def _actions_from_config(self, config, blockdevs, is_autoinstall=False):
        """Convert curtin storage config into action instances.

        curtin represents storage "actions" as defined in
        https://curtin.readthedocs.io/en/latest/topics/storage.html.  We
        convert each action (that we know about) into an instance of
        Disk, Partition, RAID, etc (unknown actions, e.g. bcache, are
        just ignored).

        We also filter out anything that can be reached from a currently
        mounted device.  The motivation here is only to exclude the media
        subiquity is mounted from, so this might be a bit excessive but
        hey it works.

        Perhaps surprisingly the order of the returned actions matters.
        The devices are presented in the filesystem view in the reverse
        of the order they appear in _actions, which means that e.g. a
        RAID appears higher up the list than the disks is is composed
        of.  This is quite important as it makes "unpeeling" existing
        compound structures easy, you just delete the top device until
        you only have disks left.
        """
        byid = {}
        objs = []
        exclusions = set()
        seen_multipaths = set()
        for action in config:
            if not is_autoinstall and action['type'] == 'mount':
                # NOTE(review): condition excludes devices with mounts whose
                # path does NOT start with self.target -- confirm the comment
                # below matches the intended polarity.
                if not action['path'].startswith(self.target):
                    # Completely ignore mounts under /target, they are
                    # probably leftovers from a previous install
                    # attempt.
                    exclusions.add(byid[action['device']])
                continue
            c = _type_to_cls.get(action['type'], None)
            if c is None:
                # Ignore any action we do not know how to process yet
                # (e.g. bcache)
                continue
            kw = {}
            for f in attr.fields(c):
                n = f.name
                if n not in action:
                    continue
                v = action[n]
                try:
                    if f.metadata.get('ref', False):
                        kw[n] = byid[v]
                    elif f.metadata.get('reflist', False):
                        kw[n] = [byid[id] for id in v]
                    else:
                        kw[n] = v
                except KeyError:
                    # If a dependency of the current action has been
                    # ignored, we need to ignore the current action too
                    # (e.g. a bcache's filesystem).
                    continue
            if kw['type'] == 'disk':
                path = kw['path']
                kw['info'] = StorageInfo({path: blockdevs[path]})
            if not is_autoinstall:
                kw['preserve'] = True
            obj = byid[action['id']] = c(m=self, **kw)
            multipath = kw.get('multipath')
            if multipath:
                # Only keep the first path of a multipath device.
                if multipath in seen_multipaths:
                    exclusions.add(obj)
                else:
                    seen_multipaths.add(multipath)
            objs.append(obj)

        # Transitively close the exclusion set over dependencies in both
        # directions until it stops growing.
        while True:
            next_exclusions = exclusions.copy()
            for e in exclusions:
                next_exclusions.update(itertools.chain(
                    dependencies(e), reverse_dependencies(e)))
            if len(exclusions) == len(next_exclusions):
                break
            exclusions = next_exclusions

        log.debug("exclusions %s", {e.id for e in exclusions})
        objs = [o for o in objs if o not in exclusions]

        if not is_autoinstall:
            # Give pre-existing swap partitions an implicit swap filesystem.
            for o in objs:
                if o.type == "partition" and o.flag == "swap":
                    if o._fs is None:
                        objs.append(Filesystem(
                            m=self, fstype="swap", volume=o, preserve=True))

        return objs

    def _render_actions(self):
        # The curtin storage config has the constraint that an action must be
        # preceded by all the things that it depends on.  We handle this by
        # repeatedly iterating over all actions and checking if we can emit
        # each action by checking if all of the actions it depends on have
        # been emitted.  Eventually this will either emit all actions or stop
        # making progress -- which means there is a cycle in the definitions,
        # something the UI should have prevented <wink>.
        r = []
        emitted_ids = set()

        def emit(obj):
            if isinstance(obj, Raid):
                log.debug(
                    "FilesystemModel: estimated size of %s %s is %s",
                    obj.raidlevel, obj.name, obj.size)
            r.append(asdict(obj))
            emitted_ids.add(obj.id)

        def ensure_partitions(dev):
            # Queue any not-yet-emitted partitions of dev for a later pass.
            for part in dev.partitions():
                if part.id not in emitted_ids:
                    if part not in work and part not in next_work:
                        next_work.append(part)

        def can_emit(obj):
            if obj.type == "partition":
                # Partitions must be emitted in numerical order per device.
                ensure_partitions(obj.device)
                for p in obj.device.partitions():
                    if p._number < obj._number and p.id not in emitted_ids:
                        return False
            for dep in dependencies(obj):
                if dep.id not in emitted_ids:
                    if dep not in work and dep not in next_work:
                        next_work.append(dep)
                    if dep.type in ['disk', 'raid']:
                        ensure_partitions(dep)
                    return False
            if isinstance(obj, Mount):
                # Any mount actions for a parent of this one have to be
                # emitted first.
                for parent in pathlib.Path(obj.path).parents:
                    parent = str(parent)
                    if parent in mountpoints:
                        if mountpoints[parent] not in emitted_ids:
                            log.debug(
                                "cannot emit action to mount %s until that "
                                "for %s is emitted", obj.path, parent)
                            return False
            return True

        mountpoints = {m.path: m.id for m in self.all_mounts()}
        log.debug('mountpoints %s', mountpoints)

        # preserve=True actions describe pre-existing state and are not
        # rendered.
        work = [
            a for a in self._actions if not getattr(a, 'preserve', False)
            ]

        while work:
            next_work = []
            for obj in work:
                if can_emit(obj):
                    emit(obj)
                else:
                    next_work.append(obj)
            if {a.id for a in next_work} == {a.id for a in work}:
                msg = ["rendering block devices made no progress processing:"]
                for w in work:
                    msg.append(" - " + str(w))
                raise Exception("\n".join(msg))
            work = next_work

        return r

    def render(self):
        # Produce the curtin config dict for this model.
        config = {
            'storage': {
                'version': 1,
                'config': self._render_actions(),
                },
            }
        if self.swap is not None:
            config['swap'] = self.swap
        if self.grub is not None:
            config['grub'] = self.grub
        return config

    def load_probe_data(self, probe_data):
        self._probe_data = probe_data
        self.reset()

    def _matcher(self, type, kw):
        # Yield actions of the given type whose attributes match all of kw.
        for a in self._actions:
            if a.type != type:
                continue
            for k, v in kw.items():
                if getattr(a, k) != v:
                    break
            else:
                yield a

    def _one(self, *, type, **kw):
        # First matching action, or None.
        try:
            return next(self._matcher(type, kw))
        except StopIteration:
            return None

    def _all(self, *, type, **kw):
        # All matching actions.
        return list(self._matcher(type, kw))

    def all_mounts(self):
        return self._all(type='mount')

    def all_devices(self):
        # return:
        #  compound devices, newest first
        #  disk devices, sorted by label
        disks = []
        compounds = []
        for a in self._actions:
            if a.type == 'disk':
                disks.append(a)
            elif isinstance(a, _Device):
                compounds.append(a)
        compounds.reverse()
        disks.sort(key=lambda x: x.label)
        return compounds + disks

    def all_disks(self):
        return sorted(self._all(type='disk'), key=lambda x: x.label)

    def all_raids(self):
        return self._all(type='raid')

    def all_volgroups(self):
        return self._all(type='lvm_volgroup')

    def _remove(self, obj):
        # Drop obj from the model, unhooking any backlinks it installed.
        _remove_backlinks(obj)
        self._actions.remove(obj)
def add_partition(self, device, size, flag="", wipe=None, grub_device=None): if size > device.free_for_partitions: raise Exception("%s > %s", size, device.free_for_partitions) real_size = align_up(size) log.debug("add_partition: rounded size from %s to %s", size, real_size) if device._fs is not None: raise Exception("%s is already formatted" % (device.label,)) p = Partition( m=self, device=device, size=real_size, flag=flag, wipe=wipe, grub_device=grub_device) if flag in ("boot", "bios_grub", "prep"): device._partitions.insert(0, device._partitions.pop()) device.ptable = device.ptable_for_new_partition() dasd = device.dasd() if dasd is not None: dasd.device_layout = 'cdl' dasd.preserve = False self._actions.append(p) return p def remove_partition(self, part): if part._fs or part._constructed_device: raise Exception("can only remove empty partition") self._remove(part) if len(part.device._partitions) == 0: part.device.ptable = None def add_raid(self, name, raidlevel, devices, spare_devices): r = Raid( m=self, name=name, raidlevel=raidlevel, devices=devices, spare_devices=spare_devices) self._actions.append(r) return r def remove_raid(self, raid): if raid._fs or raid._constructed_device or len(raid.partitions()): raise Exception("can only remove empty RAID") self._remove(raid) def add_volgroup(self, name, devices): vg = LVM_VolGroup(m=self, name=name, devices=devices) self._actions.append(vg) return vg def remove_volgroup(self, vg): if len(vg._partitions): raise Exception("can only remove empty VG") self._remove(vg) def add_logical_volume(self, vg, name, size): lv = LVM_LogicalVolume(m=self, volgroup=vg, name=name, size=size) self._actions.append(lv) return lv def remove_logical_volume(self, lv): if lv._fs: raise Exception("can only remove empty LV") self._remove(lv) def add_dm_crypt(self, volume, key): if not volume.available: raise Exception("{} is not available".format(volume)) dm_crypt = DM_Crypt(volume=volume, key=key) self._actions.append(dm_crypt) return 
dm_crypt def remove_dm_crypt(self, dm_crypt): self._remove(dm_crypt) def add_filesystem(self, volume, fstype, preserve=False): log.debug("adding %s to %s", fstype, volume) if not volume.available: if not isinstance(volume, Partition): if (volume.flag == 'prep' or ( volume.flag == 'bios_grub' and fstype == 'fat32')): raise Exception("{} is not available".format(volume)) if volume._fs is not None: raise Exception("%s is already formatted") fs = Filesystem( m=self, volume=volume, fstype=fstype, preserve=preserve) self._actions.append(fs) return fs def remove_filesystem(self, fs): if fs._mount: raise Exception("can only remove unmounted filesystem") self._remove(fs) def add_mount(self, fs, path): if fs._mount is not None: raise Exception("%s is already mounted") m = Mount(m=self, device=fs, path=path) self._actions.append(m) # Adding a swap partition or mounting btrfs at / suppresses # the swapfile. if not self._should_add_swapfile(): self.swap = {'swap': 0} return m def remove_mount(self, mount): self._remove(mount) # Removing a mount might make it ok to add a swapfile again. 
if self._should_add_swapfile(): self.swap = None def needs_bootloader_partition(self): '''true if no disk have a boot partition, and one is needed''' # s390x has no such thing if self.bootloader == Bootloader.NONE: return False elif self.bootloader == Bootloader.BIOS: return self._one(type='disk', grub_device=True) is None elif self.bootloader == Bootloader.UEFI: for esp in self._all(type='partition', grub_device=True): if esp.fs() and esp.fs().mount(): if esp.fs().mount().path == '/boot/efi': return False return True elif self.bootloader == Bootloader.PREP: return self._one(type='partition', grub_device=True) is None else: raise AssertionError( "unknown bootloader type {}".format(self.bootloader)) def _mount_for_path(self, path): return self._one(type='mount', path=path) def is_root_mounted(self): return self._mount_for_path('/') is not None def can_install(self): return (self.is_root_mounted() and not self.needs_bootloader_partition()) def _should_add_swapfile(self): mount = self._mount_for_path('/') if mount is not None and mount.device.fstype == 'btrfs': return False for swap in self._all(type='format', fstype='swap'): if swap.mount(): return False return True
./CrossVul/dataset_final_sorted/CWE-532/py/bad_3972_0
crossvul-python_data_bad_18_2
"""Base classes for Anymail ESP webhook views (basic-auth validation,
signal dispatch, and the hooks ESP-specific subclasses implement)."""

import warnings

import six

from django.http import HttpResponse
from django.utils.crypto import constant_time_compare
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View

from ..exceptions import AnymailInsecureWebhookWarning, AnymailWebhookValidationFailure
from ..utils import get_anymail_setting, collect_all_methods, get_request_basic_auth


class AnymailBasicAuthMixin(object):
    """Implements webhook basic auth as mixin to AnymailBaseWebhookView."""

    # Whether to warn if basic auth is not configured.
    # For most ESPs, basic auth is the only webhook security,
    # so the default is True. Subclasses can set False if
    # they enforce other security (like signed webhooks).
    warn_if_no_basic_auth = True

    # List of allowable HTTP basic-auth 'user:pass' strings.
    basic_auth = None  # (Declaring class attr allows override by kwargs in View.as_view.)

    def __init__(self, **kwargs):
        # Resolve allowed credentials from settings (or as_view kwargs).
        self.basic_auth = get_anymail_setting(
            'webhook_authorization', default=[],
            kwargs=kwargs)  # no esp_name -- auth is shared between ESPs
        # Allow a single string:
        if isinstance(self.basic_auth, six.string_types):
            self.basic_auth = [self.basic_auth]
        if self.warn_if_no_basic_auth and len(self.basic_auth) < 1:
            warnings.warn(
                "Your Anymail webhooks are insecure and open to anyone on the web. "
                "You should set WEBHOOK_AUTHORIZATION in your ANYMAIL settings. "
                "See 'Securing webhooks' in the Anymail docs.",
                AnymailInsecureWebhookWarning)
        # noinspection PyArgumentList
        super(AnymailBasicAuthMixin, self).__init__(**kwargs)

    def validate_request(self, request):
        """If configured for webhook basic auth, validate request has correct auth."""
        if self.basic_auth:
            request_auth = get_request_basic_auth(request)
            # Use constant_time_compare to avoid timing attack on basic auth. (It's OK that any()
            # can terminate early: we're not trying to protect how many auth strings are allowed,
            # just the contents of each individual auth string.)
            auth_ok = any(constant_time_compare(request_auth, allowed_auth)
                          for allowed_auth in self.basic_auth)
            if not auth_ok:
                # noinspection PyUnresolvedReferences
                raise AnymailWebhookValidationFailure(
                    "Missing or invalid basic auth in Anymail %s webhook" % self.esp_name)


# Mixin note: Django's View.__init__ doesn't cooperate with chaining,
# so all mixins that need __init__ must appear before View in MRO.

class AnymailBaseWebhookView(AnymailBasicAuthMixin, View):
    """Base view for processing ESP event webhooks

    ESP-specific implementations should subclass and implement parse_events.
    They may also want to implement validate_request if additional security
    is available.
    """

    def __init__(self, **kwargs):
        super(AnymailBaseWebhookView, self).__init__(**kwargs)
        # Collect every validate_request defined anywhere in the MRO,
        # so mixins don't need to chain to super().
        self.validators = collect_all_methods(self.__class__, 'validate_request')

    # Subclass implementation:

    # Where to send events: either ..signals.inbound or ..signals.tracking
    signal = None

    def validate_request(self, request):
        """Check validity of webhook post, or raise AnymailWebhookValidationFailure.

        AnymailBaseWebhookView includes basic auth validation.
        Subclasses can implement (or provide via mixins) if the ESP supports
        additional validation (such as signature checking).

        *All* definitions of this method in the class chain (including mixins)
        will be called. There is no need to chain to the superclass.
        (See self.run_validators and collect_all_methods.)

        Security note: use django.utils.crypto.constant_time_compare for string
        comparisons, to avoid exposing your validation to a timing attack.
        """
        # if not constant_time_compare(request.POST['signature'], expected_signature):
        #     raise AnymailWebhookValidationFailure("...message...")
        # (else just do nothing)
        pass

    def parse_events(self, request):
        """Return a list of normalized AnymailWebhookEvent extracted from ESP post data.

        Subclasses must implement.
        """
        raise NotImplementedError()

    # HTTP handlers (subclasses shouldn't need to override):

    http_method_names = ["post", "head", "options"]

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(AnymailBaseWebhookView, self).dispatch(request, *args, **kwargs)

    def head(self, request, *args, **kwargs):
        # Some ESPs verify the webhook with a HEAD request at configuration time
        return HttpResponse()

    def post(self, request, *args, **kwargs):
        # Normal Django exception handling will do the right thing:
        # - AnymailWebhookValidationFailure will turn into an HTTP 400 response
        #   (via Django SuspiciousOperation handling)
        # - Any other errors (e.g., in signal dispatch) will turn into HTTP 500
        #   responses (via normal Django error handling). ESPs generally
        #   treat that as "try again later".
        self.run_validators(request)
        events = self.parse_events(request)
        esp_name = self.esp_name
        for event in events:
            self.signal.send(sender=self.__class__, event=event, esp_name=esp_name)
        return HttpResponse()

    # Request validation (subclasses shouldn't need to override):

    def run_validators(self, request):
        # Run every collected validate_request; each raises on failure.
        for validator in self.validators:
            validator(self, request)

    @property
    def esp_name(self):
        """
        Read-only name of the ESP for this webhook view.

        Subclasses must override with class attr. E.g.:
            esp_name = "Postmark"
            esp_name = "SendGrid"  # (use ESP's preferred capitalization)
        """
        raise NotImplementedError("%s.%s must declare esp_name class attr" %
                                  (self.__class__.__module__, self.__class__.__name__))
./CrossVul/dataset_final_sorted/CWE-532/py/bad_18_2
crossvul-python_data_bad_18_0
"""Django application configuration for Anymail."""

from django.apps import AppConfig


class AnymailBaseConfig(AppConfig):
    """Registers the Anymail package with Django's app registry."""

    # Dotted path of the app package, and its human-readable name.
    name = 'anymail'
    verbose_name = "Anymail"

    def ready(self):
        """Called once the app registry is fully populated; no setup needed."""
./CrossVul/dataset_final_sorted/CWE-532/py/bad_18_0
crossvul-python_data_good_18_13
"""Shared test-case bases for exercising Anymail's ESP webhook views."""

import base64

from django.test import override_settings, SimpleTestCase
from mock import create_autospec, ANY

from anymail.exceptions import AnymailInsecureWebhookWarning
from anymail.signals import tracking, inbound

from .utils import AnymailTestMixin, ClientWithCsrfChecks


def event_handler(sender, event, esp_name, **kwargs):
    """Prototypical webhook signal handler"""
    pass


@override_settings(ANYMAIL={'WEBHOOK_SECRET': 'username:password'})
class WebhookTestCase(AnymailTestMixin, SimpleTestCase):
    """Base for testing webhooks

    - connects webhook signal handlers
    - sets up basic auth by default (since most ESP webhooks warn if it's not enabled)
    """
    client_class = ClientWithCsrfChecks

    def setUp(self):
        super(WebhookTestCase, self).setUp()
        # Use correct basic auth by default (individual tests can override):
        self.set_basic_auth()

        # Install mocked signal handlers
        self.tracking_handler = create_autospec(event_handler)
        tracking.connect(self.tracking_handler)
        self.addCleanup(tracking.disconnect, self.tracking_handler)

        self.inbound_handler = create_autospec(event_handler)
        inbound.connect(self.inbound_handler)
        self.addCleanup(inbound.disconnect, self.inbound_handler)

    def set_basic_auth(self, username='username', password='password'):
        """Set basic auth for all subsequent test client requests"""
        credentials = base64.b64encode(
            "{}:{}".format(username, password).encode('utf-8')).decode('utf-8')
        self.client.defaults['HTTP_AUTHORIZATION'] = "Basic {}".format(credentials)

    def clear_basic_auth(self):
        # Remove any Authorization header set by set_basic_auth.
        self.client.defaults.pop('HTTP_AUTHORIZATION', None)

    def assert_handler_called_once_with(self, mockfn, *expected_args, **expected_kwargs):
        """Verifies mockfn was called with expected_args and at least expected_kwargs.

        Ignores *additional* actual kwargs (which might be added by Django
        signal dispatch).  (This differs from mock.assert_called_once_with.)
        Returns the actual kwargs.
        """
        self.assertEqual(mockfn.call_count, 1)
        actual_args, actual_kwargs = mockfn.call_args
        self.assertEqual(actual_args, expected_args)
        for key, expected_value in expected_kwargs.items():
            if expected_value is ANY:
                # ANY only requires the key to be present.
                self.assertIn(key, actual_kwargs)
            else:
                self.assertEqual(actual_kwargs[key], expected_value)
        return actual_kwargs

    def get_kwargs(self, mockfn):
        """Return the kwargs passed to the most recent call to mockfn"""
        self.assertIsNotNone(mockfn.call_args)  # mockfn hasn't been called yet
        actual_args, actual_kwargs = mockfn.call_args
        return actual_kwargs


# noinspection PyUnresolvedReferences
class WebhookBasicAuthTestsMixin(object):
    """Common test cases for webhook basic authentication.

    Instantiate for each ESP's webhooks by:
    - mixing into WebhookTestCase
    - defining call_webhook to invoke the ESP's webhook
    """
    should_warn_if_no_auth = True  # subclass set False if other webhook verification used

    def call_webhook(self):
        # Concrete test cases should call a webhook via self.client.post,
        # and return the response
        raise NotImplementedError()

    @override_settings(ANYMAIL={})  # Clear the WEBHOOK_AUTH settings from superclass
    def test_warns_if_no_auth(self):
        if self.should_warn_if_no_auth:
            with self.assertWarns(AnymailInsecureWebhookWarning):
                response = self.call_webhook()
        else:
            with self.assertDoesNotWarn(AnymailInsecureWebhookWarning):
                response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

    def test_verifies_basic_auth(self):
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

    def test_verifies_bad_auth(self):
        self.set_basic_auth('baduser', 'wrongpassword')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 400)

    def test_verifies_missing_auth(self):
        self.clear_basic_auth()
        response = self.call_webhook()
        self.assertEqual(response.status_code, 400)

    @override_settings(ANYMAIL={'WEBHOOK_SECRET': ['cred1:pass1', 'cred2:pass2']})
    def test_supports_credential_rotation(self):
        """You can supply a list of basic auth credentials, and any is allowed"""
        self.set_basic_auth('cred1', 'pass1')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

        self.set_basic_auth('cred2', 'pass2')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

        self.set_basic_auth('baduser', 'wrongpassword')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 400)

    @override_settings(ANYMAIL={'WEBHOOK_AUTHORIZATION': "username:password"})
    def test_deprecated_setting(self):
        """The older WEBHOOK_AUTHORIZATION setting is still supported (for now)"""
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)
./CrossVul/dataset_final_sorted/CWE-532/py/good_18_13
crossvul-python_data_good_18_1
"""System checks that warn about deprecated (renamed) Anymail settings."""

from django.conf import settings
from django.core import checks


def check_deprecated_settings(app_configs, **kwargs):
    """Django system check: flag use of renamed Anymail settings.

    Returns a list of checks.Warning instances (check id "anymail.W001"),
    one per deprecated setting name found in the project's settings.
    """
    found = []
    anymail_settings = getattr(settings, "ANYMAIL", {})

    # Both renames share the same hint text and check id (anymail.W001).
    common = dict(
        hint="You must update your settings.py. The old name will stop working in a near-future release.",
        id="anymail.W001",
    )

    if "WEBHOOK_AUTHORIZATION" in anymail_settings:
        found.append(checks.Warning(
            "The ANYMAIL setting 'WEBHOOK_AUTHORIZATION' has been renamed 'WEBHOOK_SECRET' to improve security.",
            **common))

    if hasattr(settings, "ANYMAIL_WEBHOOK_AUTHORIZATION"):
        found.append(checks.Warning(
            "The ANYMAIL_WEBHOOK_AUTHORIZATION setting has been renamed ANYMAIL_WEBHOOK_SECRET to improve security.",
            **common))

    return found
./CrossVul/dataset_final_sorted/CWE-532/py/good_18_1
crossvul-python_data_good_3972_0
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""In-memory model of the storage configuration, built from attrs-based
action classes that mirror curtin's storage config entries."""

from abc import ABC, abstractmethod
import attr
import collections
import enum
import fnmatch
import itertools
import logging
import math
import os
import pathlib
import platform
import tempfile

from curtin import storage_config
from curtin.util import human2bytes

from probert.storage import StorageInfo

log = logging.getLogger('subiquity.models.filesystem')


def _set_backlinks(obj):
    # For each attribute declared with a 'backlink', hook obj into the
    # referenced object's backlink slot (list, set, or scalar).
    for field in attr.fields(type(obj)):
        backlink = field.metadata.get('backlink')
        if backlink is None:
            continue
        v = getattr(obj, field.name)
        if v is None:
            continue
        if not isinstance(v, (list, set)):
            v = [v]
        for vv in v:
            b = getattr(vv, backlink, None)
            if isinstance(b, list):
                b.append(obj)
            elif isinstance(b, set):
                b.add(obj)
            else:
                setattr(vv, backlink, obj)


def _remove_backlinks(obj):
    # Inverse of _set_backlinks: unhook obj from referenced objects.
    for field in attr.fields(type(obj)):
        backlink = field.metadata.get('backlink')
        if backlink is None:
            continue
        v = getattr(obj, field.name)
        if v is None:
            continue
        if not isinstance(v, (list, set)):
            v = [v]
        for vv in v:
            b = getattr(vv, backlink, None)
            if isinstance(b, list):
                b.remove(obj)
            elif isinstance(b, set):
                b.remove(obj)
            else:
                setattr(vv, backlink, None)


# Maps curtin action type (e.g. "disk") to the model class for it;
# populated by the @fsobj decorator.
_type_to_cls = {}


def fsobj__repr(obj):
    # Shared __repr__ for all @fsobj classes: skips private/default fields,
    # shows refs by id, and hides fields marked redact (e.g. crypt keys).
    args = []
    for f in attr.fields(type(obj)):
        if f.name.startswith("_"):
            continue
        v = getattr(obj, f.name)
        if v is f.default:
            continue
        if f.metadata.get('ref', False):
            v = v.id
        elif f.metadata.get('reflist', False):
            if isinstance(v, set):
                delims = "{}"
            else:
                delims = "[]"
            v = delims[0] + ", ".join(vv.id for vv in v) + delims[1]
        elif f.metadata.get('redact', False):
            # Never include sensitive values (e.g. passphrases) in reprs/logs.
            v = "<REDACTED>"
        else:
            v = repr(v)
        args.append("{}={}".format(f.name, v))
    return "{}({})".format(type(obj).__name__, ", ".join(args))


def fsobj(typ):
    # Class decorator: turn c into an attrs class registered for curtin
    # action type `typ`, with backlink wiring and the shared repr.
    def wrapper(c):
        c.__attrs_post_init__ = _set_backlinks
        c.type = attributes.const(typ)
        c.id = attributes.idfield(typ)
        c._m = attr.ib(repr=None, default=None)
        c = attr.s(cmp=False, repr=False)(c)
        c.__repr__ = fsobj__repr
        _type_to_cls[typ] = c
        return c
    return wrapper


def dependencies(obj):
    # Yield every object obj references (ref and reflist attributes).
    for f in attr.fields(type(obj)):
        v = getattr(obj, f.name)
        if not v:
            continue
        elif f.metadata.get('ref', False):
            yield v
        elif f.metadata.get('reflist', False):
            yield from v


def reverse_dependencies(obj):
    # Yield every object that references obj (via backlink attributes).
    for f in attr.fields(type(obj)):
        if not f.metadata.get('is_backlink', False):
            continue
        v = getattr(obj, f.name)
        if isinstance(v, (list, set)):
            yield from v
        elif v is not None:
            yield v


@attr.s(cmp=False)
class RaidLevel:
    # Descriptor for a supported mdadm RAID level.
    name = attr.ib()
    value = attr.ib()
    min_devices = attr.ib()
    supports_spares = attr.ib(default=True)


raidlevels = [
    RaidLevel(_("0 (striped)"), "raid0", 2, False),
    RaidLevel(_("1 (mirrored)"), "raid1", 2),
    RaidLevel(_("5"), "raid5", 3),
    RaidLevel(_("6"), "raid6", 4),
    RaidLevel(_("10"), "raid10", 4),
    ]


def _raidlevels_by_value():
    # Index levels under several aliases: "raid0", "0", 0, "stripe", ...
    r = {level.value: level for level in raidlevels}
    for n in 0, 1, 5, 6, 10:
        r[str(n)] = r[n] = r["raid"+str(n)]
    r["stripe"] = r["raid0"]
    r["mirror"] = r["raid1"]
    return r


raidlevels_by_value = _raidlevels_by_value()

HUMAN_UNITS = ['B', 'K', 'M', 'G', 'T', 'P']


def humanize_size(size):
    # Render a byte count with a binary-unit suffix, e.g. "1.500M".
    if size == 0:
        return "0B"
    p = int(math.floor(math.log(size, 2) / 10))
    # We want to truncate the non-integral part, not round to nearest.
    s = "{:.17f}".format(size / 2 ** (10 * p))
    i = s.index('.')
    s = s[:i + 4]
    return s + HUMAN_UNITS[int(p)]


def dehumanize_size(size):
    # convert human 'size' to integer
    size_in = size

    if not size:
        raise ValueError("input cannot be empty")

    if not size[-1].isdigit():
        suffix = size[-1].upper()
        size = size[:-1]
    else:
        suffix = None

    parts = size.split('.')
    if len(parts) > 2:
        raise ValueError(_("{!r} is not valid input").format(size_in))
    elif len(parts) == 2:
        # Shift the decimal point out; divide back at the end.
        div = 10 ** len(parts[1])
        size = parts[0] + parts[1]
    else:
        div = 1

    try:
        num = int(size)
    except ValueError:
        raise ValueError(_("{!r} is not valid input").format(size_in))

    if suffix is not None:
        if suffix not in HUMAN_UNITS:
            raise ValueError(
                "unrecognized suffix {!r} in {!r}".format(size_in[-1],
                                                          size_in))
        mult = 2 ** (10 * HUMAN_UNITS.index(suffix))
    else:
        mult = 1

    if num < 0:
        raise ValueError("{!r}: cannot be negative".format(size_in))

    return num * mult // div


DEFAULT_CHUNK = 512


# The calculation of how much of a device mdadm uses for raid is more than a
# touch ridiculous. What follows is a translation of the code at:
# https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/tree/super1.c,
# specifically choose_bm_space and the end of validate_geometry1. Note that
# that calculations are in terms of 512-byte sectors.
#
# We make some assumptions about the defaults mdadm uses but mostly that the
# default metadata version is 1.2, and other formats use less space.
#
# Note that data_offset is computed for the first disk mdadm examines and then
# used for all devices, so the order matters! (Well, if the size of the
# devices vary, which is not normal but also not something we prevent).
#
# All this is tested against reality in ./scripts/get-raid-sizes.py
def calculate_data_offset_bytes(devsize):
    # Convert to sectors to make it easier to compare this code to mdadm's (we
    # convert back at the end)
    devsize >>= 9

    devsize = align_down(devsize, DEFAULT_CHUNK)

    # conversion of choose_bm_space:
    if devsize < 64*2:
        bmspace = 0
    elif devsize - 64*2 >= 200*1024*1024*2:
        bmspace = 128*2
    elif devsize - 4*2 > 8*1024*1024*2:
        bmspace = 64*2
    else:
        bmspace = 4*2

    # From the end of validate_geometry1, assuming metadata 1.2.
    headroom = 128*1024*2
    while (headroom << 10) > devsize and headroom / 2 >= DEFAULT_CHUNK*2*2:
        headroom >>= 1

    data_offset = 12*2 + bmspace + headroom
    log.debug(
        "get_raid_size: adjusting for %s sectors of overhead", data_offset)
    data_offset = align_up(data_offset, 2*1024)
    # convert back to bytes
    return data_offset << 9


def raid_device_sort(devices):
    # Because the device order matters to mdadm, we sort consistently but
    # arbitrarily when computing the size and when rendering the config (so
    # curtin passes the devices to mdadm in the order we calculate the size
    # for)
    return sorted(devices, key=lambda d: d.id)


def get_raid_size(level, devices):
    # Estimate the usable size of an mdadm array of `devices` at `level`.
    if len(devices) == 0:
        return 0
    devices = raid_device_sort(devices)
    data_offset = calculate_data_offset_bytes(devices[0].size)
    sizes = [align_down(dev.size - data_offset) for dev in devices]
    min_size = min(sizes)
    if min_size <= 0:
        return 0
    if level == "raid0":
        return sum(sizes)
    elif level == "raid1":
        return min_size
    elif level == "raid5":
        return min_size * (len(devices) - 1)
    elif level == "raid6":
        return min_size * (len(devices) - 2)
    elif level == "raid10":
        return min_size * (len(devices) // 2)
    else:
        raise ValueError("unknown raid level %s" % level)


# These are only defaults but curtin does not let you change/specify
# them at this time.
LVM_OVERHEAD = (1 << 20)
LVM_CHUNK_SIZE = 4 * (1 << 20)


def get_lvm_size(devices, size_overrides=None):
    """Return the usable size in bytes of a VG built from 'devices'.

    Each PV loses LVM_OVERHEAD and is rounded down to LVM_CHUNK_SIZE.
    size_overrides optionally maps a device to the size to use in place
    of device.size.
    """
    # Fix: the default used to be a mutable dict literal ({}), which is
    # shared between calls; use the None sentinel instead. Behavior is
    # unchanged (the dict was only ever read).
    if size_overrides is None:
        size_overrides = {}
    r = 0
    for d in devices:
        r += align_down(
            size_overrides.get(d, d.size) - LVM_OVERHEAD, LVM_CHUNK_SIZE)
    return r


def _conv_size(s):
    # attrs converter for size fields: percentages pass through as
    # strings; other strings are parsed as human-readable sizes.
    # NOTE(review): human2bytes is not defined in this file -- presumably
    # imported at the top; confirm it matches dehumanize_size semantics.
    if isinstance(s, str):
        if '%' in s:
            return s
        return int(human2bytes(s))
    return s


class attributes:
    # Just a namespace to hang our wrappers around attr.ib() off.

    @staticmethod
    def idfield(base):
        # An auto-generated "$base-$n" id; each field keeps its own
        # counter via the closure.
        i = 0

        def factory():
            nonlocal i
            r = "%s-%s" % (base, i)
            i += 1
            return r
        return attr.ib(default=attr.Factory(factory))

    @staticmethod
    def ref(*, backlink=None):
        # A reference to another config object, serialized as its id.
        metadata = {'ref': True}
        if backlink:
            metadata['backlink'] = backlink
        return attr.ib(metadata=metadata)

    @staticmethod
    def reflist(*, backlink=None, default=attr.NOTHING):
        # A list of references, serialized as a list of ids.
        metadata = {'reflist': True}
        if backlink:
            metadata['backlink'] = backlink
        return attr.ib(metadata=metadata, default=default)

    @staticmethod
    def backlink(*, default=None):
        # The reverse side of ref/reflist; never serialized or passed
        # to __init__.
        return attr.ib(
            init=False, default=default, metadata={'is_backlink': True})

    @staticmethod
    def const(value):
        return attr.ib(default=value)

    @staticmethod
    def size():
        return attr.ib(converter=_conv_size)

    @staticmethod
    def ptable():
        # Normalize curtin's "dos" spelling to "msdos".
        def conv(val):
            if val == "dos":
                val = "msdos"
            return val
        return attr.ib(default=None, converter=conv)


def asdict(inst):
    """Serialize an attrs-based config object to an OrderedDict.

    Skips private fields, honors per-field serialize_<name> hooks and
    collapses ref/reflist fields to ids.
    """
    r = collections.OrderedDict()
    for field in attr.fields(type(inst)):
        if field.name.startswith('_'):
            continue
        m = getattr(inst, 'serialize_' + field.name, None)
        if m:
            r.update(m())
        else:
            v = getattr(inst, field.name)
            if v is not None:
                if field.metadata.get('ref', False):
                    r[field.name] = v.id
                elif field.metadata.get('reflist', False):
                    r[field.name] = [elem.id for elem in v]
                else:
                    r[field.name] = v
    return r


# This code is not going to make much sense unless you have read
# http://curtin.readthedocs.io/en/latest/topics/storage.html. The
# Disk, Partition etc classes correspond to entries in curtin's
# storage config list.
They are mostly 'dumb data', all the logic is # in the FilesystemModel or FilesystemController classes. class DeviceAction(enum.Enum): INFO = _("Info") EDIT = _("Edit") REFORMAT = _("Reformat") PARTITION = _("Add Partition") CREATE_LV = _("Create Logical Volume") FORMAT = _("Format") REMOVE = _("Remove from RAID/LVM") DELETE = _("Delete") TOGGLE_BOOT = _("Make Boot Device") def _generic_can_EDIT(obj): cd = obj.constructed_device() if cd is None: return True return _( "Cannot edit {selflabel} as it is part of the {cdtype} " "{cdname}.").format( selflabel=obj.label, cdtype=cd.desc(), cdname=cd.label) def _generic_can_REMOVE(obj): cd = obj.constructed_device() if cd is None: return False if cd.preserve: return _("Cannot remove {selflabel} from pre-existing {cdtype} " "{cdlabel}.").format( selflabel=obj.label, cdtype=cd.desc(), cdlabel=cd.label) if isinstance(cd, Raid): if obj in cd.spare_devices: return True min_devices = raidlevels_by_value[cd.raidlevel].min_devices if len(cd.devices) == min_devices: return _( "Removing {selflabel} would leave the {cdtype} {cdlabel} with " "less than {min_devices} devices.").format( selflabel=obj.label, cdtype=cd.desc(), cdlabel=cd.label, min_devices=min_devices) elif isinstance(cd, LVM_VolGroup): if len(cd.devices) == 1: return _( "Removing {selflabel} would leave the {cdtype} {cdlabel} with " "no devices.").format( selflabel=obj.label, cdtype=cd.desc(), cdlabel=cd.label) return True def _generic_can_DELETE(obj): cd = obj.constructed_device() if cd is None: return True return _( "Cannot delete {selflabel} as it is part of the {cdtype} " "{cdname}.").format( selflabel=obj.label, cdtype=cd.desc(), cdname=cd.label) @attr.s(cmp=False) class _Formattable(ABC): # Base class for anything that can be formatted and mounted, # e.g. a disk or a RAID or a partition. 
@property @abstractmethod def label(self): pass @property def annotations(self): preserve = getattr(self, 'preserve', None) if preserve is None: return [] elif preserve: return [_("existing")] else: return [_("new")] # Filesystem _fs = attributes.backlink() # Raid or LVM_VolGroup for now, but one day ZPool, BCache... _constructed_device = attributes.backlink() def usage_labels(self): cd = self.constructed_device() if cd is not None: return [ _("{component_name} of {desc} {name}").format( component_name=cd.component_name, desc=cd.desc(), name=cd.name), ] fs = self.fs() if fs is not None: if fs.preserve: format_desc = _("already formatted as {fstype}") elif self.original_fstype() is not None: format_desc = _("to be reformatted as {fstype}") else: format_desc = _("to be formatted as {fstype}") r = [format_desc.format(fstype=fs.fstype)] if self._m.is_mounted_filesystem(fs.fstype): m = fs.mount() if m: r.append(_("mounted at {path}").format(path=m.path)) elif getattr(self, 'flag', None) != "boot": r.append(_("not mounted")) elif fs.preserve: if fs.mount() is None: r.append(_("unused")) else: r.append(_("used")) return r else: return [_("unused")] def _is_entirely_used(self): return self._fs is not None or self._constructed_device is not None def fs(self): return self._fs def original_fstype(self): for action in self._m._orig_config: if action['type'] == 'format' and action['volume'] == self.id: return action['fstype'] for action in self._m._orig_config: if action['id'] == self.id and action.get('flag') == 'swap': return 'swap' return None def constructed_device(self, skip_dm_crypt=True): cd = self._constructed_device if cd is None: return None elif cd.type == "dm_crypt" and skip_dm_crypt: return cd._constructed_device else: return cd @property @abstractmethod def supported_actions(self): pass def action_possible(self, action): assert action in self.supported_actions r = getattr(self, "_can_" + action.name) if isinstance(r, bool): return r, None elif isinstance(r, str): 
return False, r else: return r @property @abstractmethod def ok_for_raid(self): pass @property @abstractmethod def ok_for_lvm_vg(self): pass # Nothing is put in the first and last megabytes of the disk to allow # space for the GPT data. GPT_OVERHEAD = 2 * (1 << 20) @attr.s(cmp=False) class _Device(_Formattable, ABC): # Anything that can have partitions, e.g. a disk or a RAID. @property @abstractmethod def size(self): pass # [Partition] _partitions = attributes.backlink(default=attr.Factory(list)) def dasd(self): return None def ptable_for_new_partition(self): if self.ptable is not None: return self.ptable for action in self._m._orig_config: if action['id'] == self.id: if action.get('ptable') == 'vtoc': return action['ptable'] if self.dasd() is not None: return 'vtoc' return 'gpt' def partitions(self): return self._partitions @property def used(self): if self._is_entirely_used(): return self.size r = 0 for p in self._partitions: if p.flag == "extended": continue r += p.size return r @property def empty(self): return self.used == 0 @property def available_for_partitions(self): return self.size - GPT_OVERHEAD @property def free_for_partitions(self): return self.available_for_partitions - self.used def available(self): # A _Device is available if: # 1) it is not part of a device like a RAID or LVM or zpool or ... 
# 2) if it is formatted, it is available if it is formatted with fs # that needs to be mounted and is not mounted # 3) if it is not formatted, it is available if it has free # space OR at least one partition is not formatted or is formatted # with a fs that needs to be mounted and is not mounted if self._constructed_device is not None: return False if self._fs is not None: return self._fs._available() if self.free_for_partitions > 0: if not self._has_preexisting_partition(): return True for p in self._partitions: if p.available(): return True return False def has_unavailable_partition(self): for p in self._partitions: if not p.available(): return True return False def _has_preexisting_partition(self): for p in self._partitions: if p.preserve: return True else: return False @property def _can_DELETE(self): mounted_partitions = 0 for p in self._partitions: if p.fs() and p.fs().mount(): mounted_partitions += 1 elif p.constructed_device(): cd = p.constructed_device() return _( "Cannot delete {selflabel} as partition {partnum} is part " "of the {cdtype} {cdname}.").format( selflabel=self.label, partnum=p._number, cdtype=cd.desc(), cdname=cd.label, ) if mounted_partitions > 1: return _( "Cannot delete {selflabel} because it has {count} mounted " "partitions.").format( selflabel=self.label, count=mounted_partitions) elif mounted_partitions == 1: return _( "Cannot delete {selflabel} because it has 1 mounted partition." 
).format(selflabel=self.label) else: return _generic_can_DELETE(self) @fsobj("dasd") class Dasd: device_id = attr.ib() blocksize = attr.ib() disk_layout = attr.ib() label = attr.ib(default=None) mode = attr.ib(default=None) preserve = attr.ib(default=False) @fsobj("disk") class Disk(_Device): ptable = attributes.ptable() serial = attr.ib(default=None) wwn = attr.ib(default=None) multipath = attr.ib(default=None) path = attr.ib(default=None) model = attr.ib(default=None) wipe = attr.ib(default=None) preserve = attr.ib(default=False) name = attr.ib(default="") grub_device = attr.ib(default=False) device_id = attr.ib(default=None) _info = attr.ib(default=None) def info_for_display(self): bus = self._info.raw.get('ID_BUS', None) major = self._info.raw.get('MAJOR', None) if bus is None and major == '253': bus = 'virtio' devpath = self._info.raw.get('DEVPATH', self.path) # XXX probert should be doing this!! rotational = '1' try: dev = os.path.basename(devpath) rfile = '/sys/class/block/{}/queue/rotational'.format(dev) rotational = open(rfile, 'r').read().strip() except (PermissionError, FileNotFoundError, IOError): log.exception('WARNING: Failed to read file {}'.format(rfile)) pass dinfo = { 'bus': bus, 'devname': self.path, 'devpath': devpath, 'model': self.model or 'unknown', 'serial': self.serial or 'unknown', 'wwn': self.wwn or 'unknown', 'multipath': self.multipath or 'unknown', 'size': self.size, 'humansize': humanize_size(self.size), 'vendor': self._info.vendor or 'unknown', 'rotational': 'true' if rotational == '1' else 'false', } return dinfo @property def size(self): return align_down(self._info.size) @property def annotations(self): return [] def desc(self): if self.multipath: return _("multipath device") return _("local disk") @property def label(self): if self.multipath: return self.wwn return self.serial or self.path def dasd(self): return self._m._one(type='dasd', device_id=self.device_id) def _can_be_boot_disk(self): bl = self._m.bootloader if 
self._has_preexisting_partition(): if bl == Bootloader.BIOS: if self.ptable == "msdos": return True else: return self._partitions[0].flag == "bios_grub" else: flag = {Bootloader.UEFI: "boot", Bootloader.PREP: "prep"}[bl] for p in self._partitions: if p.flag == flag: return True return False else: return True @property def supported_actions(self): actions = [ DeviceAction.INFO, DeviceAction.REFORMAT, DeviceAction.PARTITION, DeviceAction.FORMAT, DeviceAction.REMOVE, ] if self._m.bootloader != Bootloader.NONE: actions.append(DeviceAction.TOGGLE_BOOT) return actions _can_INFO = True @property def _can_REFORMAT(self): if len(self._partitions) == 0: return False for p in self._partitions: if p._constructed_device is not None: return False return True @property def _can_PARTITION(self): if self._has_preexisting_partition(): return False if self.free_for_partitions <= 0: return False if self.ptable == 'vtoc' and len(self._partitions) >= 3: return False return True _can_FORMAT = property( lambda self: len(self._partitions) == 0 and self._constructed_device is None) _can_REMOVE = property(_generic_can_REMOVE) def _is_boot_device(self): bl = self._m.bootloader if bl == Bootloader.NONE: return False elif bl == Bootloader.BIOS: return self.grub_device elif bl in [Bootloader.PREP, Bootloader.UEFI]: for p in self._partitions: if p.grub_device: return True return False @property def _can_TOGGLE_BOOT(self): if self._is_boot_device(): for disk in self._m.all_disks(): if disk is not self and disk._is_boot_device(): return True return False elif self._fs is not None or self._constructed_device is not None: return False else: return self._can_be_boot_disk() @property def ok_for_raid(self): if self._fs is not None: if self._fs.preserve: return self._fs._mount is None return False if self._constructed_device is not None: return False if len(self._partitions) > 0: return False return True ok_for_lvm_vg = ok_for_raid @fsobj("partition") class Partition(_Formattable): device = 
attributes.ref(backlink="_partitions") # Disk size = attributes.size() wipe = attr.ib(default=None) flag = attr.ib(default=None) number = attr.ib(default=None) preserve = attr.ib(default=False) grub_device = attr.ib(default=False) @property def annotations(self): r = super().annotations if self.flag == "prep": r.append("PReP") if self.preserve: if self.grub_device: r.append(_("configured")) else: r.append(_("unconfigured")) elif self.flag == "boot": if self.fs() and self.fs().mount(): r.append(_("primary ESP")) elif self.grub_device: r.append(_("backup ESP")) else: r.append(_("unused ESP")) elif self.flag == "bios_grub": if self.preserve: if self.device.grub_device: r.append(_("configured")) else: r.append(_("unconfigured")) r.append("bios_grub") elif self.flag == "extended": r.append(_("extended")) elif self.flag == "logical": r.append(_("logical")) return r def usage_labels(self): if self.flag == "prep" or self.flag == "bios_grub": return [] return super().usage_labels() def desc(self): return _("partition of {device}").format(device=self.device.desc()) @property def label(self): return _("partition {number} of {device}").format( number=self._number, device=self.device.label) @property def short_label(self): return _("partition {number}").format(number=self._number) def available(self): if self.flag in ['bios_grub', 'prep'] or self.grub_device: return False if self._constructed_device is not None: return False if self._fs is None: return True return self._fs._available() def serialize_number(self): return {'number': self._number} @property def _number(self): if self.preserve: return self.number else: return self.device._partitions.index(self) + 1 supported_actions = [ DeviceAction.EDIT, DeviceAction.REMOVE, DeviceAction.DELETE, ] _can_EDIT = property(_generic_can_EDIT) _can_REMOVE = property(_generic_can_REMOVE) @property def _can_DELETE(self): if self.device._has_preexisting_partition(): return _("Cannot delete a single partition from a device that " "already 
has partitions.") if self.flag in ('boot', 'bios_grub', 'prep'): return _("Cannot delete required bootloader partition") return _generic_can_DELETE(self) @property def ok_for_raid(self): if self.flag in ('boot', 'bios_grub', 'prep'): return False if self._fs is not None: if self._fs.preserve: return self._fs._mount is None return False if self._constructed_device is not None: return False return True ok_for_lvm_vg = ok_for_raid @fsobj("raid") class Raid(_Device): name = attr.ib() raidlevel = attr.ib(converter=lambda x: raidlevels_by_value[x].value) devices = attributes.reflist(backlink="_constructed_device") def serialize_devices(self): # Surprisingly, the order of devices passed to mdadm --create # matters (see get_raid_size) so we sort devices here the same # way get_raid_size does. return {'devices': [d.id for d in raid_device_sort(self.devices)]} spare_devices = attributes.reflist( backlink="_constructed_device", default=attr.Factory(set)) preserve = attr.ib(default=False) ptable = attributes.ptable() @property def size(self): return get_raid_size(self.raidlevel, self.devices) @property def available_for_partitions(self): # For some reason, the overhead on RAID devices seems to be # higher (may be related to alignment of underlying # partitions) return self.size - 2*GPT_OVERHEAD @property def label(self): return self.name def desc(self): return _("software RAID {level}").format(level=self.raidlevel[4:]) supported_actions = [ DeviceAction.EDIT, DeviceAction.PARTITION, DeviceAction.FORMAT, DeviceAction.REMOVE, DeviceAction.DELETE, DeviceAction.REFORMAT, ] @property def _can_EDIT(self): if self.preserve: return _("Cannot edit pre-existing RAIDs.") elif len(self._partitions) > 0: return _( "Cannot edit {selflabel} because it has partitions.").format( selflabel=self.label) else: return _generic_can_EDIT(self) _can_PARTITION = Disk._can_PARTITION _can_REFORMAT = Disk._can_REFORMAT _can_FORMAT = property( lambda self: len(self._partitions) == 0 and 
self._constructed_device is None) _can_REMOVE = property(_generic_can_REMOVE) @property def ok_for_raid(self): if self._fs is not None: if self._fs.preserve: return self._fs._mount is None return False if self._constructed_device is not None: return False if len(self._partitions) > 0: return False return True ok_for_lvm_vg = ok_for_raid # What is a device that makes up this device referred to as? component_name = "component" @fsobj("lvm_volgroup") class LVM_VolGroup(_Device): name = attr.ib() devices = attributes.reflist(backlink="_constructed_device") preserve = attr.ib(default=False) @property def size(self): # Should probably query actual size somehow for an existing VG! return get_lvm_size(self.devices) @property def available_for_partitions(self): return self.size @property def annotations(self): r = super().annotations member = next(iter(self.devices)) if member.type == "dm_crypt": r.append(_("encrypted")) return r @property def label(self): return self.name def desc(self): return _("LVM volume group") supported_actions = [ DeviceAction.EDIT, DeviceAction.CREATE_LV, DeviceAction.DELETE, ] @property def _can_EDIT(self): if self.preserve: return _("Cannot edit pre-existing volume groups.") elif len(self._partitions) > 0: return _( "Cannot edit {selflabel} because it has logical " "volumes.").format( selflabel=self.label) else: return _generic_can_EDIT(self) _can_CREATE_LV = property( lambda self: not self.preserve and self.free_for_partitions > 0) ok_for_raid = False ok_for_lvm_vg = False # What is a device that makes up this device referred to as? 
component_name = "PV" @fsobj("lvm_partition") class LVM_LogicalVolume(_Formattable): name = attr.ib() volgroup = attributes.ref(backlink="_partitions") # LVM_VolGroup size = attributes.size() preserve = attr.ib(default=False) def serialize_size(self): return {'size': "{}B".format(self.size)} def available(self): if self._constructed_device is not None: return False if self._fs is None: return True return self._fs._available() @property def flag(self): return None # hack! def desc(self): return _("LVM logical volume") @property def short_label(self): return self.name label = short_label supported_actions = [ DeviceAction.EDIT, DeviceAction.DELETE, ] _can_EDIT = True @property def _can_DELETE(self): if self.volgroup._has_preexisting_partition(): return _("Cannot delete a single logical volume from a volume " "group that already has logical volumes.") return True ok_for_raid = False ok_for_lvm_vg = False LUKS_OVERHEAD = 16*(2**20) @fsobj("dm_crypt") class DM_Crypt: volume = attributes.ref(backlink="_constructed_device") # _Formattable key = attr.ib(metadata={'redact': True}) def serialize_key(self): if self.key: f = tempfile.NamedTemporaryFile( prefix='luks-key-', mode='w', delete=False) f.write(self.key) f.close() return {'keyfile': f.name} else: return {} dm_name = attr.ib(default=None) preserve = attr.ib(default=False) _constructed_device = attributes.backlink() def constructed_device(self): return self._constructed_device @property def size(self): return self.volume.size - LUKS_OVERHEAD @fsobj("format") class Filesystem: fstype = attr.ib() volume = attributes.ref(backlink="_fs") # _Formattable label = attr.ib(default=None) uuid = attr.ib(default=None) preserve = attr.ib(default=False) _mount = attributes.backlink() def mount(self): return self._mount def _available(self): # False if mounted or if fs does not require a mount, True otherwise. 
if self._mount is None: if self.preserve: return True else: return FilesystemModel.is_mounted_filesystem(self.fstype) else: return False @fsobj("mount") class Mount: device = attributes.ref(backlink="_mount") # Filesystem path = attr.ib() def can_delete(self): # Can't delete mount of /boot/efi or swap, anything else is fine. if not self.path: # swap mount return False if not isinstance(self.device.volume, Partition): # Can't be /boot/efi if volume is not a partition return True if self.device.volume.flag == "boot": # /boot/efi return False return True def align_up(size, block_size=1 << 20): return (size + block_size - 1) & ~(block_size - 1) def align_down(size, block_size=1 << 20): return size & ~(block_size - 1) class Bootloader(enum.Enum): NONE = "NONE" # a system where the bootloader is external, e.g. s390x BIOS = "BIOS" # BIOS, where the bootloader dd-ed to the start of a device UEFI = "UEFI" # UEFI, ESPs and /boot/efi and all that (amd64 and arm64) PREP = "PREP" # ppc64el, which puts grub on a PReP partition class FilesystemModel(object): lower_size_limit = 128 * (1 << 20) target = None @classmethod def is_mounted_filesystem(self, fstype): if fstype in [None, 'swap']: return False else: return True def _probe_bootloader(self): # This will at some point change to return a list so that we can # configure BIOS _and_ UEFI on amd64 systems. 
if os.path.exists('/sys/firmware/efi'): return Bootloader.UEFI elif platform.machine().startswith("ppc64"): return Bootloader.PREP elif platform.machine() == "s390x": return Bootloader.NONE else: return Bootloader.BIOS def __init__(self): self.bootloader = self._probe_bootloader() self._probe_data = None self.reset() def reset(self): if self._probe_data is not None: self._orig_config = storage_config.extract_storage_config( self._probe_data)["storage"]["config"] self._actions = self._actions_from_config( self._orig_config, self._probe_data['blockdev']) else: self._orig_config = [] self._actions = [] self.swap = None self.grub = None def _make_matchers(self, match): matchers = [] def match_serial(disk): if disk.serial is not None: return fnmatch.fnmatchcase(disk.serial, match['serial']) def match_model(disk): if disk.model is not None: return fnmatch.fnmatchcase(disk.model, match['model']) def match_path(disk): if disk.path is not None: return fnmatch.fnmatchcase(disk.path, match['path']) def match_ssd(disk): is_ssd = disk.info_for_display()['rotational'] == 'false' return is_ssd == match['ssd'] if 'serial' in match: matchers.append(match_serial) if 'model' in match: matchers.append(match_model) if 'path' in match: matchers.append(match_path) if 'ssd' in match: matchers.append(match_ssd) return matchers def disk_for_match(self, disks, match): matchers = self._make_matchers(match) candidates = [] for candidate in disks: for matcher in matchers: if not matcher(candidate): break else: candidates.append(candidate) if match.get('size') == 'largest': candidates.sort(key=lambda d: d.size, reverse=True) if candidates: return candidates[0] return None def apply_autoinstall_config(self, ai_config): disks = self.all_disks() for action in ai_config: if action['type'] == 'disk': disk = None if 'serial' in action: disk = self._one(type='disk', serial=action['serial']) elif 'path' in action: disk = self._one(type='disk', path=action['path']) else: match = action.pop('match', {}) 
disk = self.disk_for_match(disks, match) if disk is None: action['match'] = match if disk is None: raise Exception("{} matched no disk".format(action)) if disk not in disks: raise Exception( "{} matched {} which was already used".format( action, disk)) disks.remove(disk) action['path'] = disk.path action['serial'] = disk.serial self._actions = self._actions_from_config( ai_config, self._probe_data['blockdev'], is_autoinstall=True) for p in self._all(type="partition") + self._all(type="lvm_partition"): [parent] = list(dependencies(p)) if isinstance(p.size, int): if p.size < 0: if p is not parent.partitions()[-1]: raise Exception( "{} has negative size but is not final partition " "of {}".format(p, parent)) p.size = 0 p.size = parent.free_for_partitions elif isinstance(p.size, str): if p.size.endswith("%"): percentage = int(p.size[:-1]) p.size = align_down( parent.available_for_partitions*percentage//100) else: p.size = dehumanize_size(p.size) def _actions_from_config(self, config, blockdevs, is_autoinstall=False): """Convert curtin storage config into action instances. curtin represents storage "actions" as defined in https://curtin.readthedocs.io/en/latest/topics/storage.html. We convert each action (that we know about) into an instance of Disk, Partition, RAID, etc (unknown actions, e.g. bcache, are just ignored). We also filter out anything that can be reached from a currently mounted device. The motivation here is only to exclude the media subiquity is mounted from, so this might be a bit excessive but hey it works. Perhaps surprisingly the order of the returned actions matters. The devices are presented in the filesystem view in the reverse of the order they appear in _actions, which means that e.g. a RAID appears higher up the list than the disks is is composed of. This is quite important as it makes "unpeeling" existing compound structures easy, you just delete the top device until you only have disks left. 
""" byid = {} objs = [] exclusions = set() seen_multipaths = set() for action in config: if not is_autoinstall and action['type'] == 'mount': if not action['path'].startswith(self.target): # Completely ignore mounts under /target, they are # probably leftovers from a previous install # attempt. exclusions.add(byid[action['device']]) continue c = _type_to_cls.get(action['type'], None) if c is None: # Ignore any action we do not know how to process yet # (e.g. bcache) continue kw = {} for f in attr.fields(c): n = f.name if n not in action: continue v = action[n] try: if f.metadata.get('ref', False): kw[n] = byid[v] elif f.metadata.get('reflist', False): kw[n] = [byid[id] for id in v] else: kw[n] = v except KeyError: # If a dependency of the current action has been # ignored, we need to ignore the current action too # (e.g. a bcache's filesystem). continue if kw['type'] == 'disk': path = kw['path'] kw['info'] = StorageInfo({path: blockdevs[path]}) if not is_autoinstall: kw['preserve'] = True obj = byid[action['id']] = c(m=self, **kw) multipath = kw.get('multipath') if multipath: if multipath in seen_multipaths: exclusions.add(obj) else: seen_multipaths.add(multipath) objs.append(obj) while True: next_exclusions = exclusions.copy() for e in exclusions: next_exclusions.update(itertools.chain( dependencies(e), reverse_dependencies(e))) if len(exclusions) == len(next_exclusions): break exclusions = next_exclusions log.debug("exclusions %s", {e.id for e in exclusions}) objs = [o for o in objs if o not in exclusions] if not is_autoinstall: for o in objs: if o.type == "partition" and o.flag == "swap": if o._fs is None: objs.append(Filesystem( m=self, fstype="swap", volume=o, preserve=True)) return objs def _render_actions(self): # The curtin storage config has the constraint that an action must be # preceded by all the things that it depends on. 
We handle this by # repeatedly iterating over all actions and checking if we can emit # each action by checking if all of the actions it depends on have been # emitted. Eventually this will either emit all actions or stop making # progress -- which means there is a cycle in the definitions, # something the UI should have prevented <wink>. r = [] emitted_ids = set() def emit(obj): if isinstance(obj, Raid): log.debug( "FilesystemModel: estimated size of %s %s is %s", obj.raidlevel, obj.name, obj.size) r.append(asdict(obj)) emitted_ids.add(obj.id) def ensure_partitions(dev): for part in dev.partitions(): if part.id not in emitted_ids: if part not in work and part not in next_work: next_work.append(part) def can_emit(obj): if obj.type == "partition": ensure_partitions(obj.device) for p in obj.device.partitions(): if p._number < obj._number and p.id not in emitted_ids: return False for dep in dependencies(obj): if dep.id not in emitted_ids: if dep not in work and dep not in next_work: next_work.append(dep) if dep.type in ['disk', 'raid']: ensure_partitions(dep) return False if isinstance(obj, Mount): # Any mount actions for a parent of this one have to be emitted # first. 
for parent in pathlib.Path(obj.path).parents: parent = str(parent) if parent in mountpoints: if mountpoints[parent] not in emitted_ids: log.debug( "cannot emit action to mount %s until that " "for %s is emitted", obj.path, parent) return False return True mountpoints = {m.path: m.id for m in self.all_mounts()} log.debug('mountpoints %s', mountpoints) work = [ a for a in self._actions if not getattr(a, 'preserve', False) ] while work: next_work = [] for obj in work: if can_emit(obj): emit(obj) else: next_work.append(obj) if {a.id for a in next_work} == {a.id for a in work}: msg = ["rendering block devices made no progress processing:"] for w in work: msg.append(" - " + str(w)) raise Exception("\n".join(msg)) work = next_work return r def render(self): config = { 'storage': { 'version': 1, 'config': self._render_actions(), }, } if self.swap is not None: config['swap'] = self.swap if self.grub is not None: config['grub'] = self.grub return config def load_probe_data(self, probe_data): self._probe_data = probe_data self.reset() def _matcher(self, type, kw): for a in self._actions: if a.type != type: continue for k, v in kw.items(): if getattr(a, k) != v: break else: yield a def _one(self, *, type, **kw): try: return next(self._matcher(type, kw)) except StopIteration: return None def _all(self, *, type, **kw): return list(self._matcher(type, kw)) def all_mounts(self): return self._all(type='mount') def all_devices(self): # return: # compound devices, newest first # disk devices, sorted by label disks = [] compounds = [] for a in self._actions: if a.type == 'disk': disks.append(a) elif isinstance(a, _Device): compounds.append(a) compounds.reverse() disks.sort(key=lambda x: x.label) return compounds + disks def all_disks(self): return sorted(self._all(type='disk'), key=lambda x: x.label) def all_raids(self): return self._all(type='raid') def all_volgroups(self): return self._all(type='lvm_volgroup') def _remove(self, obj): _remove_backlinks(obj) self._actions.remove(obj) 
def add_partition(self, device, size, flag="", wipe=None, grub_device=None):
    """Create a Partition of (aligned) `size` bytes on `device`.

    `flag` marks special partitions ("boot", "bios_grub", "prep"); those
    are kept at the front of the device's partition list.  Returns the
    new Partition action.
    """
    if size > device.free_for_partitions:
        # BUGFIX: was `raise Exception("%s > %s", size, ...)` -- Exception
        # does not apply %-style arguments the way logging calls do; the
        # extra args were silently carried as a tuple.  Format explicitly.
        raise Exception(
            "%s > %s" % (size, device.free_for_partitions))
    real_size = align_up(size)
    log.debug("add_partition: rounded size from %s to %s", size, real_size)
    if device._fs is not None:
        raise Exception("%s is already formatted" % (device.label,))
    p = Partition(
        m=self, device=device, size=real_size, flag=flag, wipe=wipe,
        grub_device=grub_device)
    if flag in ("boot", "bios_grub", "prep"):
        # The Partition constructor appends itself to device._partitions
        # (via backlinks); move special partitions to the front.
        device._partitions.insert(0, device._partitions.pop())
    device.ptable = device.ptable_for_new_partition()
    dasd = device.dasd()
    if dasd is not None:
        # Adding a partition to a DASD forces it to the 'cdl' layout and
        # means it can no longer be preserved as-is.
        dasd.device_layout = 'cdl'
        dasd.preserve = False
    self._actions.append(p)
    return p


def remove_partition(self, part):
    """Remove an empty partition; clears the ptable if it was the last one."""
    if part._fs or part._constructed_device:
        raise Exception("can only remove empty partition")
    self._remove(part)
    if len(part.device._partitions) == 0:
        part.device.ptable = None


def add_raid(self, name, raidlevel, devices, spare_devices):
    """Create a Raid action from `devices` (+ optional spares)."""
    r = Raid(
        m=self,
        name=name,
        raidlevel=raidlevel,
        devices=devices,
        spare_devices=spare_devices)
    self._actions.append(r)
    return r


def remove_raid(self, raid):
    """Remove a RAID that has no filesystem, consumer or partitions."""
    if raid._fs or raid._constructed_device or len(raid.partitions()):
        raise Exception("can only remove empty RAID")
    self._remove(raid)


def add_volgroup(self, name, devices):
    """Create an LVM volume group backed by `devices`."""
    vg = LVM_VolGroup(m=self, name=name, devices=devices)
    self._actions.append(vg)
    return vg


def remove_volgroup(self, vg):
    """Remove a volume group that has no logical volumes."""
    if len(vg._partitions):
        raise Exception("can only remove empty VG")
    self._remove(vg)


def add_logical_volume(self, vg, name, size):
    """Create a logical volume of `size` bytes in volume group `vg`."""
    lv = LVM_LogicalVolume(m=self, volgroup=vg, name=name, size=size)
    self._actions.append(lv)
    return lv


def remove_logical_volume(self, lv):
    """Remove a logical volume that carries no filesystem."""
    if lv._fs:
        raise Exception("can only remove empty LV")
    self._remove(lv)


def add_dm_crypt(self, volume, key):
    """Create a dm-crypt mapping over `volume` with passphrase `key`."""
    if not volume.available:
        raise Exception("{} is not available".format(volume))
    # NOTE(review): unlike the other add_* helpers this does not pass
    # m=self to the constructor -- confirm DM_Crypt sets its model link
    # some other way.
    dm_crypt = DM_Crypt(volume=volume, key=key)
    self._actions.append(dm_crypt)
    return dm_crypt


def remove_dm_crypt(self, dm_crypt):
    self._remove(dm_crypt)


def add_filesystem(self, volume, fstype, preserve=False):
    """Format `volume` with `fstype` and return the Filesystem action."""
    log.debug("adding %s to %s", fstype, volume)
    if not volume.available:
        # NOTE(review): this nesting only raises for non-Partition volumes
        # whose flag is prep/bios_grub(+fat32), but non-Partition objects
        # are unlikely to have a .flag at all -- the condition looks
        # inverted relative to its apparent intent; confirm against the
        # Partition/availability model before relying on it.
        if not isinstance(volume, Partition):
            if (volume.flag == 'prep' or (
                    volume.flag == 'bios_grub' and fstype == 'fat32')):
                raise Exception("{} is not available".format(volume))
    if volume._fs is not None:
        # BUGFIX: the "%s" placeholder previously had no operand, so the
        # raised message was the literal format string.
        raise Exception("%s is already formatted" % (volume,))
    fs = Filesystem(
        m=self, volume=volume, fstype=fstype, preserve=preserve)
    self._actions.append(fs)
    return fs


def remove_filesystem(self, fs):
    """Remove a filesystem that is not mounted."""
    if fs._mount:
        raise Exception("can only remove unmounted filesystem")
    self._remove(fs)


def add_mount(self, fs, path):
    """Mount filesystem `fs` at `path`; may suppress the swapfile."""
    if fs._mount is not None:
        # BUGFIX: "%s" previously had no operand (see add_filesystem).
        raise Exception("%s is already mounted" % (fs,))
    m = Mount(m=self, device=fs, path=path)
    self._actions.append(m)
    # Adding a swap partition or mounting btrfs at / suppresses
    # the swapfile.
    if not self._should_add_swapfile():
        self.swap = {'swap': 0}
    return m


def remove_mount(self, mount):
    self._remove(mount)
    # Removing a mount might make it ok to add a swapfile again.
    if self._should_add_swapfile():
        self.swap = None


def needs_bootloader_partition(self):
    '''true if no disk have a boot partition, and one is needed'''
    # s390x has no such thing
    if self.bootloader == Bootloader.NONE:
        return False
    elif self.bootloader == Bootloader.BIOS:
        return self._one(type='disk', grub_device=True) is None
    elif self.bootloader == Bootloader.UEFI:
        # Satisfied only by an ESP that is formatted and mounted at
        # /boot/efi.
        for esp in self._all(type='partition', grub_device=True):
            if esp.fs() and esp.fs().mount():
                if esp.fs().mount().path == '/boot/efi':
                    return False
        return True
    elif self.bootloader == Bootloader.PREP:
        return self._one(type='partition', grub_device=True) is None
    else:
        raise AssertionError(
            "unknown bootloader type {}".format(self.bootloader))


def _mount_for_path(self, path):
    """Return the mount action at `path`, or None."""
    return self._one(type='mount', path=path)


def is_root_mounted(self):
    return self._mount_for_path('/') is not None


def can_install(self):
    return (self.is_root_mounted()
            and not self.needs_bootloader_partition())


def _should_add_swapfile(self):
    """True unless / is btrfs or a mounted swap partition exists."""
    mount = self._mount_for_path('/')
    if mount is not None and mount.device.fstype == 'btrfs':
        return False
    for swap in self._all(type='format', fstype='swap'):
        if swap.mount():
            return False
    return True
./CrossVul/dataset_final_sorted/CWE-532/py/good_3972_0
crossvul-python_data_bad_18_13
import base64

from django.test import override_settings, SimpleTestCase
from mock import create_autospec, ANY

from anymail.exceptions import AnymailInsecureWebhookWarning
from anymail.signals import tracking, inbound

from .utils import AnymailTestMixin, ClientWithCsrfChecks


def event_handler(sender, event, esp_name, **kwargs):
    """Prototypical webhook signal handler"""
    pass


@override_settings(ANYMAIL={'WEBHOOK_AUTHORIZATION': 'username:password'})
class WebhookTestCase(AnymailTestMixin, SimpleTestCase):
    """Base for testing webhooks
    - connects webhook signal handlers
    - sets up basic auth by default (since most ESP webhooks warn if it's not enabled)
    """
    # Client subclass that keeps CSRF checking enabled, so webhook views
    # must be correctly csrf-exempt to pass these tests.
    client_class = ClientWithCsrfChecks

    def setUp(self):
        super(WebhookTestCase, self).setUp()
        # Use correct basic auth by default (individual tests can override):
        self.set_basic_auth()
        # Install mocked signal handlers
        # (autospec'd so call signatures are verified; disconnected on cleanup
        # so handlers never leak between test cases)
        self.tracking_handler = create_autospec(event_handler)
        tracking.connect(self.tracking_handler)
        self.addCleanup(tracking.disconnect, self.tracking_handler)
        self.inbound_handler = create_autospec(event_handler)
        inbound.connect(self.inbound_handler)
        self.addCleanup(inbound.disconnect, self.inbound_handler)

    def set_basic_auth(self, username='username', password='password'):
        """Set basic auth for all subsequent test client requests"""
        credentials = base64.b64encode("{}:{}".format(username, password).encode('utf-8')).decode('utf-8')
        self.client.defaults['HTTP_AUTHORIZATION'] = "Basic {}".format(credentials)

    def clear_basic_auth(self):
        # Remove the default Authorization header (if any) from the test client.
        self.client.defaults.pop('HTTP_AUTHORIZATION', None)

    def assert_handler_called_once_with(self, mockfn, *expected_args, **expected_kwargs):
        """Verifies mockfn was called with expected_args and at least expected_kwargs.
        Ignores *additional* actual kwargs (which might be added by Django signal dispatch).
        (This differs from mock.assert_called_once_with.)
        Returns the actual kwargs.
        """
        self.assertEqual(mockfn.call_count, 1)
        actual_args, actual_kwargs = mockfn.call_args
        self.assertEqual(actual_args, expected_args)
        for key, expected_value in expected_kwargs.items():
            if expected_value is ANY:
                # ANY only requires the key to be present, whatever its value
                self.assertIn(key, actual_kwargs)
            else:
                self.assertEqual(actual_kwargs[key], expected_value)
        return actual_kwargs

    def get_kwargs(self, mockfn):
        """Return the kwargs passed to the most recent call to mockfn"""
        self.assertIsNotNone(mockfn.call_args)  # mockfn hasn't been called yet
        actual_args, actual_kwargs = mockfn.call_args
        return actual_kwargs


# noinspection PyUnresolvedReferences
class WebhookBasicAuthTestsMixin(object):
    """Common test cases for webhook basic authentication.
    Instantiate for each ESP's webhooks by:
    - mixing into WebhookTestCase
    - defining call_webhook to invoke the ESP's webhook
    """
    should_warn_if_no_auth = True  # subclass set False if other webhook verification used

    def call_webhook(self):
        # Concrete test cases should call a webhook via self.client.post,
        # and return the response
        raise NotImplementedError()

    @override_settings(ANYMAIL={})  # Clear the WEBHOOK_AUTH settings from superclass
    def test_warns_if_no_auth(self):
        if self.should_warn_if_no_auth:
            with self.assertWarns(AnymailInsecureWebhookWarning):
                response = self.call_webhook()
        else:
            with self.assertDoesNotWarn(AnymailInsecureWebhookWarning):
                response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

    def test_verifies_basic_auth(self):
        # Default (correct) credentials from WebhookTestCase.setUp
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

    def test_verifies_bad_auth(self):
        self.set_basic_auth('baduser', 'wrongpassword')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 400)

    def test_verifies_missing_auth(self):
        self.clear_basic_auth()
        response = self.call_webhook()
        self.assertEqual(response.status_code, 400)

    @override_settings(ANYMAIL={'WEBHOOK_AUTHORIZATION': ['cred1:pass1', 'cred2:pass2']})
    def test_supports_credential_rotation(self):
        """You can supply a list of basic auth credentials, and any is allowed"""
        self.set_basic_auth('cred1', 'pass1')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

        self.set_basic_auth('cred2', 'pass2')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 200)

        self.set_basic_auth('baduser', 'wrongpassword')
        response = self.call_webhook()
        self.assertEqual(response.status_code, 400)
./CrossVul/dataset_final_sorted/CWE-532/py/bad_18_13
crossvul-python_data_bad_3250_4
# Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import setup setup( name='security_monkey', version='0.8.0', long_description=__doc__, packages=['security_monkey'], include_package_data=True, zip_safe=False, install_requires=[ 'APScheduler==2.1.2', 'Flask==0.10.1', 'Flask-Login==0.2.10', 'Flask-Mail==0.9.0', 'Flask-Migrate==1.3.1', 'Flask-Principal==0.4.0', 'Flask-RESTful==0.3.3', 'Flask-SQLAlchemy==1.0', 'Flask-Script==0.6.3', 'Flask-Security==1.7.4', 'Flask-WTF==0.9.5', 'Jinja2==2.8', 'SQLAlchemy==0.9.2', 'boto>=2.41.0', 'ipaddr==2.1.11', 'itsdangerous==0.23', 'psycopg2==2.5.2', 'bcrypt==2.0.0', 'Sphinx==1.2.2', 'gunicorn==18.0', 'cryptography==1.3.2', 'boto3>=1.4.2', 'botocore>=1.4.81', 'dpath==1.3.2', 'pyyaml==3.11', 'jira==0.32', 'cloudaux>=1.0.6', 'joblib>=0.9.4', 'pyjwt>=1.01', ], extras_require = { 'onelogin': ['python-saml>=2.2.0'], 'tests': [ 'nose==1.3.0', 'mock==1.0.1', 'moto==0.4.30', 'freezegun>=0.3.7' ] } )
./CrossVul/dataset_final_sorted/CWE-601/py/bad_3250_4
crossvul-python_data_bad_1915_12
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# HTTP pusher: delivers push notifications for one (user, app, pushkey)
# to the pushkey's configured HTTP gateway, with exponential backoff.
import logging

from prometheus_client import Counter

from twisted.internet.error import AlreadyCalled, AlreadyCancelled

from synapse.api.constants import EventTypes
from synapse.logging import opentracing
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
from synapse.types import RoomStreamToken

from . import push_rule_evaluator, push_tools

logger = logging.getLogger(__name__)

http_push_processed_counter = Counter(
    "synapse_http_httppusher_http_pushes_processed",
    "Number of push notifications successfully sent",
)

http_push_failed_counter = Counter(
    "synapse_http_httppusher_http_pushes_failed",
    "Number of push notifications which failed",
)

http_badges_processed_counter = Counter(
    "synapse_http_httppusher_badge_updates_processed",
    "Number of badge updates successfully sent",
)

http_badges_failed_counter = Counter(
    "synapse_http_httppusher_badge_updates_failed",
    "Number of badge updates which failed",
)


class HttpPusher:
    # Backoff between delivery retries, doubling up to MAX_BACKOFF_SEC.
    INITIAL_BACKOFF_SEC = 1  # in seconds because that's what Twisted takes
    MAX_BACKOFF_SEC = 60 * 60

    # This one's in ms because we compare it against the clock
    GIVE_UP_AFTER_MS = 24 * 60 * 60 * 1000

    def __init__(self, hs, pusherdict):
        # `pusherdict` is a row from the pushers table; see the keys read below.
        self.hs = hs
        self.store = self.hs.get_datastore()
        self.storage = self.hs.get_storage()
        self.clock = self.hs.get_clock()
        self.state_handler = self.hs.get_state_handler()
        self.user_id = pusherdict["user_name"]
        self.app_id = pusherdict["app_id"]
        self.app_display_name = pusherdict["app_display_name"]
        self.device_display_name = pusherdict["device_display_name"]
        self.pushkey = pusherdict["pushkey"]
        self.pushkey_ts = pusherdict["ts"]
        self.data = pusherdict["data"]
        self.last_stream_ordering = pusherdict["last_stream_ordering"]
        self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
        self.failing_since = pusherdict["failing_since"]
        self.timed_call = None
        self._is_processing = False
        self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room

        # This is the highest stream ordering we know it's safe to process.
        # When new events arrive, we'll be given a window of new events: we
        # should honour this rather than just looking for anything higher
        # because of potential out-of-order event serialisation. This starts
        # off as None though as we don't know any better.
        self.max_stream_ordering = None

        # NOTE(review): this check is dead -- pusherdict["data"] was already
        # indexed above (which would have raised KeyError), and self.data is
        # re-assigned redundantly just below.
        if "data" not in pusherdict:
            raise PusherConfigException("No 'data' key for HTTP pusher")
        self.data = pusherdict["data"]

        self.name = "%s/%s/%s" % (
            pusherdict["user_name"],
            pusherdict["app_id"],
            pusherdict["pushkey"],
        )

        if self.data is None:
            raise PusherConfigException("data can not be null for HTTP pusher")

        if "url" not in self.data:
            raise PusherConfigException("'url' required in data for HTTP pusher")
        self.url = self.data["url"]
        self.http_client = hs.get_proxied_http_client()
        # Copy of `data` without the url, sent back to the gateway in each push.
        self.data_minus_url = {}
        self.data_minus_url.update(self.data)
        del self.data_minus_url["url"]

    def on_started(self, should_check_for_notifs):
        """Called when this pusher has been started.

        Args:
            should_check_for_notifs (bool): Whether we should immediately
                check for push to send. Set to False only if it's known there
                is nothing to send
        """
        if should_check_for_notifs:
            self._start_processing()

    def on_new_notifications(self, max_token: RoomStreamToken):
        # We just use the minimum stream ordering and ignore the vector clock
        # component. This is safe to do as long as we *always* ignore the vector
        # clock components.
        max_stream_ordering = max_token.stream

        self.max_stream_ordering = max(
            max_stream_ordering, self.max_stream_ordering or 0
        )
        self._start_processing()

    def on_new_receipts(self, min_stream_id, max_stream_id):
        # Note that the min here shouldn't be relied upon to be accurate.

        # We could check the receipts are actually m.read receipts here,
        # but currently that's the only type of receipt anyway...
        run_as_background_process("http_pusher.on_new_receipts", self._update_badge)

    async def _update_badge(self):
        # XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems
        # to be largely redundant. perhaps we can remove it.
        badge = await push_tools.get_badge_count(
            self.hs.get_datastore(),
            self.user_id,
            group_by_room=self._group_unread_count_by_room,
        )
        await self._send_badge(badge)

    def on_timer(self):
        # Backoff timer fired: retry processing.
        self._start_processing()

    def on_stop(self):
        if self.timed_call:
            try:
                self.timed_call.cancel()
            except (AlreadyCalled, AlreadyCancelled):
                pass
            self.timed_call = None

    def _start_processing(self):
        # No-op if a processing loop is already running; _process will loop
        # until it has caught up with max_stream_ordering.
        if self._is_processing:
            return

        run_as_background_process("httppush.process", self._process)

    async def _process(self):
        # we should never get here if we are already processing
        assert not self._is_processing

        try:
            self._is_processing = True
            # if the max ordering changes while we're running _unsafe_process,
            # call it again, and so on until we've caught up.
            while True:
                starting_max_ordering = self.max_stream_ordering
                try:
                    await self._unsafe_process()
                except Exception:
                    logger.exception("Exception processing notifs")
                if self.max_stream_ordering == starting_max_ordering:
                    break
        finally:
            self._is_processing = False

    async def _unsafe_process(self):
        """
        Looks for unset notifications and dispatch them, in order
        Never call this directly: use _process which will only allow this to
        run once per pusher.
        """

        fn = self.store.get_unread_push_actions_for_user_in_range_for_http
        unprocessed = await fn(
            self.user_id, self.last_stream_ordering, self.max_stream_ordering
        )

        logger.info(
            "Processing %i unprocessed push actions for %s starting at "
            "stream_ordering %s",
            len(unprocessed),
            self.name,
            self.last_stream_ordering,
        )

        for push_action in unprocessed:
            with opentracing.start_active_span(
                "http-push",
                tags={
                    "authenticated_entity": self.user_id,
                    "event_id": push_action["event_id"],
                    "app_id": self.app_id,
                    "app_display_name": self.app_display_name,
                },
            ):
                processed = await self._process_one(push_action)

            if processed:
                http_push_processed_counter.inc()
                self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                self.last_stream_ordering = push_action["stream_ordering"]
                pusher_still_exists = await self.store.update_pusher_last_stream_ordering_and_success(
                    self.app_id,
                    self.pushkey,
                    self.user_id,
                    self.last_stream_ordering,
                    self.clock.time_msec(),
                )
                if not pusher_still_exists:
                    # The pusher has been deleted while we were processing, so
                    # lets just stop and return.
                    self.on_stop()
                    return

                if self.failing_since:
                    self.failing_since = None
                    await self.store.update_pusher_failing_since(
                        self.app_id, self.pushkey, self.user_id, self.failing_since
                    )
            else:
                http_push_failed_counter.inc()
                if not self.failing_since:
                    self.failing_since = self.clock.time_msec()
                    await self.store.update_pusher_failing_since(
                        self.app_id, self.pushkey, self.user_id, self.failing_since
                    )

                if (
                    self.failing_since
                    and self.failing_since
                    < self.clock.time_msec() - HttpPusher.GIVE_UP_AFTER_MS
                ):
                    # we really only give up so that if the URL gets
                    # fixed, we don't suddenly deliver a load
                    # of old notifications.
                    logger.warning(
                        "Giving up on a notification to user %s, pushkey %s",
                        self.user_id,
                        self.pushkey,
                    )
                    self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
                    self.last_stream_ordering = push_action["stream_ordering"]
                    pusher_still_exists = await self.store.update_pusher_last_stream_ordering(
                        self.app_id,
                        self.pushkey,
                        self.user_id,
                        self.last_stream_ordering,
                    )
                    if not pusher_still_exists:
                        # The pusher has been deleted while we were processing, so
                        # lets just stop and return.
                        self.on_stop()
                        return

                    self.failing_since = None
                    await self.store.update_pusher_failing_since(
                        self.app_id, self.pushkey, self.user_id, self.failing_since
                    )
                else:
                    logger.info("Push failed: delaying for %ds", self.backoff_delay)
                    self.timed_call = self.hs.get_reactor().callLater(
                        self.backoff_delay, self.on_timer
                    )
                    self.backoff_delay = min(
                        self.backoff_delay * 2, self.MAX_BACKOFF_SEC
                    )
                    break

    async def _process_one(self, push_action):
        # Returns True if the action is done with (delivered, redacted, or not
        # a notify action); False means "retry later".
        if "notify" not in push_action["actions"]:
            return True

        tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"])
        badge = await push_tools.get_badge_count(
            self.hs.get_datastore(),
            self.user_id,
            group_by_room=self._group_unread_count_by_room,
        )

        event = await self.store.get_event(push_action["event_id"], allow_none=True)
        if event is None:
            return True  # It's been redacted
        rejected = await self.dispatch_push(event, tweaks, badge)
        if rejected is False:
            return False

        if isinstance(rejected, list) or isinstance(rejected, tuple):
            # The gateway rejected some pushkeys: remove ours if it is listed.
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warning(
                        ("Ignoring rejected pushkey %s because we didn't send it"),
                        pk,
                    )
                else:
                    logger.info("Pushkey %s was rejected: removing", pk)

                    await self.hs.remove_pusher(self.app_id, pk, self.user_id)
        return True

    async def _build_notification_dict(self, event, tweaks, badge):
        priority = "low"
        if (
            event.type == EventTypes.Encrypted
            or tweaks.get("highlight")
            or tweaks.get("sound")
        ):
            # HACK send our push as high priority only if it generates a sound, highlight
            #  or may do so (i.e. is encrypted so has unknown effects).
            priority = "high"

        if self.data.get("format") == "event_id_only":
            # Minimal payload: just enough for the client to fetch the event.
            d = {
                "notification": {
                    "event_id": event.event_id,
                    "room_id": event.room_id,
                    "counts": {"unread": badge},
                    "prio": priority,
                    "devices": [
                        {
                            "app_id": self.app_id,
                            "pushkey": self.pushkey,
                            "pushkey_ts": int(self.pushkey_ts / 1000),
                            "data": self.data_minus_url,
                        }
                    ],
                }
            }
            return d

        ctx = await push_tools.get_context_for_event(
            self.storage, self.state_handler, event, self.user_id
        )

        d = {
            "notification": {
                "id": event.event_id,  # deprecated: remove soon
                "event_id": event.event_id,
                "room_id": event.room_id,
                "type": event.type,
                "sender": event.user_id,
                "prio": priority,
                "counts": {
                    "unread": badge,
                    # 'missed_calls': 2
                },
                "devices": [
                    {
                        "app_id": self.app_id,
                        "pushkey": self.pushkey,
                        "pushkey_ts": int(self.pushkey_ts / 1000),
                        "data": self.data_minus_url,
                        "tweaks": tweaks,
                    }
                ],
            }
        }
        if event.type == "m.room.member" and event.is_state():
            d["notification"]["membership"] = event.content["membership"]
            d["notification"]["user_is_target"] = event.state_key == self.user_id
        if self.hs.config.push_include_content and event.content:
            d["notification"]["content"] = event.content

        # We no longer send aliases separately, instead, we send the human
        # readable name of the room, which may be an alias.
        if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0:
            d["notification"]["sender_display_name"] = ctx["sender_display_name"]
        if "name" in ctx and len(ctx["name"]) > 0:
            d["notification"]["room_name"] = ctx["name"]

        return d

    async def dispatch_push(self, event, tweaks, badge):
        # Returns False on delivery failure, else the (possibly empty) list of
        # pushkeys the gateway rejected.
        notification_dict = await self._build_notification_dict(event, tweaks, badge)
        if not notification_dict:
            return []
        try:
            resp = await self.http_client.post_json_get_json(
                self.url, notification_dict
            )
        except Exception as e:
            logger.warning(
                "Failed to push event %s to %s: %s %s",
                event.event_id,
                self.name,
                type(e),
                e,
            )
            return False
        rejected = []
        if "rejected" in resp:
            rejected = resp["rejected"]
        return rejected

    async def _send_badge(self, badge):
        """
        Args:
            badge (int): number of unread messages
        """
        logger.debug("Sending updated badge count %d to %s", badge, self.name)
        d = {
            "notification": {
                "id": "",
                "type": None,
                "sender": "",
                "counts": {"unread": badge},
                "devices": [
                    {
                        "app_id": self.app_id,
                        "pushkey": self.pushkey,
                        "pushkey_ts": int(self.pushkey_ts / 1000),
                        "data": self.data_minus_url,
                    }
                ],
            }
        }
        try:
            await self.http_client.post_json_get_json(self.url, d)
            http_badges_processed_counter.inc()
        except Exception as e:
            logger.warning(
                "Failed to send badge count to %s: %s %s", self.name, type(e), e
            )
            http_badges_failed_counter.inc()
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_12
crossvul-python_data_good_1915_5
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019 Matrix.org Federation C.I.C # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple, Union, ) from prometheus_client import Counter, Gauge, Histogram from twisted.internet import defer from twisted.internet.abstract import isIPAddress from twisted.python import failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, FederationError, IncompatibleRoomVersionError, NotFoundError, SynapseError, UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, nested_logging_context, run_in_background, ) from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, ) from synapse.types import JsonDict, 
get_domain_from_id from synapse.util import glob_to_regex, json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache if TYPE_CHECKING: from synapse.server import HomeServer # when processing incoming transactions, we try to handle multiple rooms in # parallel, up to this limit. TRANSACTION_CONCURRENCY_LIMIT = 10 logger = logging.getLogger(__name__) received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") received_edus_counter = Counter("synapse_federation_server_received_edus", "") received_queries_counter = Counter( "synapse_federation_server_received_queries", "", ["type"] ) pdu_process_time = Histogram( "synapse_federation_server_pdu_process_time", "Time taken to process an event", ) last_pdu_age_metric = Gauge( "synapse_federation_last_received_pdu_age", "The age (in seconds) of the last PDU successfully received from the given domain", labelnames=("server_name",), ) class FederationServer(FederationBase): def __init__(self, hs): super().__init__(hs) self.auth = hs.get_auth() self.handler = hs.get_federation_handler() self.state = hs.get_state_handler() self.device_handler = hs.get_device_handler() # Ensure the following handlers are loaded since they register callbacks # with FederationHandlerRegistry. hs.get_directory_handler() self._federation_ratelimiter = hs.get_federation_ratelimiter() self._server_linearizer = Linearizer("fed_server") self._transaction_linearizer = Linearizer("fed_txn_handler") # We cache results for transaction with the same ID self._transaction_resp_cache = ResponseCache( hs, "fed_txn_handler", timeout_ms=30000 ) # type: ResponseCache[Tuple[str, str]] self.transaction_actions = TransactionActions(self.store) self.registry = hs.get_federation_registry() # We cache responses to state queries, as they take a while and often # come in waves. 
self._state_resp_cache = ResponseCache( hs, "state_resp", timeout_ms=30000 ) # type: ResponseCache[Tuple[str, str]] self._state_ids_resp_cache = ResponseCache( hs, "state_ids_resp", timeout_ms=30000 ) # type: ResponseCache[Tuple[str, str]] self._federation_metrics_domains = ( hs.get_config().federation.federation_metrics_domains ) async def on_backfill_request( self, origin: str, room_id: str, versions: List[str], limit: int ) -> Tuple[int, Dict[str, Any]]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) pdus = await self.handler.on_backfill_request( origin, room_id, versions, limit ) res = self._transaction_from_pdus(pdus).get_dict() return 200, res async def on_incoming_transaction( self, origin: str, transaction_data: JsonDict ) -> Tuple[int, Dict[str, Any]]: # keep this as early as possible to make the calculated origin ts as # accurate as possible. request_time = self._clock.time_msec() transaction = Transaction(**transaction_data) transaction_id = transaction.transaction_id # type: ignore if not transaction_id: raise Exception("Transaction missing transaction_id") logger.debug("[%s] Got transaction", transaction_id) # We wrap in a ResponseCache so that we de-duplicate retried # transactions. return await self._transaction_resp_cache.wrap( (origin, transaction_id), self._on_incoming_transaction_inner, origin, transaction, request_time, ) async def _on_incoming_transaction_inner( self, origin: str, transaction: Transaction, request_time: int ) -> Tuple[int, Dict[str, Any]]: # Use a linearizer to ensure that transactions from a remote are # processed in order. with await self._transaction_linearizer.queue(origin): # We rate limit here *after* we've queued up the incoming requests, # so that we don't fill up the ratelimiter with blocked requests. 
# # This is important as the ratelimiter allows N concurrent requests # at a time, and only starts ratelimiting if there are more requests # than that being processed at a time. If we queued up requests in # the linearizer/response cache *after* the ratelimiting then those # queued up requests would count as part of the allowed limit of N # concurrent requests. with self._federation_ratelimiter.ratelimit(origin) as d: await d result = await self._handle_incoming_transaction( origin, transaction, request_time ) return result async def _handle_incoming_transaction( self, origin: str, transaction: Transaction, request_time: int ) -> Tuple[int, Dict[str, Any]]: """ Process an incoming transaction and return the HTTP response Args: origin: the server making the request transaction: incoming transaction request_time: timestamp that the HTTP request arrived at Returns: HTTP response code and body """ response = await self.transaction_actions.have_responded(origin, transaction) if response: logger.debug( "[%s] We've already responded to this request", transaction.transaction_id, # type: ignore ) return response logger.debug("[%s] Transaction is new", transaction.transaction_id) # type: ignore # Reject if PDU count > 50 or EDU count > 100 if len(transaction.pdus) > 50 or ( # type: ignore hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore ): logger.info("Transaction PDU or EDU count too large. Returning 400") response = {} await self.transaction_actions.set_response( origin, transaction, 400, response ) return 400, response # We process PDUs and EDUs in parallel. This is important as we don't # want to block things like to device messages from reaching clients # behind the potentially expensive handling of PDUs. 
pdu_results, _ = await make_deferred_yieldable( defer.gatherResults( [ run_in_background( self._handle_pdus_in_txn, origin, transaction, request_time ), run_in_background(self._handle_edus_in_txn, origin, transaction), ], consumeErrors=True, ).addErrback(unwrapFirstError) ) response = {"pdus": pdu_results} logger.debug("Returning: %s", str(response)) await self.transaction_actions.set_response(origin, transaction, 200, response) return 200, response async def _handle_pdus_in_txn( self, origin: str, transaction: Transaction, request_time: int ) -> Dict[str, dict]: """Process the PDUs in a received transaction. Args: origin: the server making the request transaction: incoming transaction request_time: timestamp that the HTTP request arrived at Returns: A map from event ID of a processed PDU to any errors we should report back to the sending server. """ received_pdus_counter.inc(len(transaction.pdus)) # type: ignore origin_host, _ = parse_server_name(origin) pdus_by_room = {} # type: Dict[str, List[EventBase]] newest_pdu_ts = 0 for p in transaction.pdus: # type: ignore # FIXME (richardv): I don't think this works: # https://github.com/matrix-org/synapse/issues/8429 if "unsigned" in p: unsigned = p["unsigned"] if "age" in unsigned: p["age"] = unsigned["age"] if "age" in p: p["age_ts"] = request_time - int(p["age"]) del p["age"] # We try and pull out an event ID so that if later checks fail we # can log something sensible. We don't mandate an event ID here in # case future event formats get rid of the key. possible_event_id = p.get("event_id", "<Unknown>") # Now we get the room ID so that we can check that we know the # version of the room. room_id = p.get("room_id") if not room_id: logger.info( "Ignoring PDU as does not have a room_id. 
Event ID: %s", possible_event_id, ) continue try: room_version = await self.store.get_room_version(room_id) except NotFoundError: logger.info("Ignoring PDU for unknown room_id: %s", room_id) continue except UnsupportedRoomVersionError as e: # this can happen if support for a given room version is withdrawn, # so that we still get events for said room. logger.info("Ignoring PDU: %s", e) continue event = event_from_pdu_json(p, room_version) pdus_by_room.setdefault(room_id, []).append(event) if event.origin_server_ts > newest_pdu_ts: newest_pdu_ts = event.origin_server_ts pdu_results = {} # we can process different rooms in parallel (which is useful if they # require callouts to other servers to fetch missing events), but # impose a limit to avoid going too crazy with ram/cpu. async def process_pdus_for_room(room_id: str): logger.debug("Processing PDUs for %s", room_id) try: await self.check_server_matches_acl(origin_host, room_id) except AuthError as e: logger.warning("Ignoring PDUs for room %s from banned server", room_id) for pdu in pdus_by_room[room_id]: event_id = pdu.event_id pdu_results[event_id] = e.error_dict() return for pdu in pdus_by_room[room_id]: event_id = pdu.event_id with pdu_process_time.time(): with nested_logging_context(event_id): try: await self._handle_received_pdu(origin, pdu) pdu_results[event_id] = {} except FederationError as e: logger.warning("Error handling PDU %s: %s", event_id, e) pdu_results[event_id] = {"error": str(e)} except Exception as e: f = failure.Failure() pdu_results[event_id] = {"error": str(e)} logger.error( "Failed to handle PDU %s", event_id, exc_info=(f.type, f.value, f.getTracebackObject()), ) await concurrently_execute( process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT ) if newest_pdu_ts and origin in self._federation_metrics_domains: newest_pdu_age = self._clock.time_msec() - newest_pdu_ts last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000) return pdu_results async def 
_handle_edus_in_txn(self, origin: str, transaction: Transaction): """Process the EDUs in a received transaction. """ async def _process_edu(edu_dict): received_edus_counter.inc() edu = Edu( origin=origin, destination=self.server_name, edu_type=edu_dict["edu_type"], content=edu_dict["content"], ) await self.registry.on_edu(edu.edu_type, origin, edu.content) await concurrently_execute( _process_edu, getattr(transaction, "edus", []), TRANSACTION_CONCURRENCY_LIMIT, ) async def on_room_state_request( self, origin: str, room_id: str, event_id: str ) -> Tuple[int, Dict[str, Any]]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") # we grab the linearizer to protect ourselves from servers which hammer # us. In theory we might already have the response to this query # in the cache so we could return it without waiting for the linearizer # - but that's non-trivial to get right, and anyway somewhat defeats # the point of the linearizer. 
with (await self._server_linearizer.queue((origin, room_id))): resp = dict( await self._state_resp_cache.wrap( (room_id, event_id), self._on_context_state_request_compute, room_id, event_id, ) ) room_version = await self.store.get_room_version_id(room_id) resp["room_version"] = room_version return 200, resp async def on_state_ids_request( self, origin: str, room_id: str, event_id: str ) -> Tuple[int, Dict[str, Any]]: if not event_id: raise NotImplementedError("Specify an event") origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") resp = await self._state_ids_resp_cache.wrap( (room_id, event_id), self._on_state_ids_request_compute, room_id, event_id, ) return 200, resp async def _on_state_ids_request_compute(self, room_id, event_id): state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = await self.store.get_auth_chain_ids(state_ids) return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids} async def _on_context_state_request_compute( self, room_id: str, event_id: str ) -> Dict[str, list]: if event_id: pdus = await self.handler.get_state_for_pdu(room_id, event_id) else: pdus = (await self.state.get_current_state(room_id)).values() auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus]) return { "pdus": [pdu.get_pdu_json() for pdu in pdus], "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], } async def on_pdu_request( self, origin: str, event_id: str ) -> Tuple[int, Union[JsonDict, str]]: pdu = await self.handler.get_persisted_pdu(origin, event_id) if pdu: return 200, self._transaction_from_pdus([pdu]).get_dict() else: return 404, "" async def on_query_request( self, query_type: str, args: Dict[str, str] ) -> Tuple[int, Dict[str, Any]]: received_queries_counter.labels(query_type).inc() resp = await self.registry.on_query(query_type, 
args) return 200, resp async def on_make_join_request( self, origin: str, room_id: str, user_id: str, supported_versions: List[str] ) -> Dict[str, Any]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) room_version = await self.store.get_room_version_id(room_id) if room_version not in supported_versions: logger.warning( "Room version %s not in %s", room_version, supported_versions ) raise IncompatibleRoomVersionError(room_version=room_version) pdu = await self.handler.on_make_join_request(origin, room_id, user_id) time_now = self._clock.time_msec() return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} async def on_invite_request( self, origin: str, content: JsonDict, room_version_id: str ) -> Dict[str, Any]: room_version = KNOWN_ROOM_VERSIONS.get(room_version_id) if not room_version: raise SynapseError( 400, "Homeserver does not support this room version", Codes.UNSUPPORTED_ROOM_VERSION, ) pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) pdu = await self._check_sigs_and_hash(room_version, pdu) ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version) time_now = self._clock.time_msec() return {"event": ret_pdu.get_pdu_json(time_now)} async def on_send_join_request( self, origin: str, content: JsonDict ) -> Dict[str, Any]: logger.debug("on_send_join_request: content: %s", content) assert_params_in_dict(content, ["room_id"]) room_version = await self.store.get_room_version(content["room_id"]) pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) pdu = await self._check_sigs_and_hash(room_version, pdu) res_pdus = await self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() return { 
"state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]], } async def on_make_leave_request( self, origin: str, room_id: str, user_id: str ) -> Dict[str, Any]: origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) pdu = await self.handler.on_make_leave_request(origin, room_id, user_id) room_version = await self.store.get_room_version_id(room_id) time_now = self._clock.time_msec() return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict: logger.debug("on_send_leave_request: content: %s", content) assert_params_in_dict(content, ["room_id"]) room_version = await self.store.get_room_version(content["room_id"]) pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) pdu = await self._check_sigs_and_hash(room_version, pdu) await self.handler.on_send_leave_request(origin, pdu) return {} async def on_event_auth( self, origin: str, room_id: str, event_id: str ) -> Tuple[int, Dict[str, Any]]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) time_now = self._clock.time_msec() auth_pdus = await self.handler.on_event_auth(event_id) res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} return 200, res @log_function async def on_query_client_keys( self, origin: str, content: Dict[str, str] ) -> Tuple[int, Dict[str, Any]]: return await self.on_query_request("client_keys", content) async def on_query_user_devices( self, origin: str, user_id: str ) -> Tuple[int, Dict[str, Any]]: keys = await self.device_handler.on_federation_query_user_devices(user_id) return 200, keys 
@trace async def on_claim_client_keys( self, origin: str, content: JsonDict ) -> Dict[str, Any]: query = [] for user_id, device_keys in content.get("one_time_keys", {}).items(): for device_id, algorithm in device_keys.items(): query.append((user_id, device_id, algorithm)) log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = await self.store.claim_e2e_one_time_keys(query) json_result = {} # type: Dict[str, Dict[str, dict]] for user_id, device_keys in results.items(): for device_id, keys in device_keys.items(): for key_id, json_str in keys.items(): json_result.setdefault(user_id, {})[device_id] = { key_id: json_decoder.decode(json_str) } logger.info( "Claimed one-time-keys: %s", ",".join( ( "%s for %s:%s" % (key_id, user_id, device_id) for user_id, user_keys in json_result.items() for device_id, device_keys in user_keys.items() for key_id, _ in device_keys.items() ) ), ) return {"one_time_keys": json_result} async def on_get_missing_events( self, origin: str, room_id: str, earliest_events: List[str], latest_events: List[str], limit: int, ) -> Dict[str, list]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) logger.debug( "on_get_missing_events: earliest_events: %r, latest_events: %r," " limit: %d", earliest_events, latest_events, limit, ) missing_events = await self.handler.on_get_missing_events( origin, room_id, earliest_events, latest_events, limit ) if len(missing_events) < 5: logger.debug( "Returning %d events: %r", len(missing_events), missing_events ) else: logger.debug("Returning %d events", len(missing_events)) time_now = self._clock.time_msec() return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} @log_function async def on_openid_userinfo(self, token: str) -> Optional[str]: ts_now_ms = self._clock.time_msec() return await self.store.get_user_id_for_open_id_token(token, ts_now_ms) def 
_transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction: """Returns a new Transaction containing the given PDUs suitable for transmission. """ time_now = self._clock.time_msec() pdus = [p.get_pdu_json(time_now) for p in pdu_list] return Transaction( origin=self.server_name, pdus=pdus, origin_server_ts=int(time_now), destination=None, ) async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None: """ Process a PDU received in a federation /send/ transaction. If the event is invalid, then this method throws a FederationError. (The error will then be logged and sent back to the sender (which probably won't do anything with it), and other events in the transaction will be processed as normal). It is likely that we'll then receive other events which refer to this rejected_event in their prev_events, etc. When that happens, we'll attempt to fetch the rejected event again, which will presumably fail, so those second-generation events will also get rejected. Eventually, we get to the point where there are more than 10 events between any new events and the original rejected event. Since we only try to backfill 10 events deep on received pdu, we then accept the new event, possibly introducing a discontinuity in the DAG, with new forward extremities, so normal service is approximately returned, until we try to backfill across the discontinuity. Args: origin: server which sent the pdu pdu: received pdu Raises: FederationError if the signatures / hash do not match, or if the event was unacceptable for any other reason (eg, too large, too many prev_events, couldn't find the prev_events) """ # check that it's actually being sent from a valid destination to # workaround bug #1753 in 0.18.5 and 0.18.6 if origin != get_domain_from_id(pdu.sender): # We continue to accept join events from any server; this is # necessary for the federation join dance to work correctly. 
# (When we join over federation, the "helper" server is # responsible for sending out the join event, rather than the # origin. See bug #1893. This is also true for some third party # invites). if not ( pdu.type == "m.room.member" and pdu.content and pdu.content.get("membership", None) in (Membership.JOIN, Membership.INVITE) ): logger.info( "Discarding PDU %s from invalid origin %s", pdu.event_id, origin ) return else: logger.info("Accepting join PDU %s from %s", pdu.event_id, origin) # We've already checked that we know the room version by this point room_version = await self.store.get_room_version(pdu.room_id) # Check signature. try: pdu = await self._check_sigs_and_hash(room_version, pdu) except SynapseError as e: raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id) await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True) def __str__(self): return "<ReplicationLayer(%s)>" % self.server_name async def exchange_third_party_invite( self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict ): ret = await self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) return ret async def on_exchange_third_party_invite_request(self, event_dict: Dict): ret = await self.handler.on_exchange_third_party_invite_request(event_dict) return ret async def check_server_matches_acl(self, server_name: str, room_id: str): """Check if the given server is allowed by the server ACLs in the room Args: server_name: name of server, *without any port part* room_id: ID of the room to check Raises: AuthError if the server does not match the ACL """ state_ids = await self.store.get_current_state_ids(room_id) acl_event_id = state_ids.get((EventTypes.ServerACL, "")) if not acl_event_id: return acl_event = await self.store.get_event(acl_event_id) if server_matches_acl_event(server_name, acl_event): return raise AuthError(code=403, msg="Server is banned from room") def server_matches_acl_event(server_name: str, 
acl_event: EventBase) -> bool:
    """Check if the given server is allowed by the ACL event

    Args:
        server_name: name of server, without any port part
        acl_event: m.room.server_acl event

    Returns:
        True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        # malformed ACL content must not grant more access than the default
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        if server_name[0] == "[":
            return False

        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False

    # next, check the deny list.
    # NOTE: deny rules are evaluated before allow rules, so a server matching
    # an entry in both lists is rejected.
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False

    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True

    # everything else should be rejected.
    # (default-deny: a server matching neither list is not allowed)
    # logger.info("%s fell through", server_name)
    return False


def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool:
    """Return True if server_name matches this glob-style ACL entry.

    Non-string entries are logged and treated as non-matching.
    """
    if not isinstance(acl_entry, str):
        logger.warning(
            "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
        )
        return False
    regex = glob_to_regex(acl_entry)
    return bool(regex.match(server_name))


class FederationHandlerRegistry:
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
""" def __init__(self, hs: "HomeServer"): self.config = hs.config self.clock = hs.get_clock() self._instance_name = hs.get_instance_name() # These are safe to load in monolith mode, but will explode if we try # and use them. However we have guards before we use them to ensure that # we don't route to ourselves, and in monolith mode that will always be # the case. self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs) self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs) self.edu_handlers = ( {} ) # type: Dict[str, Callable[[str, dict], Awaitable[None]]] self.query_handlers = {} # type: Dict[str, Callable[[dict], Awaitable[None]]] # Map from type to instance name that we should route EDU handling to. self._edu_type_to_instance = {} # type: Dict[str, str] def register_edu_handler( self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]] ): """Sets the handler callable that will be used to handle an incoming federation EDU of the given type. Args: edu_type: The type of the incoming EDU to register handler for handler: A callable invoked on incoming EDU of the given type. The arguments are the origin server name and the EDU contents. """ if edu_type in self.edu_handlers: raise KeyError("Already have an EDU handler for %s" % (edu_type,)) logger.info("Registering federation EDU handler for %r", edu_type) self.edu_handlers[edu_type] = handler def register_query_handler( self, query_type: str, handler: Callable[[dict], defer.Deferred] ): """Sets the handler callable that will be used to handle an incoming federation query of the given type. Args: query_type: Category name of the query, which should match the string used by make_query. handler: Invoked to handle incoming queries of this type. The return will be yielded on and the result used as the response to the query request. 
""" if query_type in self.query_handlers: raise KeyError("Already have a Query handler for %s" % (query_type,)) logger.info("Registering federation query handler for %r", query_type) self.query_handlers[query_type] = handler def register_instance_for_edu(self, edu_type: str, instance_name: str): """Register that the EDU handler is on a different instance than master. """ self._edu_type_to_instance[edu_type] = instance_name async def on_edu(self, edu_type: str, origin: str, content: dict): if not self.config.use_presence and edu_type == "m.presence": return # Check if we have a handler on this instance handler = self.edu_handlers.get(edu_type) if handler: with start_active_span_from_edu(content, "handle_edu"): try: await handler(origin, content) except SynapseError as e: logger.info("Failed to handle edu %r: %r", edu_type, e) except Exception: logger.exception("Failed to handle edu %r", edu_type) return # Check if we can route it somewhere else that isn't us route_to = self._edu_type_to_instance.get(edu_type, "master") if route_to != self._instance_name: try: await self._send_edu( instance_name=route_to, edu_type=edu_type, origin=origin, content=content, ) except SynapseError as e: logger.info("Failed to handle edu %r: %r", edu_type, e) except Exception: logger.exception("Failed to handle edu %r", edu_type) return # Oh well, let's just log and move on. logger.warning("No handler registered for EDU type %s", edu_type) async def on_query(self, query_type: str, args: dict): handler = self.query_handlers.get(query_type) if handler: return await handler(args) # Check if we can route it somewhere else that isn't us if self._instance_name == "master": return await self._get_query_client(query_type=query_type, args=args) # Uh oh, no handler! Let's raise an exception so the request returns an # error. logger.warning("No handler registered for query type %s", query_type) raise NotFoundError("No handler for Query type '%s'" % (query_type,))
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_5
crossvul-python_data_bad_753_0
"""Tornado handlers for logging into the notebook.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import re import os try: from urllib.parse import urlparse # Py 3 except ImportError: from urlparse import urlparse # Py 2 import uuid from tornado.escape import url_escape from .security import passwd_check, set_password from ..base.handlers import IPythonHandler class LoginHandler(IPythonHandler): """The basic tornado login handler authenticates with a hashed password from the configuration. """ def _render(self, message=None): self.write(self.render_template('login.html', next=url_escape(self.get_argument('next', default=self.base_url)), message=message, )) def _redirect_safe(self, url, default=None): """Redirect if url is on our PATH Full-domain redirects are allowed if they pass our CORS origin checks. Otherwise use default (self.base_url if unspecified). """ if default is None: default = self.base_url if not url.startswith(self.base_url): # require that next_url be absolute path within our path allow = False # OR pass our cross-origin check if '://' in url: # if full URL, run our cross-origin check: parsed = urlparse(url.lower()) origin = '%s://%s' % (parsed.scheme, parsed.netloc) if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) if not allow: # not allowed, use default self.log.warning("Not allowing login redirect to %r" % url) url = default self.redirect(url) def get(self): if self.current_user: next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) else: self._render() @property def hashed_password(self): return self.password_from_settings(self.settings) def passwd_check(self, a, b): return passwd_check(a, b) def post(self): typed_password = self.get_argument('password', default=u'') new_password = self.get_argument('new_password', default=u'') if 
self.get_login_available(self.settings):
            # A password (or token) is configured: validate the credentials.
            if self.passwd_check(self.hashed_password, typed_password) and not new_password:
                self.set_login_cookie(self, uuid.uuid4().hex)
            elif self.token and self.token == typed_password:
                self.set_login_cookie(self, uuid.uuid4().hex)
                if new_password and self.settings.get('allow_password_change'):
                    # token login may also set a new password (opt-in via config)
                    config_dir = self.settings.get('config_dir')
                    config_file = os.path.join(config_dir, 'jupyter_notebook_config.json')
                    set_password(new_password, config_file=config_file)
                    self.log.info("Wrote hashed password to %s" % config_file)
            else:
                self.set_status(401)
                self._render(message={'error': 'Invalid credentials'})
                return

        next_url = self.get_argument('next', default=self.base_url)
        self._redirect_safe(next_url)

    @classmethod
    def set_login_cookie(cls, handler, user_id=None):
        """Call this on handlers to set the login cookie for success"""
        cookie_options = handler.settings.get('cookie_options', {})
        cookie_options.setdefault('httponly', True)
        # tornado <4.2 has a bug that considers secure==True as soon as
        # 'secure' kwarg is passed to set_secure_cookie
        if handler.settings.get('secure_cookie', handler.request.protocol == 'https'):
            cookie_options.setdefault('secure', True)
        cookie_options.setdefault('path', handler.base_url)
        handler.set_secure_cookie(handler.cookie_name, user_id, **cookie_options)
        return user_id

    # matches "token <value>" in an Authorization header, case-insensitively
    auth_header_pat = re.compile('token\s+(.+)', re.IGNORECASE)

    @classmethod
    def get_token(cls, handler):
        """Get the user token from a request

        Default:

        - in URL parameters: ?token=<token>
        - in header: Authorization: token <token>
        """
        user_token = handler.get_argument('token', '')
        if not user_token:
            # get it from Authorization header
            m = cls.auth_header_pat.match(handler.request.headers.get('Authorization', ''))
            if m:
                user_token = m.group(1)
        return user_token

    @classmethod
    def should_check_origin(cls, handler):
        """Should the Handler check for CORS origin validation?

        Origin check should be skipped for token-authenticated requests.

        Returns:
        - True, if Handler must check for valid CORS origin.
        - False, if Handler should skip origin check since requests are token-authenticated.
        """
        return not cls.is_token_authenticated(handler)

    @classmethod
    def is_token_authenticated(cls, handler):
        """Returns True if handler has been token authenticated. Otherwise, False.

        Login with a token is used to signal certain things, such as:

        - permit access to REST API
        - xsrf protection
        - skip origin-checks for scripts
        """
        if getattr(handler, '_user_id', None) is None:
            # ensure get_user has been called, so we know if we're token-authenticated
            handler.get_current_user()
        return getattr(handler, '_token_authenticated', False)

    @classmethod
    def get_user(cls, handler):
        """Called by handlers.get_current_user for identifying the current user.

        See tornado.web.RequestHandler.get_current_user for details.
        """
        # Can't call this get_current_user because it will collide when
        # called on LoginHandler itself.
        if getattr(handler, '_user_id', None):
            # cached from an earlier call on this same request
            return handler._user_id
        user_id = cls.get_user_token(handler)
        if user_id is None:
            # no token: fall back to the login cookie
            get_secure_cookie_kwargs = handler.settings.get('get_secure_cookie_kwargs', {})
            user_id = handler.get_secure_cookie(handler.cookie_name,
                                                **get_secure_cookie_kwargs
            )
        else:
            cls.set_login_cookie(handler, user_id)
            # Record that the current request has been authenticated with a token.
            # Used in is_token_authenticated above.
            handler._token_authenticated = True
        if user_id is None:
            # If an invalid cookie was sent, clear it to prevent unnecessary
            # extra warnings. But don't do this on a request with *no* cookie,
            # because that can erroneously log you out (see gh-3365)
            if handler.get_cookie(handler.cookie_name) is not None:
                handler.log.warning("Clearing invalid/expired login cookie %s", handler.cookie_name)
                handler.clear_login_cookie()
            if not handler.login_available:
                # Completely insecure! No authentication at all.
                # No need to warn here, though; validate_security will have already done that.
                user_id = 'anonymous'

        # cache value for future retrievals on the same request
        handler._user_id = user_id
        return user_id

    @classmethod
    def get_user_token(cls, handler):
        """Identify the user based on a token in the URL or Authorization header

        Returns:
        - uuid if authenticated
        - None if not
        """
        token = handler.token
        if not token:
            # token auth is disabled entirely
            return
        # check login token from URL argument or Authorization header
        user_token = cls.get_token(handler)
        authenticated = False
        if user_token == token:
            # token-authenticated, set the login cookie
            handler.log.debug("Accepting token-authenticated connection from %s", handler.request.remote_ip)
            authenticated = True

        if authenticated:
            return uuid.uuid4().hex
        else:
            return None

    @classmethod
    def validate_security(cls, app, ssl_options=None):
        """Check the notebook application's security.

        Show messages, or abort if necessary, based on the security configuration.
        """
        if not app.ip:
            warning = "WARNING: The notebook server is listening on all IP addresses"
            if ssl_options is None:
                app.log.warning(warning + " and not using encryption. This "
                                "is not recommended.")
            if not app.password and not app.token:
                app.log.warning(warning + " and not using authentication. "
                                "This is highly insecure and not recommended.")
        else:
            if not app.password and not app.token:
                app.log.warning(
                    "All authentication is disabled."
                    " Anyone who can connect to this server will be able to run code.")

    @classmethod
    def password_from_settings(cls, settings):
        """Return the hashed password from the tornado settings.

        If there is no configured password, an empty string will be returned.
        """
        return settings.get('password', u'')

    @classmethod
    def get_login_available(cls, settings):
        """Whether this LoginHandler is needed - and therefore whether the login page should be displayed."""
        return bool(cls.password_from_settings(settings) or settings.get('token'))
./CrossVul/dataset_final_sorted/CWE-601/py/bad_753_0
crossvul-python_data_good_1915_2
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import logging import sys from typing import Dict, Iterable, Optional, Set from typing_extensions import ContextManager from twisted.internet import address, reactor import synapse import synapse.events from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError from synapse.api.urls import ( CLIENT_API_PREFIX, FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_V2_PREFIX, ) from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.config.server import ListenerConfig from synapse.federation import send_queue from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.presence import ( BasePresenceHandler, PresenceState, get_interested_parties, ) from synapse.http.server import JsonResource, OptionsResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http import REPLICATION_PREFIX, 
ReplicationRestResource from synapse.replication.http.presence import ( ReplicationBumpPresenceActiveTime, ReplicationPresenceSetState, ) from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.pushers import SlavedPusherStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationDataHandler from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, GroupServerStream, PresenceStream, PushersStream, PushRulesStream, ReceiptsStream, TagAccountDataStream, ToDeviceStream, ) from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client.v1 
import events from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet from synapse.rest.client.v1.login import LoginRestServlet from synapse.rest.client.v1.profile import ( ProfileAvatarURLRestServlet, ProfileDisplaynameRestServlet, ProfileRestServlet, ) from synapse.rest.client.v1.push_rule import PushRuleRestServlet from synapse.rest.client.v1.room import ( JoinedRoomMemberListRestServlet, JoinRoomAliasServlet, PublicRoomListRestServlet, RoomEventContextServlet, RoomInitialSyncRestServlet, RoomMemberListRestServlet, RoomMembershipRestServlet, RoomMessageListRestServlet, RoomSendEventRestServlet, RoomStateEventRestServlet, RoomStateRestServlet, RoomTypingRestServlet, ) from synapse.rest.client.v1.voip import VoipRestServlet from synapse.rest.client.v2_alpha import groups, sync, user_directory from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.client.v2_alpha.account import ThreepidRestServlet from synapse.rest.client.v2_alpha.account_data import ( AccountDataServlet, RoomAccountDataServlet, ) from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet from synapse.rest.client.versions import VersionsRestServlet from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats 
import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

logger = logging.getLogger("synapse.app.generic_worker")


class PresenceStatusStubServlet(RestServlet):
    """If presence is disabled this servlet can be used to stub out setting
    presence status.
    """

    PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")

    def __init__(self, hs):
        super().__init__()
        self.auth = hs.get_auth()

    async def on_GET(self, request, user_id):
        # Authenticate the requester, but always report "offline" since
        # presence is disabled on this worker.
        await self.auth.get_user_by_req(request)
        return 200, {"presence": "offline"}

    async def on_PUT(self, request, user_id):
        # Authenticate, then silently discard the presence update.
        await self.auth.get_user_by_req(request)
        return 200, {}


class KeyUploadServlet(RestServlet):
    """An implementation of the `KeyUploadServlet` that responds to read only
    requests, but otherwise proxies through to the master instance.
    """

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        self.main_uri = hs.config.worker_main_http_uri

    async def on_POST(self, request, device_id):
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # passing the device_id here is deprecated; however, we allow it
            # for now for compatibility with older clients.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # They're actually trying to upload something, proxy to main synapse.

            # Proxy headers from the original request, such as the auth headers
            # (in case the access token is there) and the original IP /
            # User-Agent of the request.
            headers = {
                header: request.requestHeaders.getRawHeaders(header, [])
                for header in (b"Authorization", b"User-Agent")
            }
            # Add the previous hop to the X-Forwarded-For header.
            x_forwarded_for = request.requestHeaders.getRawHeaders(
                b"X-Forwarded-For", []
            )
            if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
                previous_host = request.client.host.encode("ascii")
                # If the header exists, add to the comma-separated list of the first
                # instance of the header. Otherwise, generate a new header.
                if x_forwarded_for:
                    x_forwarded_for = [
                        x_forwarded_for[0] + b", " + previous_host
                    ] + x_forwarded_for[1:]
                else:
                    x_forwarded_for = [previous_host]
            headers[b"X-Forwarded-For"] = x_forwarded_for

            try:
                result = await self.http_client.post_json_get_json(
                    self.main_uri + request.uri.decode("ascii"), body, headers=headers
                )
            except HttpResponseException as e:
                raise e.to_synapse_error() from e
            except RequestSendFailed as e:
                raise SynapseError(502, "Failed to talk to master") from e

            return 200, result
        else:
            # Just interested in counts.
            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}


class _NullContextManager(ContextManager[None]):
    """A context manager which does nothing."""

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


# Interval (ms) of the stop-syncing sweep loop below, and the minimum time a
# user must have stopped syncing before we tell the master they went offline.
UPDATE_SYNCING_USERS_MS = 10 * 1000


class GenericWorkerPresence(BasePresenceHandler):
    def __init__(self, hs):
        super().__init__(hs)
        self.hs = hs
        self.is_mine_id = hs.is_mine_id
        self._presence_enabled = hs.config.use_presence

        # The number of ongoing syncs on this process, by user id.
        # Empty if _presence_enabled is false.
        self._user_to_num_current_syncs = {}  # type: Dict[str, int]

        self.notifier = hs.get_notifier()
        self.instance_id = hs.get_instance_id()

        # user_id -> last_sync_ms. Lists the users that have stopped syncing
        # but we haven't notified the master of that yet
        self.users_going_offline = {}

        self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
        self._set_state_client = ReplicationPresenceSetState.make_client(hs)

        self._send_stop_syncing_loop = self.clock.looping_call(
            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
        )

        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
            run_as_background_process,
            "generic_presence.on_shutdown",
            self._on_shutdown,
        )

    def _on_shutdown(self):
        # Tell the master to forget any in-flight syncs tracked for this
        # worker instance.
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_command(
                ClearUserSyncsCommand(self.instance_id)
            )

    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
        if self._presence_enabled:
            self.hs.get_tcp_replication().send_user_sync(
                self.instance_id, user_id, is_syncing, last_sync_ms
            )

    def mark_as_coming_online(self, user_id):
        """A user has started syncing. Send a UserSync to the master, unless they
        had recently stopped syncing.
Args: user_id (str) """ going_offline = self.users_going_offline.pop(user_id, None) if not going_offline: # Safe to skip because we haven't yet told the master they were offline self.send_user_sync(user_id, True, self.clock.time_msec()) def mark_as_going_offline(self, user_id): """A user has stopped syncing. We wait before notifying the master as its likely they'll come back soon. This allows us to avoid sending a stopped syncing immediately followed by a started syncing notification to the master Args: user_id (str) """ self.users_going_offline[user_id] = self.clock.time_msec() def send_stop_syncing(self): """Check if there are any users who have stopped syncing a while ago and haven't come back yet. If there are poke the master about them. """ now = self.clock.time_msec() for user_id, last_sync_ms in list(self.users_going_offline.items()): if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: self.users_going_offline.pop(user_id, None) self.send_user_sync(user_id, False, last_sync_ms) async def user_syncing( self, user_id: str, affect_presence: bool ) -> ContextManager[None]: """Record that a user is syncing. Called by the sync and events servlets to record that a user has connected to this worker and is waiting for some events. """ if not affect_presence or not self._presence_enabled: return _NullContextManager() curr_sync = self._user_to_num_current_syncs.get(user_id, 0) self._user_to_num_current_syncs[user_id] = curr_sync + 1 # If we went from no in flight sync to some, notify replication if self._user_to_num_current_syncs[user_id] == 1: self.mark_as_coming_online(user_id) def _end(): # We check that the user_id is in user_to_num_current_syncs because # user_to_num_current_syncs may have been cleared if we are # shutting down. 
if user_id in self._user_to_num_current_syncs: self._user_to_num_current_syncs[user_id] -= 1 # If we went from one in flight sync to non, notify replication if self._user_to_num_current_syncs[user_id] == 0: self.mark_as_going_offline(user_id) @contextlib.contextmanager def _user_syncing(): try: yield finally: _end() return _user_syncing() async def notify_from_replication(self, states, stream_id): parties = await get_interested_parties(self.store, states) room_ids_to_states, users_to_states = parties self.notifier.on_new_event( "presence_key", stream_id, rooms=room_ids_to_states.keys(), users=users_to_states.keys(), ) async def process_replication_rows(self, token, rows): states = [ UserPresenceState( row.user_id, row.state, row.last_active_ts, row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg, row.currently_active, ) for row in rows ] for state in states: self.user_to_current_state[state.user_id] = state stream_id = token await self.notify_from_replication(states, stream_id) def get_currently_syncing_users_for_replication(self) -> Iterable[str]: return [ user_id for user_id, count in self._user_to_num_current_syncs.items() if count > 0 ] async def set_state(self, target_user, state, ignore_status_msg=False): """Set the presence state of the user. """ presence = state["presence"] valid_presence = ( PresenceState.ONLINE, PresenceState.UNAVAILABLE, PresenceState.OFFLINE, ) if presence not in valid_presence: raise SynapseError(400, "Invalid presence state") user_id = target_user.to_string() # If presence is disabled, no-op if not self.hs.config.use_presence: return # Proxy request to master await self._set_state_client( user_id=user_id, state=state, ignore_status_msg=ignore_status_msg ) async def bump_presence_active_time(self, user): """We've seen the user do something that indicates they're interacting with the app. 
""" # If presence is disabled, no-op if not self.hs.config.use_presence: return # Proxy request to master user_id = user.to_string() await self._bump_active_client(user_id=user_id) class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. UserDirectoryStore, StatsStore, UIAuthWorkerStore, SlavedDeviceInboxStore, SlavedDeviceStore, SlavedReceiptsStore, SlavedPushRuleStore, SlavedGroupServerStore, SlavedAccountDataStore, SlavedPusherStore, CensorEventsStore, ClientIpWorkerStore, SlavedEventStore, SlavedKeyStore, RoomStore, DirectoryStore, SlavedApplicationServiceStore, SlavedRegistrationStore, SlavedTransactionStore, SlavedProfileStore, SlavedClientIpStore, SlavedPresenceStore, SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, SearchWorkerStore, TransactionWorkerStore, BaseSlavedStore, ): pass class GenericWorkerServer(HomeServer): DATASTORE_CLASS = GenericWorkerSlavedStore def _listen_http(self, listener_config: ListenerConfig): port = listener_config.port bind_addresses = listener_config.bind_addresses assert listener_config.http_options is not None site_tag = listener_config.http_options.tag if site_tag is None: site_tag = port # We always include a health resource. 
resources = {"/health": HealthResource()} for res in listener_config.http_options.resources: for name in res.names: if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) elif name == "client": resource = JsonResource(self, canonical_json=False) PublicRoomListRestServlet(self).register(resource) RoomMemberListRestServlet(self).register(resource) JoinedRoomMemberListRestServlet(self).register(resource) RoomStateRestServlet(self).register(resource) RoomEventContextServlet(self).register(resource) RoomMessageListRestServlet(self).register(resource) RegisterRestServlet(self).register(resource) LoginRestServlet(self).register(resource) ThreepidRestServlet(self).register(resource) KeyQueryServlet(self).register(resource) KeyChangesServlet(self).register(resource) VoipRestServlet(self).register(resource) PushRuleRestServlet(self).register(resource) VersionsRestServlet(self).register(resource) RoomSendEventRestServlet(self).register(resource) RoomMembershipRestServlet(self).register(resource) RoomStateEventRestServlet(self).register(resource) JoinRoomAliasServlet(self).register(resource) ProfileAvatarURLRestServlet(self).register(resource) ProfileDisplaynameRestServlet(self).register(resource) ProfileRestServlet(self).register(resource) KeyUploadServlet(self).register(resource) AccountDataServlet(self).register(resource) RoomAccountDataServlet(self).register(resource) RoomTypingRestServlet(self).register(resource) sync.register_servlets(self, resource) events.register_servlets(self, resource) InitialSyncRestServlet(self).register(resource) RoomInitialSyncRestServlet(self).register(resource) user_directory.register_servlets(self, resource) # If presence is disabled, use the stub servlet that does # not allow sending presence if not self.config.use_presence: PresenceStatusStubServlet(self).register(resource) groups.register_servlets(self, resource) resources.update({CLIENT_API_PREFIX: resource}) elif name == "federation": 
resources.update({FEDERATION_PREFIX: TransportLayerServer(self)}) elif name == "media": if self.config.can_load_media_repo: media_repo = self.get_media_repository_resource() # We need to serve the admin servlets for media on the # worker. admin_resource = JsonResource(self, canonical_json=False) register_servlets_for_media_repo(self, admin_resource) resources.update( { MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo, "/_synapse/admin": admin_resource, } ) else: logger.warning( "A 'media' listener is configured but the media" " repository is disabled. Ignoring." ) if name == "openid" and "federation" not in res.names: # Only load the openid resource separately if federation resource # is not specified since federation resource includes openid # resource. resources.update( { FEDERATION_PREFIX: TransportLayerServer( self, servlet_groups=["openid"] ) } ) if name in ["keys", "federation"]: resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "replication": resources[REPLICATION_PREFIX] = ReplicationRestResource(self) root_resource = create_resource_tree(resources, OptionsResource()) _base.listen_tcp( bind_addresses, port, SynapseSite( "synapse.access.http.%s" % (site_tag,), site_tag, listener_config, root_resource, self.version_string, ), reactor=self.get_reactor(), ) logger.info("Synapse worker now listening on port %d", port) def start_listening(self, listeners: Iterable[ListenerConfig]): for listener in listeners: if listener.type == "http": self._listen_http(listener) elif listener.type == "manhole": _base.listen_tcp( listener.bind_addresses, listener.port, manhole( username="matrix", password="rabbithole", globals={"hs": self} ), ) elif listener.type == "metrics": if not self.get_config().enable_metrics: logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" 
) ) else: _base.listen_metrics(listener.bind_addresses, listener.port) else: logger.warning("Unsupported listener type: %s", listener.type) self.get_tcp_replication().start_replication(self) async def remove_pusher(self, app_id, push_key, user_id): self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) @cache_in_self def get_replication_data_handler(self): return GenericWorkerReplicationHandler(self) @cache_in_self def get_presence_handler(self): return GenericWorkerPresence(self) class GenericWorkerReplicationHandler(ReplicationDataHandler): def __init__(self, hs): super().__init__(hs) self.store = hs.get_datastore() self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence self.notifier = hs.get_notifier() self.notify_pushers = hs.config.start_pushers self.pusher_pool = hs.get_pusherpool() self.send_handler = None # type: Optional[FederationSenderHandler] if hs.config.send_federation: self.send_handler = FederationSenderHandler(hs) async def on_rdata(self, stream_name, instance_name, token, rows): await super().on_rdata(stream_name, instance_name, token, rows) await self._process_and_notify(stream_name, instance_name, token, rows) async def _process_and_notify(self, stream_name, instance_name, token, rows): try: if self.send_handler: await self.send_handler.process_replication_rows( stream_name, token, rows ) if stream_name == PushRulesStream.NAME: self.notifier.on_new_event( "push_rules_key", token, users=[row.user_id for row in rows] ) elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): self.notifier.on_new_event( "account_data_key", token, users=[row.user_id for row in rows] ) elif stream_name == ReceiptsStream.NAME: self.notifier.on_new_event( "receipt_key", token, rooms=[row.room_id for row in rows] ) await self.pusher_pool.on_new_receipts( token, token, {row.room_id for row in rows} ) elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if 
entities: self.notifier.on_new_event("to_device_key", token, users=entities) elif stream_name == DeviceListsStream.NAME: all_room_ids = set() # type: Set[str] for row in rows: if row.entity.startswith("@"): room_ids = await self.store.get_rooms_for_user(row.entity) all_room_ids.update(room_ids) self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) elif stream_name == PresenceStream.NAME: await self.presence_handler.process_replication_rows(token, rows) elif stream_name == GroupServerStream.NAME: self.notifier.on_new_event( "groups_key", token, users=[row.user_id for row in rows] ) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: self.stop_pusher(row.user_id, row.app_id, row.pushkey) else: await self.start_pusher(row.user_id, row.app_id, row.pushkey) except Exception: logger.exception("Error processing replication") async def on_position(self, stream_name: str, instance_name: str, token: int): await super().on_position(stream_name, instance_name, token) # Also call on_rdata to ensure that stream positions are properly reset. await self.on_rdata(stream_name, instance_name, token, []) def stop_pusher(self, user_id, app_id, pushkey): if not self.notify_pushers: return key = "%s:%s" % (app_id, pushkey) pushers_for_user = self.pusher_pool.pushers.get(user_id, {}) pusher = pushers_for_user.pop(key, None) if pusher is None: return logger.info("Stopping pusher %r / %r", user_id, key) pusher.on_stop() async def start_pusher(self, user_id, app_id, pushkey): if not self.notify_pushers: return key = "%s:%s" % (app_id, pushkey) logger.info("Starting pusher %r / %r", user_id, key) return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) def on_remote_server_up(self, server: str): """Called when get a new REMOTE_SERVER_UP command.""" # Let's wake up the transaction queue for the server in case we have # pending stuff to send to it. 
if self.send_handler: self.send_handler.wake_destination(server) class FederationSenderHandler: """Processes the fedration replication stream This class is only instantiate on the worker responsible for sending outbound federation transactions. It receives rows from the replication stream and forwards the appropriate entries to the FederationSender class. """ def __init__(self, hs: GenericWorkerServer): self.store = hs.get_datastore() self._is_mine_id = hs.is_mine_id self.federation_sender = hs.get_federation_sender() self._hs = hs # Stores the latest position in the federation stream we've gotten up # to. This is always set before we use it. self.federation_position = None self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") def on_start(self): # There may be some events that are persisted but haven't been sent, # so send them now. self.federation_sender.notify_new_events( self.store.get_room_max_stream_ordering() ) def wake_destination(self, server: str): self.federation_sender.wake_destination(server) async def process_replication_rows(self, stream_name, token, rows): # The federation stream contains things that we want to send out, e.g. # presence, typing, etc. if stream_name == "federation": send_queue.process_rows_for_federation(self.federation_sender, rows) await self.update_token(token) # ... and when new receipts happen elif stream_name == ReceiptsStream.NAME: await self._on_new_receipts(rows) # ... as well as device updates and messages elif stream_name == DeviceListsStream.NAME: # The entities are either user IDs (starting with '@') whose devices # have changed, or remote servers that we need to tell about # changes. 
hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) elif stream_name == ToDeviceStream.NAME: # The to_device stream includes stuff to be pushed to both local # clients and remote servers, so we ignore entities that start with # '@' (since they'll be local users rather than destinations). hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) async def _on_new_receipts(self, rows): """ Args: rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): new receipts to be processed """ for receipt in rows: # we only want to send on receipts for our own users if not self._is_mine_id(receipt.user_id): continue receipt_info = ReadReceipt( receipt.room_id, receipt.receipt_type, receipt.user_id, [receipt.event_id], receipt.data, ) await self.federation_sender.send_read_receipt(receipt_info) async def update_token(self, token): """Update the record of where we have processed to in the federation stream. Called after we have processed a an update received over replication. Sends a FEDERATION_ACK back to the master, and stores the token that we have processed in `federation_stream_position` so that we can restart where we left off. """ self.federation_position = token # We save and send the ACK to master asynchronously, so we don't block # processing on persistence. We don't need to do this operation for # every single RDATA we receive, we just need to do it periodically. if self._fed_position_linearizer.is_queued(None): # There is already a task queued up to save and send the token, so # no need to queue up another task. return run_as_background_process("_save_and_send_ack", self._save_and_send_ack) async def _save_and_send_ack(self): """Save the current federation position in the database and send an ACK to master with where we're up to. 
""" try: # We linearize here to ensure we don't have races updating the token # # XXX this appears to be redundant, since the ReplicationCommandHandler # has a linearizer which ensures that we only process one line of # replication data at a time. Should we remove it, or is it doing useful # service for robustness? Or could we replace it with an assertion that # we're not being re-entered? with (await self._fed_position_linearizer.queue(None)): # We persist and ack the same position, so we take a copy of it # here as otherwise it can get modified from underneath us. current_position = self.federation_position await self.store.update_federation_out_pos( "federation", current_position ) # We ACK this token over replication so that the master can drop # its in memory queues self._hs.get_tcp_replication().send_federation_ack(current_position) except Exception: logger.exception("Error updating federation stream position") def start(config_options): try: config = HomeServerConfig.load_config("Synapse worker", config_options) except ConfigError as e: sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) # For backwards compatibility let any of the old app names. assert config.worker_app in ( "synapse.app.appservice", "synapse.app.client_reader", "synapse.app.event_creator", "synapse.app.federation_reader", "synapse.app.federation_sender", "synapse.app.frontend_proxy", "synapse.app.generic_worker", "synapse.app.media_repository", "synapse.app.pusher", "synapse.app.synchrotron", "synapse.app.user_dir", ) if config.worker_app == "synapse.app.appservice": if config.appservice.notify_appservices: sys.stderr.write( "\nThe appservices must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``notify_appservices: false`` to the main config" "\n" ) sys.exit(1) # Force the appservice to start since they will be disabled in the main config config.appservice.notify_appservices = True else: # For other worker types we force this to off. 
config.appservice.notify_appservices = False if config.worker_app == "synapse.app.pusher": if config.server.start_pushers: sys.stderr.write( "\nThe pushers must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``start_pushers: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.server.start_pushers = True else: # For other worker types we force this to off. config.server.start_pushers = False if config.worker_app == "synapse.app.user_dir": if config.server.update_user_directory: sys.stderr.write( "\nThe update_user_directory must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``update_user_directory: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.server.update_user_directory = True else: # For other worker types we force this to off. config.server.update_user_directory = False if config.worker_app == "synapse.app.federation_sender": if config.worker.send_federation: sys.stderr.write( "\nThe send_federation must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``send_federation: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.worker.send_federation = True else: # For other worker types we force this to off. config.worker.send_federation = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts hs = GenericWorkerServer( config.server_name, config=config, version_string="Synapse/" + get_version_string(synapse), ) setup_logging(hs, config, use_worker_options=True) hs.setup() # Ensure the replication streamer is always started in case we write to any # streams. Will no-op if no streams can be written to by this worker. 
hs.get_replication_streamer() reactor.addSystemEventTrigger( "before", "startup", _base.start, hs, config.worker_listeners ) _base.start_worker_reactor("synapse-generic-worker", config) if __name__ == "__main__": with LoggingContext("main"): start(sys.argv[1:])
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_2
crossvul-python_data_good_2548_0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2012 Red Hat, Inc.
# Copyright (C) 2008 Ricky Zhou
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
Miscellaneous functions of use on a TurboGears Server

.. versionchanged:: 0.3.14
   Save the original turbogears.url function as :func:`fedora.tg.util.tg_url`
.. versionchanged:: 0.3.17
   Renamed from fedora.tg.util
.. versionchanged:: 0.3.25
   Renamed from fedora.tg.tg1utils

.. moduleauthor:: Toshio Kuratomi <tkuratom@redhat.com>
.. moduleauthor:: Ricky Zhou <ricky@fedoraproject.org>
'''
from itertools import chain
import cgi
import os

import cherrypy
from cherrypy import request
from decorator import decorator
import pkg_resources

import turbogears
from turbogears import flash, redirect, config, identity
import turbogears.util as tg_util
from turbogears.controllers import check_app_root
from turbogears.identity.exceptions import RequestRequiredException
import six
from six.moves.urllib.parse import urlencode, urlparse, urlunparse

# Save this for people who need the original url() function
tg_url = turbogears.url


def add_custom_stdvars(new_vars):
    # Expose the fedora_template helper to all templates.
    # NOTE(review): fedora_template is not defined in this chunk -- presumably
    # imported elsewhere in the module; confirm before relying on this.
    return new_vars.update({'fedora_template': fedora_template})


def url(tgpath, tgparams=None, **kwargs):
    '''Computes URLs.

    This is a replacement for :func:`turbogears.controllers.url` (aka
    :func:`tg.url` in the template).
    In addition to the functionality that :func:`tg.url` provides, it adds a
    token to prevent :term:`CSRF` attacks.

    :arg tgpath:  a list or a string. If the path is absolute (starts
        with a "/"), the :attr:`server.webpath`, :envvar:`SCRIPT_NAME` and
        the approot of the application are prepended to the path. In order for
        the approot to be detected properly, the root object should extend
        :class:`turbogears.controllers.RootController`.
    :kwarg tgparams: See param: ``kwargs``
    :kwarg kwargs: Query parameters for the URL can be passed in as a
        dictionary in the second argument *or* as keyword parameters.
        Values which are a list or a tuple are used to create multiple
        key-value pairs.
    :returns: The changed path

    .. versionadded:: 0.3.10
       Modified from turbogears.controllers.url for :ref:`CSRF-Protection`
    '''
    if not isinstance(tgpath, six.string_types):
        tgpath = '/'.join(list(tgpath))
    if not tgpath.startswith('/'):
        # Do not allow the url() function to be used for external urls.
        # This function is primarily used in redirect() calls, so this prevents
        # covert redirects and thus CSRF leaking.
        tgpath = '/'
    if tgpath.startswith('/'):
        webpath = (config.get('server.webpath') or '').rstrip('/')
        if tg_util.request_available():
            check_app_root()
            tgpath = request.app_root + tgpath
            try:
                webpath += request.wsgi_environ['SCRIPT_NAME'].rstrip('/')
            except (AttributeError, KeyError):  # pylint: disable-msg=W0704
                # :W0704: Lack of wsgi environ is fine...
we still have # server.webpath pass tgpath = webpath + tgpath if tgparams is None: tgparams = kwargs else: try: tgparams = tgparams.copy() tgparams.update(kwargs) except AttributeError: raise TypeError( 'url() expects a dictionary for query parameters') args = [] # Add the _csrf_token try: if identity.current.csrf_token: tgparams.update({'_csrf_token': identity.current.csrf_token}) except RequestRequiredException: # pylint: disable-msg=W0704 # :W0704: If we are outside of a request (called from non-controller # methods/ templates) just don't set the _csrf_token. pass # Check for query params in the current url query_params = six.iteritems(tgparams) scheme, netloc, path, params, query_s, fragment = urlparse(tgpath) if query_s: query_params = chain((p for p in cgi.parse_qsl(query_s) if p[0] != '_csrf_token'), query_params) for key, value in query_params: if value is None: continue if isinstance(value, (list, tuple)): pairs = [(key, v) for v in value] else: pairs = [(key, value)] for key, value in pairs: if value is None: continue if isinstance(value, unicode): value = value.encode('utf8') args.append((key, str(value))) query_string = urlencode(args, True) tgpath = urlunparse((scheme, netloc, path, params, query_string, fragment)) return tgpath # this is taken from turbogears 1.1 branch def _get_server_name(): """Return name of the server this application runs on. Respects 'Host' and 'X-Forwarded-Host' header. See the docstring of the 'absolute_url' function for more information. .. note:: This comes from turbogears 1.1 branch. It is only needed for _tg_absolute_url(). If we find that turbogears.get_server_name() exists, we replace this function with that one. 
""" get = config.get h = request.headers host = get('tg.url_domain') or h.get('X-Forwarded-Host', h.get('Host')) if not host: host = '%s:%s' % (get('server.socket_host', 'localhost'), get('server.socket_port', 8080)) return host # this is taken from turbogears 1.1 branch def tg_absolute_url(tgpath='/', params=None, **kw): """Return absolute URL (including schema and host to this server). Tries to account for 'Host' header and reverse proxying ('X-Forwarded-Host'). The host name is determined this way: * If the config setting 'tg.url_domain' is set and non-null, use this value. * Else, if the 'base_url_filter.use_x_forwarded_host' config setting is True, use the value from the 'Host' or 'X-Forwarded-Host' request header. * Else, if config setting 'base_url_filter.on' is True and 'base_url_filter.base_url' is non-null, use its value for the host AND scheme part of the URL. * As a last fallback, use the value of 'server.socket_host' and 'server.socket_port' config settings (defaults to 'localhost:8080'). The URL scheme ('http' or 'http') used is determined in the following way: * If 'base_url_filter.base_url' is used, use the scheme from this URL. * If there is a 'X-Use-SSL' request header, use 'https'. * Else, if the config setting 'tg.url_scheme' is set, use its value. * Else, use the value of 'cherrypy.request.scheme'. .. note:: This comes from turbogears 1.1 branch with one change: we call tg_url() rather than turbogears.url() so that it never adds the csrf_token .. 
versionadded:: 0.3.19 Modified from turbogears.absolute_url() for :ref:`CSRF-Protection` """ get = config.get use_xfh = get('base_url_filter.use_x_forwarded_host', False) if request.headers.get('X-Use-SSL'): scheme = 'https' else: scheme = get('tg.url_scheme') if not scheme: scheme = request.scheme base_url = '%s://%s' % (scheme, _get_server_name()) if get('base_url_filter.on', False) and not use_xfh: base_url = get('base_url_filter.base_url').rstrip('/') return '%s%s' % (base_url, tg_url(tgpath, params, **kw)) def absolute_url(tgpath='/', params=None, **kw): """Return absolute URL (including schema and host to this server). Tries to account for 'Host' header and reverse proxying ('X-Forwarded-Host'). The host name is determined this way: * If the config setting 'tg.url_domain' is set and non-null, use this value. * Else, if the 'base_url_filter.use_x_forwarded_host' config setting is True, use the value from the 'Host' or 'X-Forwarded-Host' request header. * Else, if config setting 'base_url_filter.on' is True and 'base_url_filter.base_url' is non-null, use its value for the host AND scheme part of the URL. * As a last fallback, use the value of 'server.socket_host' and 'server.socket_port' config settings (defaults to 'localhost:8080'). The URL scheme ('http' or 'http') used is determined in the following way: * If 'base_url_filter.base_url' is used, use the scheme from this URL. * If there is a 'X-Use-SSL' request header, use 'https'. * Else, if the config setting 'tg.url_scheme' is set, use its value. * Else, use the value of 'cherrypy.request.scheme'. .. versionadded:: 0.3.19 Modified from turbogears.absolute_url() for :ref:`CSRF-Protection` """ return url(tg_absolute_url(tgpath, params, **kw)) def enable_csrf(): '''A startup function to setup :ref:`CSRF-Protection`. This should be run at application startup. 
Code like the following in the start-APP script or the method in :file:`commands.py` that starts it:: from turbogears import startup from fedora.tg.util import enable_csrf startup.call_on_startup.append(enable_csrf) If we can get the :ref:`CSRF-Protection` into upstream :term:`TurboGears`, we might be able to remove this in the future. .. versionadded:: 0.3.10 Added to enable :ref:`CSRF-Protection` ''' # Override the turbogears.url function with our own # Note, this also changes turbogears.absolute_url since that calls # turbogears.url turbogears.url = url turbogears.controllers.url = url # Ignore the _csrf_token parameter ignore = config.get('tg.ignore_parameters', []) if '_csrf_token' not in ignore: ignore.append('_csrf_token') config.update({'tg.ignore_parameters': ignore}) # Add a function to the template tg stdvars that looks up a template. turbogears.view.variable_providers.append(add_custom_stdvars) def request_format(): '''Return the output format that was requested by the user. The user is able to specify a specific output format using either the ``Accept:`` HTTP header or the ``tg_format`` query parameter. This function checks both of those to determine what format the reply should be in. :rtype: string :returns: The requested format. If none was specified, 'default' is returned .. versionchanged:: 0.3.17 Return symbolic names for json, html, xhtml, and xml instead of letting raw mime types through ''' output_format = cherrypy.request.params.get('tg_format', '').lower() if not output_format: ### TODO: Two problems with this: # 1) TG lets this be extended via as_format and accept_format. We need # tie into that as well somehow. 
# 2) Decide whether to standardize on "json" or "application/json" accept = tg_util.simplify_http_accept_header( request.headers.get('Accept', 'default').lower()) if accept in ('text/javascript', 'application/json'): output_format = 'json' elif accept == 'text/html': output_format = 'html' elif accept == 'text/plain': output_format = 'plain' elif accept == 'text/xhtml': output_format = 'xhtml' elif accept == 'text/xml': output_format = 'xml' else: output_format = accept return output_format def jsonify_validation_errors(): '''Return an error for :term:`JSON` if validation failed. This function checks for two things: 1) We're expected to return :term:`JSON` data. 2) There were errors in the validation process. If both of those are true, this function constructs a response that will return the validation error messages as :term:`JSON` data. All controller methods that are error_handlers need to use this:: @expose(template='templates.numberform') def enter_number(self, number): errors = fedora.tg.util.jsonify_validation_errors() if errors: return errors [...] @expose(allow_json=True) @error_handler(enter_number) @validate(form=number_form) def save(self, number): return dict(success=True) :rtype: None or dict :Returns: None if there are no validation errors or :term:`JSON` isn't requested, otherwise a dictionary with the error that's suitable for return from the controller. The error message is set in tg_flash whether :term:`JSON` was requested or not. 
''' # Check for validation errors errors = getattr(cherrypy.request, 'validation_errors', None) if not errors: return None # Set the message for both html and json output message = u'\n'.join([u'%s: %s' % (param, msg) for param, msg in errors.items()]) format = request_format() if format in ('html', 'xhtml'): message.translate({ord('\n'): u'<br />\n'}) flash(message) # If json, return additional information to make this an exception if format == 'json': # Note: explicit setting of tg_template is needed in TG < 1.0.4.4 # A fix has been applied for TG-1.0.4.5 return dict(exc='Invalid', tg_template='json') return None def json_or_redirect(forward_url): '''If :term:`JSON` is requested, return a dict, otherwise redirect. This is a decorator to use with a method that returns :term:`JSON` by default. If :term:`JSON` is requested, then it will return the dict from the method. If :term:`JSON` is not requested, it will redirect to the given URL. The method that is decorated should be constructed so that it calls turbogears.flash() with a message that will be displayed on the forward_url page. Use it like this:: import turbogears @json_or_redirect('http://localhost/calc/') @expose(allow_json=True) def divide(self, dividend, divisor): try: answer = dividend * 1.0 / divisor except ZeroDivisionError: turbogears.flash('Division by zero not allowed') return dict(exc='ZeroDivisionError') turbogears.flash('The quotient is %s' % answer) return dict(quotient=answer) In the example, we return either an exception or an answer, using :func:`turbogears.flash` to tell people of the result in either case. If :term:`JSON` data is requested, the user will get back a :term:`JSON` string with the proper information. If html is requested, we will be redirected to 'http://localhost/calc/' where the flashed message will be displayed. :arg forward_url: If :term:`JSON` was not requested, redirect to this URL after. .. 
versionadded:: 0.3.7 To make writing methods that use validation easier ''' def call(func, *args, **kwargs): if request_format() == 'json': return func(*args, **kwargs) else: func(*args, **kwargs) raise redirect(forward_url) return decorator(call) if hasattr(turbogears, 'get_server_name'): _get_server_name = turbogears.get_server_name def fedora_template(template, template_type='genshi'): '''Function to return the path to a template. :arg template: filename of the template itself. Ex: login.html :kwarg template_type: template language we need the template written in Defaults to 'genshi' :returns: filesystem path to the template ''' # :E1101: pkg_resources does have resource_filename # pylint: disable-msg=E1101 return pkg_resources.resource_filename( 'fedora', os.path.join('tg', 'templates', template_type, template)) __all__ = ( 'add_custom_stdvars', 'absolute_url', 'enable_csrf', 'fedora_template', 'jsonify_validation_errors', 'json_or_redirect', 'request_format', 'tg_absolute_url', 'tg_url', 'url')
./CrossVul/dataset_final_sorted/CWE-601/py/good_2548_0
crossvul-python_data_bad_1915_4
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017, 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import urllib
from collections import defaultdict

import attr
from signedjson.key import (
    decode_verify_key_bytes,
    encode_verify_key_base64,
    is_signing_algorithm_supported,
)
from signedjson.sign import (
    SignatureVerifyException,
    encode_canonical_json,
    signature_ids,
    verify_signed_json,
)
from unpaddedbase64 import decode_base64

from twisted.internet import defer

from synapse.api.errors import (
    Codes,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.logging.context import (
    PreserveLoggingContext,
    make_deferred_yieldable,
    preserve_fn,
    run_in_background,
)
from synapse.storage.keys import FetchKeyResult
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)


# cmp=False: requests are tracked by object identity (id() is used as a key
# elsewhere in this module), so value-based comparison is deliberately off.
@attr.s(slots=True, cmp=False)
class VerifyJsonRequest:
    """
    A request to verify a JSON object.

    Attributes:
        server_name(str): The name of the server to verify against.

        key_ids(set[str]): The set of key_ids that could be used to verify the
            JSON object

        json_object(dict): The JSON object to verify.

        minimum_valid_until_ts (int): time at which we require the signing key
            to be valid. (0 implies we don't care)

        key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
            A deferred (server_name, key_id, verify_key) tuple that resolves
            when a verify key has been fetched. The deferreds' callbacks are
            run with no logcontext.

            If we are unable to find a key which satisfies the request, the
            deferred errbacks with an M_UNAUTHORIZED SynapseError.
    """

    server_name = attr.ib()
    json_object = attr.ib()
    minimum_valid_until_ts = attr.ib()
    request_name = attr.ib()
    # derived in __attrs_post_init__ from the signatures on json_object
    key_ids = attr.ib(init=False)
    key_ready = attr.ib(default=attr.Factory(defer.Deferred))

    def __attrs_post_init__(self):
        self.key_ids = signature_ids(self.json_object, self.server_name)


class KeyLookupError(ValueError):
    pass


class Keyring:
    def __init__(self, hs, key_fetchers=None):
        self.clock = hs.get_clock()

        if key_fetchers is None:
            # default fetch order: local store first, then the trusted notary
            # ("perspectives") servers, then the origin server directly.
            key_fetchers = (
                StoreKeyFetcher(hs),
                PerspectivesKeyFetcher(hs),
                ServerKeyFetcher(hs),
            )
        self._key_fetchers = key_fetchers

        # map from server name to Deferred. Has an entry for each server with
        # an ongoing key download; the Deferred completes once the download
        # completes.
        #
        # These are regular, logcontext-agnostic Deferreds.
        self.key_downloads = {}

    def verify_json_for_server(
        self, server_name, json_object, validity_time, request_name
    ):
        """Verify that a JSON object has been signed by a given server

        Args:
            server_name (str): name of the server which must have signed this
                object

            json_object (dict): object to be checked

            validity_time (int): timestamp at which we require the signing key
                to be valid. (0 implies we don't care)

            request_name (str): an identifier for this json object (eg, an
                event id) for logging.
        Returns:
            Deferred[None]: completes if the object was correctly signed,
                otherwise errbacks with an error
        """
        req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
        requests = (req,)
        return make_deferred_yieldable(self._verify_objects(requests)[0])

    def verify_json_objects_for_server(self, server_and_json):
        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

        Args:
            server_and_json (iterable[Tuple[str, dict, int, str]):
                Iterable of (server_name, json_object, validity_time,
                request_name) tuples.

                validity_time is a timestamp at which the signing key must be
                valid.

                request_name is an identifier for this json object (eg, an
                event id) for logging.

        Returns:
            List<Deferred[None]>: for each input triplet, a deferred indicating
                success or failure to verify each json object's signature for
                the given server_name. The deferreds run their callbacks in
                the sentinel logcontext.
        """
        return self._verify_objects(
            VerifyJsonRequest(server_name, json_object, validity_time, request_name)
            for server_name, json_object, validity_time, request_name in server_and_json
        )

    def _verify_objects(self, verify_requests):
        """Does the work of verify_json_[objects_]for_server

        Args:
            verify_requests (iterable[VerifyJsonRequest]):
                Iterable of verification requests.

        Returns:
            List<Deferred[None]>: for each input item, a deferred indicating
                success or failure to verify each json object's signature for
                the given server_name. The deferreds run their callbacks in
                the sentinel logcontext.
        """
        # a list of VerifyJsonRequests which are awaiting a key lookup
        key_lookups = []
        handle = preserve_fn(_handle_key_deferred)

        def process(verify_request):
            """Process an entry in the request list

            Adds a key request to key_lookups, and returns a deferred which
            will complete or fail (in the sentinel context) when verification
            completes.
            """
            if not verify_request.key_ids:
                # no signatures from the expected server at all: fail fast
                # without doing any key fetching.
                return defer.fail(
                    SynapseError(
                        400,
                        "Not signed by %s" % (verify_request.server_name,),
                        Codes.UNAUTHORIZED,
                    )
                )

            logger.debug(
                "Verifying %s for %s with key_ids %s, min_validity %i",
                verify_request.request_name,
                verify_request.server_name,
                verify_request.key_ids,
                verify_request.minimum_valid_until_ts,
            )

            # add the key request to the queue, but don't start it off yet.
            key_lookups.append(verify_request)

            # now run _handle_key_deferred, which will wait for the key request
            # to complete and then do the verification.
            #
            # We want _handle_key_request to log to the right context, so we
            # wrap it with preserve_fn (aka run_in_background)
            return handle(verify_request)

        results = [process(r) for r in verify_requests]

        if key_lookups:
            run_in_background(self._start_key_lookups, key_lookups)

        return results

    async def _start_key_lookups(self, verify_requests):
        """Sets off the key fetches for each verify request

        Once each fetch completes, verify_request.key_ready will be resolved.

        Args:
            verify_requests (List[VerifyJsonRequest]):
        """

        try:
            # map from server name to a set of outstanding request ids
            server_to_request_ids = {}

            for verify_request in verify_requests:
                server_name = verify_request.server_name
                request_id = id(verify_request)
                server_to_request_ids.setdefault(server_name, set()).add(request_id)

            # Wait for any previous lookups to complete before proceeding.
            await self.wait_for_previous_lookups(server_to_request_ids.keys())

            # take out a lock on each of the servers by sticking a Deferred in
            # key_downloads
            for server_name in server_to_request_ids.keys():
                self.key_downloads[server_name] = defer.Deferred()
                logger.debug("Got key lookup lock on %s", server_name)

            # When we've finished fetching all the keys for a given server_name,
            # drop the lock by resolving the deferred in key_downloads.
            def drop_server_lock(server_name):
                d = self.key_downloads.pop(server_name)
                d.callback(None)

            def lookup_done(res, verify_request):
                server_name = verify_request.server_name
                server_requests = server_to_request_ids[server_name]
                server_requests.remove(id(verify_request))

                # if there are no more requests for this server, we can drop the lock.
                if not server_requests:
                    logger.debug("Releasing key lookup lock on %s", server_name)
                    drop_server_lock(server_name)

                return res

            for verify_request in verify_requests:
                verify_request.key_ready.addBoth(lookup_done, verify_request)

            # Actually start fetching keys.
            self._get_server_verify_keys(verify_requests)
        except Exception:
            logger.exception("Error starting key lookups")

    async def wait_for_previous_lookups(self, server_names) -> None:
        """Waits for any previous key lookups for the given servers to finish.

        Args:
            server_names (Iterable[str]): list of servers which we want to look
                up

        Returns:
            Resolves once all key lookups for the given servers have
            completed. Follows the synapse rules of logcontext preservation.
        """
        loop_count = 1
        while True:
            # re-check from scratch on each pass: new lookups may have been
            # started while we were waiting on the previous batch.
            wait_on = [
                (server_name, self.key_downloads[server_name])
                for server_name in server_names
                if server_name in self.key_downloads
            ]
            if not wait_on:
                break
            logger.info(
                "Waiting for existing lookups for %s to complete [loop %i]",
                [w[0] for w in wait_on],
                loop_count,
            )
            with PreserveLoggingContext():
                await defer.DeferredList((w[1] for w in wait_on))

            loop_count += 1

    def _get_server_verify_keys(self, verify_requests):
        """Tries to find at least one key for each verify request

        For each verify_request, verify_request.key_ready is called back with
        params (server_name, key_id, VerifyKey) if a key is found, or errbacked
        with a SynapseError if none of the keys are found.
        Args:
            verify_requests (list[VerifyJsonRequest]): list of verify requests
        """

        remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}

        async def do_iterations():
            try:
                with Measure(self.clock, "get_server_verify_keys"):
                    # try each fetcher in turn until every request is satisfied
                    for f in self._key_fetchers:
                        if not remaining_requests:
                            return
                        await self._attempt_key_fetches_with_fetcher(
                            f, remaining_requests
                        )

                    # look for any requests which weren't satisfied
                    while remaining_requests:
                        verify_request = remaining_requests.pop()
                        rq_str = (
                            "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
                            % (
                                verify_request.server_name,
                                verify_request.key_ids,
                                verify_request.minimum_valid_until_ts,
                            )
                        )

                        # If we run the errback immediately, it may cancel our
                        # loggingcontext while we are still in it, so instead we
                        # schedule it for the next time round the reactor.
                        #
                        # (this also ensures that we don't get a stack overflow if we
                        # has a massive queue of lookups waiting for this server).
                        self.clock.call_later(
                            0,
                            verify_request.key_ready.errback,
                            SynapseError(
                                401,
                                "Failed to find any key to satisfy %s" % (rq_str,),
                                Codes.UNAUTHORIZED,
                            ),
                        )
            except Exception as err:
                # we don't really expect to get here, because any errors should already
                # have been caught and logged. But if we do, let's log the error and make
                # sure that all of the deferreds are resolved.
                logger.error("Unexpected error in _get_server_verify_keys: %s", err)
                with PreserveLoggingContext():
                    for verify_request in remaining_requests:
                        if not verify_request.key_ready.called:
                            verify_request.key_ready.errback(err)

        run_in_background(do_iterations)

    async def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
        """Use a key fetcher to attempt to satisfy some key requests

        Args:
            fetcher (KeyFetcher): fetcher to use to fetch the keys
            remaining_requests (set[VerifyJsonRequest]): outstanding key
                requests. Any successfully-completed requests will be removed
                from the list.
        """
        # dict[str, dict[str, int]]: keys to fetch.
        # server_name -> key_id -> min_valid_ts
        missing_keys = defaultdict(dict)

        for verify_request in remaining_requests:
            # any completed requests should already have been removed
            assert not verify_request.key_ready.called
            keys_for_server = missing_keys[verify_request.server_name]

            for key_id in verify_request.key_ids:
                # If we have several requests for the same key, then we only need to
                # request that key once, but we should do so with the greatest
                # min_valid_until_ts of the requests, so that we can satisfy all of
                # the requests.
                keys_for_server[key_id] = max(
                    keys_for_server.get(key_id, -1),
                    verify_request.minimum_valid_until_ts,
                )

        results = await fetcher.get_keys(missing_keys)

        # requests satisfied by this fetcher's results
        completed = []
        for verify_request in remaining_requests:
            server_name = verify_request.server_name

            # see if any of the keys we got this time are sufficient to
            # complete this VerifyJsonRequest.
            result_keys = results.get(server_name, {})
            for key_id in verify_request.key_ids:
                fetch_key_result = result_keys.get(key_id)
                if not fetch_key_result:
                    # we didn't get a result for this key
                    continue

                if (
                    fetch_key_result.valid_until_ts
                    < verify_request.minimum_valid_until_ts
                ):
                    # key was not valid at this point
                    continue

                # we have a valid key for this request. If we run the callback
                # immediately, it may cancel our loggingcontext while we are still in
                # it, so instead we schedule it for the next time round the reactor.
                #
                # (this also ensures that we don't get a stack overflow if we had
                # a massive queue of lookups waiting for this server).
                logger.debug(
                    "Found key %s:%s for %s",
                    server_name,
                    key_id,
                    verify_request.request_name,
                )
                self.clock.call_later(
                    0,
                    verify_request.key_ready.callback,
                    (server_name, key_id, fetch_key_result.verify_key),
                )
                completed.append(verify_request)
                break

        remaining_requests.difference_update(completed)


class KeyFetcher:
    async def get_keys(self, keys_to_fetch):
        """
        Args:
            keys_to_fetch (dict[str, dict[str, int]]):
                the keys to be fetched. server_name -> key_id -> min_valid_ts

        Returns:
            Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]:
                map from server_name -> key_id -> FetchKeyResult
        """
        raise NotImplementedError


class StoreKeyFetcher(KeyFetcher):
    """KeyFetcher impl which fetches keys from our data store"""

    def __init__(self, hs):
        self.store = hs.get_datastore()

    async def get_keys(self, keys_to_fetch):
        """see KeyFetcher.get_keys"""

        # flatten the nested mapping into (server_name, key_id) pairs for the
        # store query
        keys_to_fetch = (
            (server_name, key_id)
            for server_name, keys_for_server in keys_to_fetch.items()
            for key_id in keys_for_server.keys()
        )

        res = await self.store.get_server_verify_keys(keys_to_fetch)
        keys = {}
        for (server_name, key_id), key in res.items():
            keys.setdefault(server_name, {})[key_id] = key
        return keys


class BaseV2KeyFetcher:
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.config = hs.get_config()

    async def process_v2_response(self, from_server, response_json, time_added_ms):
        """Parse a 'Server Keys' structure from the result of a /key request

        This is used to parse either the entirety of the response from
        GET /_matrix/key/v2/server, or a single entry from the list returned
        by POST /_matrix/key/v2/query.

        Checks that each signature in the response that claims to come from
        the origin server is valid, and that there is at least one such
        signature.

        Stores the json in server_keys_json so that it can be used for future
        responses to /_matrix/key/v2/query.

        Args:
            from_server (str): the name of the server producing this result:
                either the origin server for a /_matrix/key/v2/server request,
                or the notary for a /_matrix/key/v2/query.

            response_json (dict): the json-decoded Server Keys response object

            time_added_ms (int): the timestamp to record in server_keys_json

        Returns:
            Deferred[dict[str, FetchKeyResult]]: map from key_id to result
                object
        """
        ts_valid_until_ms = response_json["valid_until_ts"]

        # start by extracting the keys from the response, since they may be required
        # to validate the signature on the response.
        verify_keys = {}
        for key_id, key_data in response_json["verify_keys"].items():
            if is_signing_algorithm_supported(key_id):
                key_base64 = key_data["key"]
                key_bytes = decode_base64(key_base64)
                verify_key = decode_verify_key_bytes(key_id, key_bytes)
                verify_keys[key_id] = FetchKeyResult(
                    verify_key=verify_key, valid_until_ts=ts_valid_until_ms
                )

        server_name = response_json["server_name"]
        verified = False
        for key_id in response_json["signatures"].get(server_name, {}):
            key = verify_keys.get(key_id)
            if not key:
                # the key may not be present in verify_keys if:
                #  * we got the key from the notary server, and:
                #  * the key belongs to the notary server, and:
                #  * the notary server is using a different key to sign notary
                #    responses.
                continue

            verify_signed_json(response_json, server_name, key.verify_key)
            verified = True
            break

        if not verified:
            raise KeyLookupError(
                "Key response for %s is not signed by the origin server"
                % (server_name,)
            )

        for key_id, key_data in response_json["old_verify_keys"].items():
            if is_signing_algorithm_supported(key_id):
                key_base64 = key_data["key"]
                key_bytes = decode_base64(key_base64)
                verify_key = decode_verify_key_bytes(key_id, key_bytes)
                verify_keys[key_id] = FetchKeyResult(
                    verify_key=verify_key, valid_until_ts=key_data["expired_ts"]
                )

        key_json_bytes = encode_canonical_json(response_json)

        # persist the raw response for each key, so we can serve it from
        # /_matrix/key/v2/query later
        await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self.store.store_server_keys_json,
                        server_name=server_name,
                        key_id=key_id,
                        from_server=from_server,
                        ts_now_ms=time_added_ms,
                        ts_expires_ms=ts_valid_until_ms,
                        key_json_bytes=key_json_bytes,
                    )
                    for key_id in verify_keys
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        return verify_keys


class PerspectivesKeyFetcher(BaseV2KeyFetcher):
    """KeyFetcher impl which fetches keys from the "perspectives" servers"""

    def __init__(self, hs):
        super().__init__(hs)
        self.clock = hs.get_clock()
        self.client = hs.get_http_client()
        self.key_servers = self.config.key_servers

    async def get_keys(self, keys_to_fetch):
        """see KeyFetcher.get_keys"""

        async def get_key(key_server):
            try:
                result = await self.get_server_verify_key_v2_indirect(
                    keys_to_fetch, key_server
                )
                return result
            except KeyLookupError as e:
                logger.warning(
                    "Key lookup failed from %r: %s", key_server.server_name, e
                )
            except Exception as e:
                logger.exception(
                    "Unable to get key from %r: %s %s",
                    key_server.server_name,
                    type(e).__name__,
                    str(e),
                )

            return {}

        # query all configured notary servers in parallel
        results = await make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(get_key, server) for server in self.key_servers],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        # merge the per-server results from each notary; for a given key_id,
        # later notaries' entries overwrite earlier ones.
        union_of_keys = {}
        for result in results:
            for server_name, keys in result.items():
                union_of_keys.setdefault(server_name, {}).update(keys)

        return union_of_keys

    async def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server):
        """
        Args:
            keys_to_fetch (dict[str, dict[str, int]]):
                the keys to be fetched. server_name -> key_id -> min_valid_ts

            key_server (synapse.config.key.TrustedKeyServer): notary server to
                query for the keys

        Returns:
            dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]: map
                from server_name -> key_id -> FetchKeyResult

        Raises:
            KeyLookupError if there was an error processing the entire
                response from the server
        """
        perspective_name = key_server.server_name
        logger.info(
            "Requesting keys %s from notary server %s",
            keys_to_fetch.items(),
            perspective_name,
        )

        try:
            query_response = await self.client.post_json(
                destination=perspective_name,
                path="/_matrix/key/v2/query",
                data={
                    "server_keys": {
                        server_name: {
                            key_id: {"minimum_valid_until_ts": min_valid_ts}
                            for key_id, min_valid_ts in server_keys.items()
                        }
                        for server_name, server_keys in keys_to_fetch.items()
                    }
                },
            )
        except (NotRetryingDestination, RequestSendFailed) as e:
            # these both have str() representations which we can't really improve upon
            raise KeyLookupError(str(e))
        except HttpResponseException as e:
            raise KeyLookupError("Remote server returned an error: %s" % (e,))

        keys = {}
        added_keys = []

        time_now_ms = self.clock.time_msec()

        for response in query_response["server_keys"]:
            # do this first, so that we can give useful errors thereafter
            server_name = response.get("server_name")
            if not isinstance(server_name, str):
                raise KeyLookupError(
                    "Malformed response from key notary server %s: invalid server_name"
                    % (perspective_name,)
                )

            try:
                self._validate_perspectives_response(key_server, response)

                processed_response = await self.process_v2_response(
                    perspective_name, response, time_added_ms=time_now_ms
                )
            except KeyLookupError as e:
                logger.warning(
                    "Error processing response from key notary server %s for origin "
                    "server %s: %s",
                    perspective_name,
                    server_name,
                    e,
                )
                # we continue to process the rest of the response
                continue

            added_keys.extend(
                (server_name, key_id, key) for key_id, key in processed_response.items()
            )
            keys.setdefault(server_name, {}).update(processed_response)

        await self.store.store_server_verify_keys(
            perspective_name, time_now_ms, added_keys
        )

        return keys

    def _validate_perspectives_response(self, key_server, response):
        """Optionally check the signature on the result of a /key/query request

        Args:
            key_server (synapse.config.key.TrustedKeyServer): the notary server
                that produced this result

            response (dict): the json-decoded Server Keys response object
        """
        perspective_name = key_server.server_name
        perspective_keys = key_server.verify_keys

        if perspective_keys is None:
            # signature checking is disabled on this server
            return

        if (
            "signatures" not in response
            or perspective_name not in response["signatures"]
        ):
            raise KeyLookupError("Response not signed by the notary server")

        verified = False
        for key_id in response["signatures"][perspective_name]:
            if key_id in perspective_keys:
                verify_signed_json(response, perspective_name, perspective_keys[key_id])
                verified = True

        if not verified:
            raise KeyLookupError(
                "Response not signed with a known key: signed with: %r, known keys: %r"
                % (
                    list(response["signatures"][perspective_name].keys()),
                    list(perspective_keys.keys()),
                )
            )


class ServerKeyFetcher(BaseV2KeyFetcher):
    """KeyFetcher impl which fetches keys from the origin servers"""

    def __init__(self, hs):
        super().__init__(hs)
        self.clock = hs.get_clock()
        self.client = hs.get_http_client()

    async def get_keys(self, keys_to_fetch):
        """
        Args:
            keys_to_fetch (dict[str, iterable[str]]):
                the keys to be fetched. server_name -> key_ids

        Returns:
            dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]:
                map from server_name -> key_id -> FetchKeyResult
        """

        results = {}

        async def get_key(key_to_fetch_item):
            server_name, key_ids = key_to_fetch_item
            try:
                keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
                results[server_name] = keys
            except KeyLookupError as e:
                logger.warning(
                    "Error looking up keys %s from %s: %s", key_ids, server_name, e
                )
            except Exception:
                logger.exception("Error getting keys %s from %s", key_ids, server_name)

        await yieldable_gather_results(get_key, keys_to_fetch.items())
        return results

    async def get_server_verify_key_v2_direct(self, server_name, key_ids):
        """

        Args:
            server_name (str):
            key_ids (iterable[str]):

        Returns:
            dict[str, FetchKeyResult]: map from key ID to lookup result

        Raises:
            KeyLookupError if there was a problem making the lookup
        """
        keys = {}  # type: dict[str, FetchKeyResult]

        for requested_key_id in key_ids:
            # we may have found this key as a side-effect of asking for another.
            if requested_key_id in keys:
                continue

            time_now_ms = self.clock.time_msec()
            try:
                # key_id is percent-encoded so it is safe to embed in the path
                response = await self.client.get_json(
                    destination=server_name,
                    path="/_matrix/key/v2/server/"
                    + urllib.parse.quote(requested_key_id),
                    ignore_backoff=True,
                    # we only give the remote server 10s to respond. It should be an
                    # easy request to handle, so if it doesn't reply within 10s, it's
                    # probably not going to.
                    #
                    # Furthermore, when we are acting as a notary server, we cannot
                    # wait all day for all of the origin servers, as the requesting
                    # server will otherwise time out before we can respond.
                    #
                    # (Note that get_json may make 4 attempts, so this can still take
                    # almost 45 seconds to fetch the headers, plus up to another 60s to
                    # read the response).
                    timeout=10000,
                )
            except (NotRetryingDestination, RequestSendFailed) as e:
                # these both have str() representations which we can't really improve
                # upon
                raise KeyLookupError(str(e))
            except HttpResponseException as e:
                raise KeyLookupError("Remote server returned an error: %s" % (e,))

            if response["server_name"] != server_name:
                raise KeyLookupError(
                    "Expected a response for server %r not %r"
                    % (server_name, response["server_name"])
                )

            response_keys = await self.process_v2_response(
                from_server=server_name,
                response_json=response,
                time_added_ms=time_now_ms,
            )
            await self.store.store_server_verify_keys(
                server_name,
                time_now_ms,
                ((server_name, key_id, key) for key_id, key in response_keys.items()),
            )
            keys.update(response_keys)

        return keys


async def _handle_key_deferred(verify_request) -> None:
    """Waits for the key to become available, and then performs a verification

    Args:
        verify_request (VerifyJsonRequest):

    Raises:
        SynapseError if there was a problem performing the verification
    """
    server_name = verify_request.server_name
    with PreserveLoggingContext():
        _, key_id, verify_key = await verify_request.key_ready

    json_object = verify_request.json_object

    try:
        verify_signed_json(json_object, server_name, verify_key)
    except SignatureVerifyException as e:
        logger.debug(
            "Error verifying signature for %s:%s:%s with key %s: %s",
            server_name,
            verify_key.alg,
            verify_key.version,
            encode_verify_key_base64(verify_key),
            str(e),
        )
        raise SynapseError(
            401,
            "Invalid signature for server %s with key %s:%s: %s"
            % (server_name, verify_key.alg, verify_key.version, str(e)),
            Codes.UNAUTHORIZED,
        )
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_4
crossvul-python_data_good_1315_1
# -*- coding: utf-8 -*- # See https://zulip.readthedocs.io/en/latest/subsystems/thumbnailing.html import base64 import os import sys import urllib from urllib.parse import urljoin, urlsplit, urlunsplit from django.conf import settings from libthumbor import CryptoURL ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath('__file__')))) sys.path.append(ZULIP_PATH) from zthumbor.loaders.helpers import ( THUMBOR_S3_TYPE, THUMBOR_LOCAL_FILE_TYPE, THUMBOR_EXTERNAL_TYPE ) from zerver.lib.camo import get_camo_url def is_thumbor_enabled() -> bool: return settings.THUMBOR_URL != '' def user_uploads_or_external(url: str) -> bool: u = urlsplit(url) return u.scheme != "" or u.netloc != "" or u.path.startswith("/user_uploads/") def get_source_type(url: str) -> str: if not url.startswith('/user_uploads/'): return THUMBOR_EXTERNAL_TYPE local_uploads_dir = settings.LOCAL_UPLOADS_DIR if local_uploads_dir: return THUMBOR_LOCAL_FILE_TYPE return THUMBOR_S3_TYPE def generate_thumbnail_url(path: str, size: str='0x0', is_camo_url: bool=False) -> str: path = urljoin("/", path) u = urlsplit(path) if not is_thumbor_enabled(): if u.scheme == "" and u.netloc == "": return urlunsplit(u) return get_camo_url(path) if u.scheme == "" and u.netloc == "" and not u.path.startswith("/user_uploads/"): return urlunsplit(u) source_type = get_source_type(path) safe_url = base64.urlsafe_b64encode(path.encode()).decode('utf-8') image_url = '%s/source_type/%s' % (safe_url, source_type) width, height = map(int, size.split('x')) crypto = CryptoURL(key=settings.THUMBOR_KEY) smart_crop_enabled = True apply_filters = ['no_upscale()'] if is_camo_url: smart_crop_enabled = False apply_filters.append('quality(100)') if size != '0x0': apply_filters.append('sharpen(0.5,0.2,true)') encrypted_url = crypto.generate( width=width, height=height, smart=smart_crop_enabled, filters=apply_filters, image_url=image_url ) if settings.THUMBOR_URL == 'http://127.0.0.1:9995': # If THUMBOR_URL is the default then 
thumbor is hosted on same machine # as the Zulip server and we should serve a relative URL. # We add a /thumbor in front of the relative url because we make # use of a proxy pass to redirect request internally in Nginx to 9995 # port where thumbor is running. thumbnail_url = '/thumbor' + encrypted_url else: thumbnail_url = urllib.parse.urljoin(settings.THUMBOR_URL, encrypted_url) return thumbnail_url
./CrossVul/dataset_final_sorted/CWE-601/py/good_1315_1
crossvul-python_data_good_1915_7
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains handlers for federation events.""" import itertools import logging from collections.abc import Container from http import HTTPStatus from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union import attr from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 from twisted.internet import defer from synapse import event_auth from synapse.api.constants import ( EventTypes, Membership, RejectedReason, RoomEncryptionAlgorithms, ) from synapse.api.errors import ( AuthError, CodeMessageException, Codes, FederationDeniedError, FederationError, HttpResponseException, NotFoundError, RequestSendFailed, SynapseError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import auth_types_for_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.handlers._base import BaseHandler from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, nested_logging_context, preserve_fn, run_in_background, ) from 
synapse.logging.utils import log_function from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationFederationSendEventsRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, ) from synapse.state import StateResolutionStore from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( JsonDict, MutableStateMap, PersistedEventPosition, RoomStreamToken, StateMap, UserID, get_domain_from_id, ) from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) @attr.s(slots=True) class _NewEventInfo: """Holds information about a received event, ready for passing to _handle_new_events Attributes: event: the received event state: the state at that event auth_events: the auth_event map for that event """ event = attr.ib(type=EventBase) state = attr.ib(type=Optional[Sequence[EventBase]], default=None) auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None) class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: a) handling received Pdus before handing them on as Events to the rest of the homeserver (including auth and state conflict resolutions) b) converting events that were produced by local clients that may need to be sent to remote homeservers. c) doing the necessary dances to invite remote users and join remote rooms. 
""" def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs self.store = hs.get_datastore() self.storage = hs.get_storage() self.state_store = self.storage.state self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() self._state_resolution_handler = hs.get_state_resolution_handler() self.server_name = hs.hostname self.keyring = hs.get_keyring() self.action_generator = hs.get_action_generator() self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self._message_handler = hs.get_message_handler() self._server_notices_mxid = hs.config.server_notices_mxid self.config = hs.config self.http_client = hs.get_proxied_blacklisted_http_client() self._instance_name = hs.get_instance_name() self._replication = hs.get_replication_data_handler() self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs) self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client( hs ) if hs.config.worker_app: self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client( hs ) self._maybe_store_room_on_outlier_membership = ReplicationStoreRoomOnOutlierMembershipRestServlet.make_client( hs ) else: self._device_list_updater = hs.get_device_handler().device_list_updater self._maybe_store_room_on_outlier_membership = ( self.store.maybe_store_room_on_outlier_membership ) # When joining a room we need to queue any events for that room up. # For each room, a list of (pdu, origin) tuples. 
self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]] self._room_pdu_linearizer = Linearizer("fed_room_pdu") self.third_party_event_rules = hs.get_third_party_event_rules() self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events Args: origin (str): server which initiated the /send/ transaction. Will be used to fetch missing events or state. pdu (FrozenEvent): received PDU sent_to_us_directly (bool): True if this event was pushed to us; False if we pulled it as the result of a missing prev_event. """ room_id = pdu.room_id event_id = pdu.event_id logger.info("handling received PDU: %s", pdu) # We reprocess pdus when we have seen them only as outliers existing = await self.store.get_event( event_id, allow_none=True, allow_rejected=True ) # FIXME: Currently we fetch an event again when we already have it # if it has been marked as an outlier. already_seen = existing and ( not existing.internal_metadata.is_outlier() or pdu.internal_metadata.is_outlier() ) if already_seen: logger.debug("[%s %s]: Already seen pdu", room_id, event_id) return # do some initial sanity-checking of the event. In particular, make # sure it doesn't have hundreds of prev_events or auth_events, which # could cause a huge state resolution or cascade of event fetches. try: self._sanity_check_event(pdu) except SynapseError as err: logger.warning( "[%s %s] Received event failed sanity checks", room_id, event_id ) raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id) # If we are currently in the process of joining this room, then we # queue up events for later processing. 
if room_id in self.room_queues: logger.info( "[%s %s] Queuing PDU from %s for now: join in progress", room_id, event_id, origin, ) self.room_queues[room_id].append((pdu, origin)) return # If we're not in the room just ditch the event entirely. This is # probably an old server that has come back and thinks we're still in # the room (or we've been rejoined to the room by a state reset). # # Note that if we were never in the room then we would have already # dropped the event, since we wouldn't know the room version. is_in_room = await self.auth.check_host_in_room(room_id, self.server_name) if not is_in_room: logger.info( "[%s %s] Ignoring PDU from %s as we're not in the room", room_id, event_id, origin, ) return None state = None # Get missing pdus if necessary. if not pdu.internal_metadata.is_outlier(): # We only backfill backwards to the min depth. min_depth = await self.get_min_depth_for_context(pdu.room_id) logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth) prevs = set(pdu.prev_event_ids()) seen = await self.store.have_events_in_timeline(prevs) if min_depth is not None and pdu.depth < min_depth: # This is so that we don't notify the user about this # message, to work around the fact that some events will # reference really really old events we really don't want to # send to the clients. pdu.internal_metadata.outlier = True elif min_depth is not None and pdu.depth > min_depth: missing_prevs = prevs - seen if sent_to_us_directly and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. 
logger.info( "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s", room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) with (await self._room_pdu_linearizer.queue(pdu.room_id)): logger.info( "[%s %s] Acquired room lock to fetch %d missing prev_events", room_id, event_id, len(missing_prevs), ) try: await self._get_missing_events_for_pdu( origin, pdu, prevs, min_depth ) except Exception as e: raise Exception( "Error fetching missing prev_events for %s: %s" % (event_id, e) ) from e # Update the set of things we've seen after trying to # fetch the missing stuff seen = await self.store.have_events_in_timeline(prevs) if not prevs - seen: logger.info( "[%s %s] Found all missing prev_events", room_id, event_id, ) if prevs - seen: # We've still not been able to get all of the prev_events for this event. # # In this case, we need to fall back to asking another server in the # federation for the state at this event. That's ok provided we then # resolve the state against other bits of the DAG before using it (which # will ensure that you can't just take over a room by sending an event, # withholding its prev_events, and declaring yourself to be an admin in # the subsequent state request). # # Now, if we're pulling this event as a missing prev_event, then clearly # this event is not going to become the only forward-extremity and we are # guaranteed to resolve its state against our existing forward # extremities, so that should be fine. # # On the other hand, if this event was pushed to us, it is possible for # it to become the only forward-extremity in the room, and we would then # trust its state to be the state for the whole room. This is very bad. # Further, if the event was pushed to us, there is no excuse for us not to # have all the prev_events. We therefore reject any such events. # # XXX this really feels like it could/should be merged with the above, # but there is an interaction with min_depth that I'm not really # following. 
if sent_to_us_directly: logger.warning( "[%s %s] Rejecting: failed to fetch %d prev events: %s", room_id, event_id, len(prevs - seen), shortstr(prevs - seen), ) raise FederationError( "ERROR", 403, ( "Your server isn't divulging details about prev_events " "referenced in this event." ), affected=pdu.event_id, ) logger.info( "Event %s is missing prev_events: calculating state for a " "backwards extremity", event_id, ) # Calculate the state after each of the previous events, and # resolve them to find the correct state at the current event. event_map = {event_id: pdu} try: # Get the state of the events we know about ours = await self.state_store.get_state_groups_ids(room_id, seen) # state_maps is a list of mappings from (type, state_key) to event_id state_maps = list(ours.values()) # type: List[StateMap[str]] # we don't need this any more, let's delete it. del ours # Ask the remote server for the states we don't # know about for p in prevs - seen: logger.info( "Requesting state at missing prev_event %s", event_id, ) with nested_logging_context(p): # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. (remote_state, _,) = await self._get_state_for_room( origin, room_id, p, include_event_in_state=True ) remote_state_map = { (x.type, x.state_key): x.event_id for x in remote_state } state_maps.append(remote_state_map) for x in remote_state: event_map[x.event_id] = x room_version = await self.store.get_room_version_id(room_id) state_map = await self._state_resolution_handler.resolve_events_with_store( room_id, room_version, state_maps, event_map, state_res_store=StateResolutionStore(self.store), ) # We need to give _process_received_pdu the actual state events # rather than event ids, so generate that now. # First though we need to fetch all the events that are in # state_map, so we can build up the state below. 
evs = await self.store.get_events( list(state_map.values()), get_prev_content=False, redact_behaviour=EventRedactBehaviour.AS_IS, ) event_map.update(evs) state = [event_map[e] for e in state_map.values()] except Exception: logger.warning( "[%s %s] Error attempting to resolve state at missing " "prev_events", room_id, event_id, exc_info=True, ) raise FederationError( "ERROR", 403, "We can't get valid state history.", affected=event_id, ) await self._process_received_pdu(origin, pdu, state=state) async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): """ Args: origin (str): Origin of the pdu. Will be called to get the missing events pdu: received pdu prevs (set(str)): List of event ids which we are missing min_depth (int): Minimum depth of events to return. """ room_id = pdu.room_id event_id = pdu.event_id seen = await self.store.have_events_in_timeline(prevs) if not prevs - seen: return latest_list = await self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us latest = set(latest_list) latest |= seen logger.info( "[%s %s]: Requesting missing events between %s and %s", room_id, event_id, shortstr(latest), event_id, ) # XXX: we set timeout to 10s to help workaround # https://github.com/matrix-org/synapse/issues/1733. # The reason is to avoid holding the linearizer lock # whilst processing inbound /send transactions, causing # FDs to stack up and block other inbound transactions # which empirically can currently take up to 30 minutes. # # N.B. this explicitly disables retry attempts. # # N.B. this also increases our chances of falling back to # fetching fresh state for the room if the missing event # can't be found, which slightly reduces our security. # it may also increase our DAG extremity count for the room, # causing additional state resolution? See #1760. # However, fetching state doesn't hold the linearizer lock # apparently. 
# # see https://github.com/matrix-org/synapse/pull/1744 # # ---- # # Update richvdh 2018/09/18: There are a number of problems with timing this # request out aggressively on the client side: # # - it plays badly with the server-side rate-limiter, which starts tarpitting you # if you send too many requests at once, so you end up with the server carefully # working through the backlog of your requests, which you have already timed # out. # # - for this request in particular, we now (as of # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the # server can't produce a plausible-looking set of prev_events - so we becone # much more likely to reject the event. # # - contrary to what it says above, we do *not* fall back to fetching fresh state # for the room if get_missing_events times out. Rather, we give up processing # the PDU whose prevs we are missing, which then makes it much more likely that # we'll end up back here for the *next* PDU in the list, which exacerbates the # problem. # # - the aggressive 10s timeout was introduced to deal with incoming federation # requests taking 8 hours to process. It's not entirely clear why that was going # on; certainly there were other issues causing traffic storms which are now # resolved, and I think in any case we may be more sensible about our locking # now. We're *certainly* more sensible about our logging. # # All that said: Let's try increasing the timeout to 60s and see what happens. try: missing_events = await self.federation_client.get_missing_events( origin, room_id, earliest_events_ids=list(latest), latest_events=[pdu], limit=10, min_depth=min_depth, timeout=60000, ) except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e: # We failed to get the missing events, but since we need to handle # the case of `get_missing_events` not returning the necessary # events anyway, it is safe to simply log the error and continue. 
logger.warning( "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e ) return logger.info( "[%s %s]: Got %d prev_events: %s", room_id, event_id, len(missing_events), shortstr(missing_events), ) # We want to sort these by depth so we process them and # tell clients about them in order. missing_events.sort(key=lambda x: x.depth) for ev in missing_events: logger.info( "[%s %s] Handling received prev_event %s", room_id, event_id, ev.event_id, ) with nested_logging_context(ev.event_id): try: await self.on_receive_pdu(origin, ev, sent_to_us_directly=False) except FederationError as e: if e.code == 403: logger.warning( "[%s %s] Received prev_event %s failed history check.", room_id, event_id, ev.event_id, ) else: raise async def _get_state_for_room( self, destination: str, room_id: str, event_id: str, include_event_in_state: bool = False, ) -> Tuple[List[EventBase], List[EventBase]]: """Requests all of the room state at a given event from a remote homeserver. Args: destination: The remote homeserver to query for the state. room_id: The id of the room we're interested in. event_id: The id of the event we want the state at. include_event_in_state: if true, the event itself will be included in the returned state event list. Returns: A list of events in the state, possibly including the event itself, and a list of events in the auth chain for the given event. 
""" ( state_event_ids, auth_event_ids, ) = await self.federation_client.get_room_state_ids( destination, room_id, event_id=event_id ) desired_events = set(state_event_ids + auth_event_ids) if include_event_in_state: desired_events.add(event_id) event_map = await self._get_events_from_store_or_dest( destination, room_id, desired_events ) failed_to_fetch = desired_events - event_map.keys() if failed_to_fetch: logger.warning( "Failed to fetch missing state/auth events for %s %s", event_id, failed_to_fetch, ) remote_state = [ event_map[e_id] for e_id in state_event_ids if e_id in event_map ] if include_event_in_state: remote_event = event_map.get(event_id) if not remote_event: raise Exception("Unable to get missing prev_event %s" % (event_id,)) if remote_event.is_state() and remote_event.rejected_reason is None: remote_state.append(remote_event) auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] auth_chain.sort(key=lambda e: e.depth) return remote_state, auth_chain async def _get_events_from_store_or_dest( self, destination: str, room_id: str, event_ids: Iterable[str] ) -> Dict[str, EventBase]: """Fetch events from a remote destination, checking if we already have them. Persists any events we don't already have as outliers. If we fail to fetch any of the events, a warning will be logged, and the event will be omitted from the result. Likewise, any events which turn out not to be in the given room. This function *does not* automatically get missing auth events of the newly fetched events. Callers must include the full auth chain of of the missing events in the `event_ids` argument, to ensure that any missing auth events are correctly fetched. 
Returns: map from event_id to event """ fetched_events = await self.store.get_events(event_ids, allow_rejected=True) missing_events = set(event_ids) - fetched_events.keys() if missing_events: logger.debug( "Fetching unknown state/auth events %s for room %s", missing_events, room_id, ) await self._get_events_and_persist( destination=destination, room_id=room_id, events=missing_events ) # we need to make sure we re-load from the database to get the rejected # state correct. fetched_events.update( (await self.store.get_events(missing_events, allow_rejected=True)) ) # check for events which were in the wrong room. # # this can happen if a remote server claims that the state or # auth_events at an event in room A are actually events in room B bad_events = [ (event_id, event.room_id) for event_id, event in fetched_events.items() if event.room_id != room_id ] for bad_event_id, bad_room_id in bad_events: # This is a bogus situation, but since we may only discover it a long time # after it happened, we try our best to carry on, by just omitting the # bad events from the returned auth/state set. logger.warning( "Remote server %s claims event %s in room %s is an auth/state " "event in room %s", destination, bad_event_id, bad_room_id, room_id, ) del fetched_events[bad_event_id] return fetched_events async def _process_received_pdu( self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]], ): """ Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. 
Args: origin: server sending the event event: event to be persisted state: Normally None, but if we are handling a gap in the graph (ie, we are missing one or more prev_events), the resolved state at the event """ room_id = event.room_id event_id = event.event_id logger.debug("[%s %s] Processing event: %s", room_id, event_id, event) try: await self._handle_new_event(origin, event, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) # For encrypted messages we check that we know about the sending device, # if we don't then we mark the device cache for that user as stale. if event.type == EventTypes.Encrypted: device_id = event.content.get("device_id") sender_key = event.content.get("sender_key") cached_devices = await self.store.get_cached_devices_for_user(event.sender) resync = False # Whether we should resync device lists. device = None if device_id is not None: device = cached_devices.get(device_id) if device is None: logger.info( "Received event from remote device not in our cache: %s %s", event.sender, device_id, ) resync = True # We also check if the `sender_key` matches what we expect. if sender_key is not None: # Figure out what sender key we're expecting. If we know the # device and recognize the algorithm then we can work out the # exact key to expect. Otherwise check it matches any key we # have for that device. current_keys = [] # type: Container[str] if device: keys = device.get("keys", {}).get("keys", {}) if ( event.content.get("algorithm") == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2 ): # For this algorithm we expect a curve25519 key. key_name = "curve25519:%s" % (device_id,) current_keys = [keys.get(key_name)] else: # We don't know understand the algorithm, so we just # check it matches a key for the device. current_keys = keys.values() elif device_id: # We don't have any keys for the device ID. pass else: # The event didn't include a device ID, so we just look for # keys across all devices. 
current_keys = [ key for device in cached_devices.values() for key in device.get("keys", {}).get("keys", {}).values() ] # We now check that the sender key matches (one of) the expected # keys. if sender_key not in current_keys: logger.info( "Received event from remote device with unexpected sender key: %s %s: %s", event.sender, device_id or "<no device_id>", sender_key, ) resync = True if resync: run_as_background_process( "resync_device_due_to_pdu", self._resync_device, event.sender ) async def _resync_device(self, sender: str) -> None: """We have detected that the device list for the given user may be out of sync, so we try and resync them. """ try: await self.store.mark_remote_user_device_cache_as_stale(sender) # Immediately attempt a resync in the background if self.config.worker_app: await self._user_device_resync(user_id=sender) else: await self._device_list_updater.user_device_resync(sender) except Exception: logger.exception("Failed to resync device for %s", sender) @log_function async def backfill(self, dest, room_id, limit, extremities): """ Trigger a backfill request to `dest` for the given `room_id` This will attempt to get more events from the remote. If the other side has no new events to offer, this will return an empty list. As the events are received, we check their signatures, and also do some sanity-checking on them. If any of the backfilled events are invalid, this method throws a SynapseError. TODO: make this more useful to distinguish failures of the remote server from invalid events (there is probably no point in trying to re-fetch invalid events from every other HS in the room.) 
""" if dest == self.server_name: raise SynapseError(400, "Can't backfill from self.") events = await self.federation_client.backfill( dest, room_id, limit=limit, extremities=extremities ) if not events: return [] # ideally we'd sanity check the events here for excess prev_events etc, # but it's hard to reject events at this point without completely # breaking backfill in the same way that it is currently broken by # events whose signature we cannot verify (#3121). # # So for now we accept the events anyway. #3124 tracks this. # # for ev in events: # self._sanity_check_event(ev) # Don't bother processing events we already have. seen_events = await self.store.have_events_in_timeline( {e.event_id for e in events} ) events = [e for e in events if e.event_id not in seen_events] if not events: return [] event_map = {e.event_id: e for e in events} event_ids = {e.event_id for e in events} # build a list of events whose prev_events weren't in the batch. # (XXX: this will include events whose prev_events we already have; that doesn't # sound right?) edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids] logger.info("backfill: Got %d events with %d edges", len(events), len(edges)) # For each edge get the current state. auth_events = {} state_events = {} events_to_state = {} for e_id in edges: state, auth = await self._get_state_for_room( destination=dest, room_id=room_id, event_id=e_id, include_event_in_state=False, ) auth_events.update({a.event_id: a for a in auth}) auth_events.update({s.event_id: s for s in state}) state_events.update({s.event_id: s for s in state}) events_to_state[e_id] = state required_auth = { a_id for event in events + list(state_events.values()) + list(auth_events.values()) for a_id in event.auth_event_ids() } auth_events.update( {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} ) ev_infos = [] # Step 1: persist the events in the chunk we fetched state for (i.e. 
# the backwards extremities), with custom auth events and state for e_id in events_to_state: # For paranoia we ensure that these events are marked as # non-outliers ev = event_map[e_id] assert not ev.internal_metadata.is_outlier() ev_infos.append( _NewEventInfo( event=ev, state=events_to_state[e_id], auth_events={ ( auth_events[a_id].type, auth_events[a_id].state_key, ): auth_events[a_id] for a_id in ev.auth_event_ids() if a_id in auth_events }, ) ) if ev_infos: await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) for event in events: if event in events_to_state: continue # For paranoia we ensure that these events are marked as # non-outliers assert not event.internal_metadata.is_outlier() # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. await self._handle_new_event(dest, event, backfilled=True) return events async def maybe_backfill( self, room_id: str, current_depth: int, limit: int ) -> bool: """Checks the database to see if we should backfill before paginating, and if so do. Args: room_id current_depth: The depth from which we're paginating from. This is used to decide if we should backfill and what extremities to use. limit: The number of events that the pagination request will return. This is used as part of the heuristic to decide if we should back paginate. """ extremities = await self.store.get_oldest_events_with_depth_in_room(room_id) if not extremities: logger.debug("Not backfilling as no extremeties found.") return False # We only want to paginate if we can actually see the events we'll get, # as otherwise we'll just spend a lot of resources to get redacted # events. # # We do this by filtering all the backwards extremities and seeing if # any remain. 
Given we don't have the extremity events themselves, we # need to actually check the events that reference them. # # *Note*: the spec wants us to keep backfilling until we reach the start # of the room in case we are allowed to see some of the history. However # in practice that causes more issues than its worth, as a) its # relatively rare for there to be any visible history and b) even when # there is its often sufficiently long ago that clients would stop # attempting to paginate before backfill reached the visible history. # # TODO: If we do do a backfill then we should filter the backwards # extremities to only include those that point to visible portions of # history. # # TODO: Correctly handle the case where we are allowed to see the # forward event but not the backward extremity, e.g. in the case of # initial join of the server where we are allowed to see the join # event but not anything before it. This would require looking at the # state *before* the event, ignoring the special casing certain event # types have. forward_events = await self.store.get_successor_events(list(extremities)) extremities_events = await self.store.get_events( forward_events, redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, ) # We set `check_history_visibility_only` as we might otherwise get false # positives from users having been erased. filtered_extremities = await filter_events_for_server( self.storage, self.server_name, list(extremities_events.values()), redact=False, check_history_visibility_only=True, ) if not filtered_extremities: return False # Check if we reached a point where we should start backfilling. sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1])) max_depth = sorted_extremeties_tuple[0][1] # If we're approaching an extremity we trigger a backfill, otherwise we # no-op. 
# # We chose twice the limit here as then clients paginating backwards # will send pagination requests that trigger backfill at least twice # using the most recent extremity before it gets removed (see below). We # chose more than one times the limit in case of failure, but choosing a # much larger factor will result in triggering a backfill request much # earlier than necessary. if current_depth - 2 * limit > max_depth: logger.debug( "Not backfilling as we don't need to. %d < %d - 2 * %d", max_depth, current_depth, limit, ) return False logger.debug( "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s", room_id, current_depth, max_depth, sorted_extremeties_tuple, ) # We ignore extremities that have a greater depth than our current depth # as: # 1. we don't really care about getting events that have happened # before our current position; and # 2. we have likely previously tried and failed to backfill from that # extremity, so to avoid getting "stuck" requesting the same # backfill repeatedly we drop those extremities. filtered_sorted_extremeties_tuple = [ t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth ] # However, we need to check that the filtered extremities are non-empty. # If they are empty then either we can a) bail or b) still attempt to # backill. We opt to try backfilling anyway just in case we do get # relevant events. if filtered_sorted_extremeties_tuple: sorted_extremeties_tuple = filtered_sorted_extremeties_tuple # We don't want to specify too many extremities as it causes the backfill # request URI to be too long. extremities = dict(sorted_extremeties_tuple[:5]) # Now we need to decide which hosts to hit first. # First we try hosts that are already in the room # TODO: HEURISTIC ALERT. curr_state = await self.state_handler.get_current_state(room_id) def get_domains_from_state(state): """Get joined domains from state Args: state (dict[tuple, FrozenEvent]): State map from type/state key to event. 
            Returns:
                list[tuple[str, int]]: Returns a list of servers with the
                lowest depth of their joins. Sorted by lowest depth first.
            """
            joined_users = [
                (state_key, int(event.depth))
                for (e_type, state_key), event in state.items()
                if e_type == EventTypes.Member and event.membership == Membership.JOIN
            ]

            # Map each joined user's domain to the minimum join depth seen for
            # that domain.
            joined_domains = {}  # type: Dict[str, int]
            for u, d in joined_users:
                try:
                    dom = get_domain_from_id(u)
                    old_d = joined_domains.get(dom)
                    if old_d:
                        joined_domains[dom] = min(d, old_d)
                    else:
                        joined_domains[dom] = d
                except Exception:
                    pass

            return sorted(joined_domains.items(), key=lambda d: d[1])

        curr_domains = get_domains_from_state(curr_state)

        likely_domains = [
            domain for domain, depth in curr_domains if domain != self.server_name
        ]

        async def try_backfill(domains):
            # Try each destination in turn; returns True as soon as one
            # backfill attempt succeeds.
            # TODO: Should we try multiple of these at a time?
            for dom in domains:
                try:
                    await self.backfill(
                        dom, room_id, limit=100, extremities=extremities
                    )
                    # If this succeeded then we probably already have the
                    # appropriate stuff.
                    # TODO: We can probably do something more intelligent here.
                    return True
                except SynapseError as e:
                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except HttpResponseException as e:
                    if 400 <= e.code < 500:
                        raise e.to_synapse_error()

                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except CodeMessageException as e:
                    if 400 <= e.code < 500:
                        raise

                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except NotRetryingDestination as e:
                    logger.info(str(e))
                    continue
                except RequestSendFailed as e:
                    logger.info("Failed to get backfill from %s because %s", dom, e)
                    continue
                except FederationDeniedError as e:
                    logger.info(e)
                    continue
                except Exception as e:
                    logger.exception("Failed to backfill from %s because %s", dom, e)
                    continue

            return False

        success = await try_backfill(likely_domains)
        if success:
            return True

        # Huh, well *those* domains didn't work out. Lets try some domains
        # from the time.
        tried_domains = set(likely_domains)
        tried_domains.add(self.server_name)

        event_ids = list(extremities.keys())

        logger.debug("calling resolve_state_groups in _maybe_backfill")
        # Resolve the state at each extremity so we can work out which servers
        # were in the room at that point in the DAG (via get_domains_from_state).
        resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
        states = await make_deferred_yieldable(
            defer.gatherResults(
                [resolve(room_id, [e]) for e in event_ids], consumeErrors=True
            )
        )

        # dict[str, dict[tuple, str]], a map from event_id to state map of
        # event_ids.
        states = dict(zip(event_ids, [s.state for s in states]))

        state_map = await self.store.get_events(
            [e_id for ids in states.values() for e_id in ids.values()],
            get_prev_content=False,
        )
        states = {
            key: {
                k: state_map[e_id]
                for k, e_id in state_dict.items()
                if e_id in state_map
            }
            for key, state_dict in states.items()
        }

        for e_id, _ in sorted_extremeties_tuple:
            likely_domains = get_domains_from_state(states[e_id])

            success = await try_backfill(
                [dom for dom, _ in likely_domains if dom not in tried_domains]
            )
            if success:
                return True

            tried_domains.update(dom for dom, _ in likely_domains)

        return False

    async def _get_events_and_persist(
        self, destination: str, room_id: str, events: Iterable[str]
    ):
        """Fetch the given events from a server, and persist them as outliers.

        This function *does not* recursively get missing auth events of the
        newly fetched events. Callers must include in the `events` argument
        any missing events from the auth chain.

        Logs a warning if we can't find the given event.
""" room_version = await self.store.get_room_version(room_id) event_map = {} # type: Dict[str, EventBase] async def get_event(event_id: str): with nested_logging_context(event_id): try: event = await self.federation_client.get_pdu( [destination], event_id, room_version, outlier=True, ) if event is None: logger.warning( "Server %s didn't return event %s", destination, event_id, ) return event_map[event.event_id] = event except Exception as e: logger.warning( "Error fetching missing state/auth event %s: %s %s", event_id, type(e), e, ) await concurrently_execute(get_event, events, 5) # Make a map of auth events for each event. We do this after fetching # all the events as some of the events' auth events will be in the list # of requested events. auth_events = [ aid for event in event_map.values() for aid in event.auth_event_ids() if aid not in event_map ] persisted_events = await self.store.get_events( auth_events, allow_rejected=True, ) event_infos = [] for event in event_map.values(): auth = {} for auth_event_id in event.auth_event_ids(): ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id) if ae: auth[(ae.type, ae.state_key)] = ae else: logger.info("Missing auth event %s", auth_event_id) event_infos.append(_NewEventInfo(event, None, auth)) await self._handle_new_events( destination, room_id, event_infos, ) def _sanity_check_event(self, ev): """ Do some early sanity checks of a received event In particular, checks it doesn't have an excessive number of prev_events or auth_events, which could cause a huge state resolution or cascade of event fetches. 
        Args:
            ev (synapse.events.EventBase): event to be checked

        Returns: None

        Raises:
            SynapseError if the event does not pass muster
        """
        if len(ev.prev_event_ids()) > 20:
            logger.warning(
                "Rejecting event %s which has %i prev_events",
                ev.event_id,
                len(ev.prev_event_ids()),
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")

        if len(ev.auth_event_ids()) > 10:
            logger.warning(
                "Rejecting event %s which has %i auth_events",
                ev.event_id,
                len(ev.auth_event_ids()),
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")

    async def send_invite(self, target_host, event):
        """ Sends the invite to the remote server for signing.

        Invites must be signed by the invitee's server before distribution.

        Returns the PDU as returned by the remote server.
        """
        pdu = await self.federation_client.send_invite(
            destination=target_host,
            room_id=event.room_id,
            event_id=event.event_id,
            pdu=event,
        )

        return pdu

    async def on_event_auth(self, event_id: str) -> List[EventBase]:
        """Fetch the auth chain for the given event, including the event's own
        auth events."""
        event = await self.store.get_event(event_id)
        auth = await self.store.get_auth_chain(
            list(event.auth_event_ids()), include_given=True
        )
        return list(auth)

    async def do_invite_join(
        self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict
    ) -> Tuple[str, int]:
        """ Attempts to join the `joinee` to the room `room_id` via the
        servers contained in `target_hosts`.

        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the
        state at that event and the auth_chains.

        We suspend processing of any received events from this room until we
        have finished processing the join.

        Args:
            target_hosts: List of servers to attempt to join the room with.

            room_id: The ID of the room to join.

            joinee: The User ID of the joining user.

            content: The event content to use for the join event.
        """
        # TODO: We should be able to call this on workers, but the upgrading of
        # room stuff after join currently doesn't work on workers.
assert self.config.worker.worker_app is None logger.debug("Joining %s to %s", joinee, room_id) origin, event, room_version_obj = await self._make_and_verify_event( target_hosts, room_id, joinee, "join", content, params={"ver": KNOWN_ROOM_VERSIONS}, ) # This shouldn't happen, because the RoomMemberHandler has a # linearizer lock which only allows one operation per user per room # at a time - so this is just paranoia. assert room_id not in self.room_queues self.room_queues[room_id] = [] await self._clean_room_for_join(room_id) handled_events = set() try: # Try the host we successfully got a response to /make_join/ # request first. host_list = list(target_hosts) try: host_list.remove(origin) host_list.insert(0, origin) except ValueError: pass ret = await self.federation_client.send_join( host_list, event, room_version_obj ) origin = ret["origin"] state = ret["state"] auth_chain = ret["auth_chain"] auth_chain.sort(key=lambda e: e.depth) handled_events.update([s.event_id for s in state]) handled_events.update([a.event_id for a in auth_chain]) handled_events.add(event.event_id) logger.debug("do_invite_join auth_chain: %s", auth_chain) logger.debug("do_invite_join state: %s", state) logger.debug("do_invite_join event: %s", event) # if this is the first time we've joined this room, it's time to add # a row to `rooms` with the correct room version. If there's already a # row there, we should override it, since it may have been populated # based on an invite request which lied about the room version. # # federation_client.send_join has already checked that the room # version in the received create event is the same as room_version_obj, # so we can rely on it now. 
# await self.store.upsert_room_on_join( room_id=room_id, room_version=room_version_obj, ) max_stream_id = await self._persist_auth_tree( origin, room_id, auth_chain, state, event, room_version_obj ) # We wait here until this instance has seen the events come down # replication (if we're using replication) as the below uses caches. await self._replication.wait_for_stream_position( self.config.worker.events_shard_config.get_instance(room_id), "events", max_stream_id, ) # Check whether this room is the result of an upgrade of a room we already know # about. If so, migrate over user information predecessor = await self.store.get_room_predecessor(room_id) if not predecessor or not isinstance(predecessor.get("room_id"), str): return event.event_id, max_stream_id old_room_id = predecessor["room_id"] logger.debug( "Found predecessor for %s during remote join: %s", room_id, old_room_id ) # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() await member_handler.transfer_room_state_on_room_upgrade( old_room_id, room_id ) logger.debug("Finished joining %s to %s", joinee, room_id) return event.event_id, max_stream_id finally: room_queue = self.room_queues[room_id] del self.room_queues[room_id] # we don't need to wait for the queued events to be processed - # it's just a best-effort thing at this point. We do want to do # them roughly in order, though, otherwise we'll end up making # lots of requests for missing prev_events which we do actually # have. Hence we fire off the background task, but don't wait for it. run_in_background(self._handle_queued_pdus, room_queue) async def _handle_queued_pdus(self, room_queue): """Process PDUs which got queued up while we were busy send_joining. 
        Args:
            room_queue (list[FrozenEvent, str]): list of PDUs to be processed
                and the servers that sent them
        """
        for p, origin in room_queue:
            try:
                logger.info(
                    "Processing queued PDU %s which was received "
                    "while we were joining %s",
                    p.event_id,
                    p.room_id,
                )
                with nested_logging_context(p.event_id):
                    await self.on_receive_pdu(origin, p, sent_to_us_directly=True)
            except Exception as e:
                # Best-effort: a failure on one queued PDU shouldn't stop us
                # processing the rest.
                logger.warning(
                    "Error handling queued PDU %s from %s: %s", p.event_id, origin, e
                )

    async def on_make_join_request(
        self, origin: str, room_id: str, user_id: str
    ) -> EventBase:
        """ We've received a /make_join/ request, so we create a partial
        join event for the room and return that. We do *not* persist or
        process it until the other server has signed it and sent it back.

        Args:
            origin: The (verified) server name of the requesting server.
            room_id: Room to create join event in
            user_id: The user to create the join for
        """
        if get_domain_from_id(user_id) != origin:
            logger.info(
                "Got /make_join request for user %r from different origin %s, ignoring",
                user_id,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        # checking the room version will check that we've actually heard of the room
        # (and return a 404 otherwise)
        room_version = await self.store.get_room_version_id(room_id)

        # now check that we are *still* in the room
        is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
        if not is_in_room:
            logger.info(
                "Got /make_join request for room %s we are no longer in", room_id,
            )
            raise NotFoundError("Not an active room on this server")

        event_content = {"membership": Membership.JOIN}

        builder = self.event_builder_factory.new(
            room_version,
            {
                "type": EventTypes.Member,
                "content": event_content,
                "room_id": room_id,
                "sender": user_id,
                "state_key": user_id,
            },
        )

        try:
            event, context = await self.event_creation_handler.create_new_client_event(
                builder=builder
            )
        except SynapseError as e:
            logger.warning("Failed to create join to %s because %s", room_id, e)
            raise

        # The remote hasn't signed it yet, obviously. We'll do the full checks
        # when we get the event back in `on_send_join_request`
        await self.auth.check_from_context(
            room_version, event, context, do_sig_check=False
        )

        return event

    async def on_send_join_request(self, origin, pdu):
        """ We have received a join event for a room. Fully process it and
        respond with the current state and auth chains.
        """
        event = pdu

        logger.debug(
            "on_send_join_request from %s: Got event: %s, signatures: %s",
            origin,
            event.event_id,
            event.signatures,
        )

        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got /send_join request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        # Not an outlier any more: we will process it fully below.
        event.internal_metadata.outlier = False
        # Send this event on behalf of the origin server.
        #
        # The reasons we have the destination server rather than the origin
        # server send it are slightly mysterious: the origin server should have
        # all the necessary state once it gets the response to the send_join,
        # so it could send the event itself if it wanted to. It may be that
        # doing it this way reduces failure modes, or avoids certain attacks
        # where a new server selectively tells a subset of the federation that
        # it has joined.
        #
        # The fact is that, as of the current writing, Synapse doesn't send out
        # the join event over federation after joining, and changing it now
        # would introduce the danger of backwards-compatibility problems.
        event.internal_metadata.send_on_behalf_of = origin

        context = await self._handle_new_event(origin, event)

        logger.debug(
            "on_send_join_request: After _handle_new_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )

        # State and auth chain *before* the join event itself, as required by
        # the /send_join response.
        prev_state_ids = await context.get_prev_state_ids()

        state_ids = list(prev_state_ids.values())
        auth_chain = await self.store.get_auth_chain(state_ids)

        state = await self.store.get_events(list(prev_state_ids.values()))

        return {"state": list(state.values()), "auth_chain": auth_chain}

    async def on_invite_request(
        self, origin: str, event: EventBase, room_version: RoomVersion
    ):
        """ We've got an invite event. Process and persist it. Sign it.

        Respond with the now signed event.
        """
        if event.state_key is None:
            raise SynapseError(400, "The invite event did not have a state key")

        is_blocked = await self.store.is_room_blocked(event.room_id)
        if is_blocked:
            raise SynapseError(403, "This room has been blocked on this server")

        if self.hs.config.block_non_admin_invites:
            raise SynapseError(403, "This server does not accept room invites")

        if not self.spam_checker.user_may_invite(
            event.sender, event.state_key, event.room_id
        ):
            raise SynapseError(
                403, "This user is not permitted to send invites to this server/user"
            )

        membership = event.content.get("membership")
        if event.type != EventTypes.Member or membership != Membership.INVITE:
            raise SynapseError(400, "The event was not an m.room.member invite event")

        sender_domain = get_domain_from_id(event.sender)
        if sender_domain != origin:
            raise SynapseError(
                400, "The invite event was not from the server sending it"
            )

        if not self.is_mine_id(event.state_key):
            raise SynapseError(400, "The invite event must be for this server")

        # block any attempts to invite the server notices mxid
        if event.state_key == self._server_notices_mxid:
            raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")

        # keep a record of the room version, if we don't yet know it.
# (this may get overwritten if we later get a different room version in a # join dance). await self._maybe_store_room_on_outlier_membership( room_id=event.room_id, room_version=room_version ) event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True event.signatures.update( compute_event_signature( room_version, event.get_pdu_json(), self.hs.hostname, self.hs.signing_key, ) ) context = await self.state_handler.compute_event_context(event) await self.persist_events_and_notify(event.room_id, [(event, context)]) return event async def do_remotely_reject_invite( self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict ) -> Tuple[EventBase, int]: origin, event, room_version = await self._make_and_verify_event( target_hosts, room_id, user_id, "leave", content=content ) # Mark as outlier as we don't have any state for this event; we're not # even in the room. event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True # Try the host that we successfully called /make_leave/ on first for # the /send_leave/ request. host_list = list(target_hosts) try: host_list.remove(origin) host_list.insert(0, origin) except ValueError: pass await self.federation_client.send_leave(host_list, event) context = await self.state_handler.compute_event_context(event) stream_id = await self.persist_events_and_notify( event.room_id, [(event, context)] ) return event, stream_id async def _make_and_verify_event( self, target_hosts: Iterable[str], room_id: str, user_id: str, membership: str, content: JsonDict = {}, params: Optional[Dict[str, Union[str, Iterable[str]]]] = None, ) -> Tuple[str, EventBase, RoomVersion]: ( origin, event, room_version, ) = await self.federation_client.make_membership_event( target_hosts, room_id, user_id, membership, content, params=params ) logger.debug("Got response to make_%s: %s", membership, event) # We should assert some things. 
# FIXME: Do this in a nicer way assert event.type == EventTypes.Member assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id return origin, event, room_version async def on_make_leave_request( self, origin: str, room_id: str, user_id: str ) -> EventBase: """ We've received a /make_leave/ request, so we create a partial leave event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: origin: The (verified) server name of the requesting server. room_id: Room to create leave event in user_id: The user to create the leave for """ if get_domain_from_id(user_id) != origin: logger.info( "Got /make_leave request for user %r from different origin %s, ignoring", user_id, origin, ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, { "type": EventTypes.Member, "content": {"membership": Membership.LEAVE}, "room_id": room_id, "sender": user_id, "state_key": user_id, }, ) event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) raise e return event async def on_send_leave_request(self, origin, pdu): """ We have received a leave event for a room. 
Fully process it.""" event = pdu logger.debug( "on_send_leave_request: Got event: %s, signatures: %s", event.event_id, event.signatures, ) if get_domain_from_id(event.sender) != origin: logger.info( "Got /send_leave request for user %r from different origin %s", event.sender, origin, ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) event.internal_metadata.outlier = False await self._handle_new_event(origin, event) logger.debug( "on_send_leave_request: After _handle_new_event: %s, sigs: %s", event.event_id, event.signatures, ) return None async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]: """Returns the state at the event. i.e. not including said event. """ event = await self.store.get_event(event_id, check_room_id=room_id) state_groups = await self.state_store.get_state_groups(room_id, [event_id]) if state_groups: _, state = list(state_groups.items()).pop() results = {(e.type, e.state_key): e for e in state} if event.is_state(): # Get previous state if "replaces_state" in event.unsigned: prev_id = event.unsigned["replaces_state"] if prev_id != event.event_id: prev_event = await self.store.get_event(prev_id) results[(event.type, event.state_key)] = prev_event else: del results[(event.type, event.state_key)] res = list(results.values()) return res else: return [] async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: """Returns the state at the event. i.e. not including said event. 
""" event = await self.store.get_event(event_id, check_room_id=room_id) state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id]) if state_groups: _, state = list(state_groups.items()).pop() results = state if event.is_state(): # Get previous state if "replaces_state" in event.unsigned: prev_id = event.unsigned["replaces_state"] if prev_id != event.event_id: results[(event.type, event.state_key)] = prev_id else: results.pop((event.type, event.state_key), None) return list(results.values()) else: return [] @log_function async def on_backfill_request( self, origin: str, room_id: str, pdu_list: List[str], limit: int ) -> List[EventBase]: in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") # Synapse asks for 100 events per backfill request. Do not allow more. limit = min(limit, 100) events = await self.store.get_backfill_events(room_id, pdu_list, limit) events = await filter_events_for_server(self.storage, origin, events) return events @log_function async def get_persisted_pdu( self, origin: str, event_id: str ) -> Optional[EventBase]: """Get an event from the database for the given server. Args: origin: hostname of server which is requesting the event; we will check that the server is allowed to see it. event_id: id of the event being requested Returns: None if we know nothing about the event; otherwise the (possibly-redacted) event. 
        Raises:
            AuthError if the server is not currently in the room
        """
        event = await self.store.get_event(
            event_id, allow_none=True, allow_rejected=True
        )

        if event:
            in_room = await self.auth.check_host_in_room(event.room_id, origin)
            if not in_room:
                raise AuthError(403, "Host not in room.")

            events = await filter_events_for_server(self.storage, origin, [event])
            event = events[0]
            return event
        else:
            return None

    async def get_min_depth_for_context(self, context):
        """Return the stored minimum depth for the given context (room)."""
        return await self.store.get_min_depth(context)

    async def _handle_new_event(
        self, origin, event, state=None, auth_events=None, backfilled=False
    ):
        """Run auth/state prep for a single received event (via _prep_event),
        generate push actions for accepted non-outlier, non-backfilled events,
        then persist and notify. Returns the event context."""
        context = await self._prep_event(
            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
        )

        try:
            if (
                not event.internal_metadata.is_outlier()
                and not backfilled
                and not context.rejected
            ):
                await self.action_generator.handle_push_actions_for_event(
                    event, context
                )

            await self.persist_events_and_notify(
                event.room_id, [(event, context)], backfilled=backfilled
            )
        except Exception:
            # If persistence failed, clean up any push actions staged for this
            # event before re-raising.
            run_in_background(
                self.store.remove_push_actions_from_staging, event.event_id
            )
            raise

        return context

    async def _handle_new_events(
        self,
        origin: str,
        room_id: str,
        event_infos: Iterable[_NewEventInfo],
        backfilled: bool = False,
    ) -> None:
        """Creates the appropriate contexts and persists events. The events
        should not depend on one another, e.g. this should be used to persist
        a bunch of outliers, but not a chunk of individual events that depend
        on each other for state calculations.

        Notifies about the events where appropriate.
""" async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): res = await self._prep_event( origin, event, state=ev_info.state, auth_events=ev_info.auth_events, backfilled=backfilled, ) return res contexts = await make_deferred_yieldable( defer.gatherResults( [run_in_background(prep, ev_info) for ev_info in event_infos], consumeErrors=True, ) ) await self.persist_events_and_notify( room_id, [ (ev_info.event, context) for ev_info, context in zip(event_infos, contexts) ], backfilled=backfilled, ) async def _persist_auth_tree( self, origin: str, room_id: str, auth_events: List[EventBase], state: List[EventBase], event: EventBase, room_version: RoomVersion, ) -> int: """Checks the auth chain is valid (and passes auth checks) for the state and event. Then persists the auth chain and state atomically. Persists the event separately. Notifies about the persisted events where appropriate. Will attempt to fetch missing auth events. Args: origin: Where the events came from room_id, auth_events state event room_version: The room version we expect this room to have, and will raise if it doesn't match the version in the create event. """ events_to_context = {} for e in itertools.chain(auth_events, state): e.internal_metadata.outlier = True ctx = await self.state_handler.compute_event_context(e) events_to_context[e.event_id] = ctx event_map = { e.event_id: e for e in itertools.chain(auth_events, state, [event]) } create_event = None for e in auth_events: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. 
            raise SynapseError(400, "No create event in state")

        room_version_id = create_event.content.get(
            "room_version", RoomVersions.V1.identifier
        )

        if room_version.identifier != room_version_id:
            raise SynapseError(400, "Room version mismatch")

        missing_auth_events = set()
        for e in itertools.chain(auth_events, state, [event]):
            for e_id in e.auth_event_ids():
                if e_id not in event_map:
                    missing_auth_events.add(e_id)

        for e_id in missing_auth_events:
            # Best-effort fetch of each missing auth event from the origin.
            # NOTE(review): timeout is presumably in milliseconds (10s), per
            # federation_client.get_pdu's convention — confirm.
            m_ev = await self.federation_client.get_pdu(
                [origin], e_id, room_version=room_version, outlier=True, timeout=10000,
            )
            if m_ev and m_ev.event_id == e_id:
                event_map[e_id] = m_ev
            else:
                logger.info("Failed to find auth event %r", e_id)

        for e in itertools.chain(auth_events, state, [event]):
            auth_for_e = {
                (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
                for e_id in e.auth_event_ids()
                if e_id in event_map
            }
            if create_event:
                auth_for_e[(EventTypes.Create, "")] = create_event

            try:
                event_auth.check(room_version, e, auth_events=auth_for_e)
            except SynapseError as err:
                # we may get SynapseErrors here as well as AuthErrors. For
                # instance, there are a couple of (ancient) events in some
                # rooms whose senders do not have the correct sigil; these
                # cause SynapseErrors in auth.check. We don't want to give up
                # the attempt to federate altogether in such cases.
logger.warning("Rejecting %s because %s", e.event_id, err.msg) if e == event: raise events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR await self.persist_events_and_notify( room_id, [ (e, events_to_context[e.event_id]) for e in itertools.chain(auth_events, state) ], ) new_event_context = await self.state_handler.compute_event_context( event, old_state=state ) return await self.persist_events_and_notify( room_id, [(event, new_event_context)] ) async def _prep_event( self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]], auth_events: Optional[MutableStateMap[EventBase]], backfilled: bool, ) -> EventContext: context = await self.state_handler.compute_event_context(event, old_state=state) if not auth_events: prev_state_ids = await context.get_prev_state_ids() auth_events_ids = self.auth.compute_auth_events( event, prev_state_ids, for_verification=True ) auth_events_x = await self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} # This is a hack to fix some old rooms where the initial join event # didn't reference the create event in its auth events. if event.type == EventTypes.Member and not event.auth_event_ids(): if len(event.prev_event_ids()) == 1 and event.depth < 5: c = await self.store.get_event( event.prev_event_ids()[0], allow_none=True ) if c and c.type == EventTypes.Create: auth_events[(c.type, c.state_key)] = c context = await self.do_auth(origin, event, context, auth_events=auth_events) if not context.rejected: await self._check_for_soft_fail(event, state, backfilled) if event.type == EventTypes.GuestAccess and not context.rejected: await self.maybe_kick_guest_users(event) return context async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool ) -> None: """Checks if we should soft fail the event; if so, marks the event as such. 
        Args:
            event
            state: The state at the event if we don't have all the event's prev events
            backfilled: Whether the event is from backfill

        Returns:
            None. On auth failure against the current state, sets
            event.internal_metadata.soft_failed.
        """
        # For new (non-backfilled and non-outlier) events we check if the event
        # passes auth based on the current state. If it doesn't then we
        # "soft-fail" the event.
        if backfilled or event.internal_metadata.is_outlier():
            return

        extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
        extrem_ids = set(extrem_ids_list)
        prev_event_ids = set(event.prev_event_ids())

        if extrem_ids == prev_event_ids:
            # If they're the same then the current state is the same as the
            # state at the event, so no point rechecking auth for soft fail.
            return

        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

        # Calculate the "current state".
        if state is not None:
            # If we're explicitly given the state then we won't have all the
            # prev events, and so we have a gap in the graph. In this case
            # we want to be a little careful as we might have been down for
            # a while and have an incorrect view of the current state,
            # however we still want to do checks as gaps are easy to
            # maliciously manufacture.
            #
            # So we use a "current state" that is actually a state
            # resolution across the current forward extremities and the
            # given state at the event. This should correctly handle cases
            # like bans, especially with state res v2.
state_sets_d = await self.state_store.get_state_groups( event.room_id, extrem_ids ) state_sets = list(state_sets_d.values()) # type: List[Iterable[EventBase]] state_sets.append(state) current_states = await self.state_handler.resolve_events( room_version, state_sets, event ) current_state_ids = { k: e.event_id for k, e in current_states.items() } # type: StateMap[str] else: current_state_ids = await self.state_handler.get_current_state_ids( event.room_id, latest_event_ids=extrem_ids ) logger.debug( "Doing soft-fail check for %s: state %s", event.event_id, current_state_ids, ) # Now check if event pass auth against said current state auth_types = auth_types_for_event(event) current_state_ids_list = [ e for k, e in current_state_ids.items() if k in auth_types ] auth_events_map = await self.store.get_events(current_state_ids_list) current_auth_events = { (e.type, e.state_key): e for e in auth_events_map.values() } try: event_auth.check(room_version_obj, event, auth_events=current_auth_events) except AuthError as e: logger.warning("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True async def on_query_auth( self, origin, event_id, room_id, remote_auth_chain, rejects, missing ): in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") event = await self.store.get_event(event_id, check_room_id=room_id) # Just go through and process each event in `remote_auth_chain`. We # don't want to fall into the trap of `missing` being wrong. for e in remote_auth_chain: try: await self._handle_new_event(origin, e) except AuthError: pass # Now get the current auth_chain for the event. local_auth_chain = await self.store.get_auth_chain( list(event.auth_event_ids()), include_given=True ) # TODO: Check if we would now reject event_id. If so we need to tell # everyone. 
ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain) logger.debug("on_query_auth returning: %s", ret) return ret async def on_get_missing_events( self, origin, room_id, earliest_events, latest_events, limit ): in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") # Only allow up to 20 events to be retrieved per request. limit = min(limit, 20) missing_events = await self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, ) missing_events = await filter_events_for_server( self.storage, origin, missing_events ) return missing_events async def do_auth( self, origin: str, event: EventBase, context: EventContext, auth_events: MutableStateMap[EventBase], ) -> EventContext: """ Args: origin: event: context: auth_events: Map from (event_type, state_key) to event Normally, our calculated auth_events based on the state of the room at the event's position in the DAG, though occasionally (eg if the event is an outlier), may be the auth events claimed by the remote server. Also NB that this function adds entries to it. Returns: updated context object """ room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] try: context = await self._update_auth_events_and_context_for_auth( origin, event, context, auth_events ) except Exception: # We don't really mind if the above fails, so lets not fail # processing if it does. However, it really shouldn't fail so # let's still log as an exception since we'll still want to fix # any bugs. logger.exception( "Failed to double check auth events for %s with remote. 
" "Ignoring failure and continuing processing of event.", event.event_id, ) try: event_auth.check(room_version_obj, event, auth_events=auth_events) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR return context async def _update_auth_events_and_context_for_auth( self, origin: str, event: EventBase, context: EventContext, auth_events: MutableStateMap[EventBase], ) -> EventContext: """Helper for do_auth. See there for docs. Checks whether a given event has the expected auth events. If it doesn't then we talk to the remote server to compare state to see if we can come to a consensus (e.g. if one server missed some valid state). This attempts to resolve any potential divergence of state between servers, but is not essential and so failures should not block further processing of the event. Args: origin: event: context: auth_events: Map from (event_type, state_key) to event Normally, our calculated auth_events based on the state of the room at the event's position in the DAG, though occasionally (eg if the event is an outlier), may be the auth events claimed by the remote server. Also NB that this function adds entries to it. Returns: updated context """ event_auth_events = set(event.auth_event_ids()) # missing_auth is the set of the event's auth_events which we don't yet have # in auth_events. missing_auth = event_auth_events.difference( e.event_id for e in auth_events.values() ) # if we have missing events, we need to fetch those events from somewhere. # # we start by checking if they are in the store, and then try calling /event_auth/. if missing_auth: have_events = await self.store.have_seen_events(missing_auth) logger.debug("Events %s are in the store", have_events) missing_auth.difference_update(have_events) if missing_auth: # If we don't have all the auth events, we need to get them. 
logger.info("auth_events contains unknown events: %s", missing_auth) try: try: remote_auth_chain = await self.federation_client.get_event_auth( origin, event.room_id, event.event_id ) except RequestSendFailed as e1: # The other side isn't around or doesn't implement the # endpoint, so lets just bail out. logger.info("Failed to get event auth from remote: %s", e1) return context seen_remotes = await self.store.have_seen_events( [e.event_id for e in remote_auth_chain] ) for e in remote_auth_chain: if e.event_id in seen_remotes: continue if e.event_id == event.event_id: continue try: auth_ids = e.auth_event_ids() auth = { (e.type, e.state_key): e for e in remote_auth_chain if e.event_id in auth_ids or e.type == EventTypes.Create } e.internal_metadata.outlier = True logger.debug( "do_auth %s missing_auth: %s", event.event_id, e.event_id ) await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e except AuthError: pass except Exception: logger.exception("Failed to get auth chain") if event.internal_metadata.is_outlier(): # XXX: given that, for an outlier, we'll be working with the # event's *claimed* auth events rather than those we calculated: # (a) is there any point in this test, since different_auth below will # obviously be empty # (b) alternatively, why don't we do it earlier? logger.info("Skipping auth_event fetch for outlier") return context different_auth = event_auth_events.difference( e.event_id for e in auth_events.values() ) if not different_auth: return context logger.info( "auth_events refers to events which are not in our calculated auth " "chain: %s", different_auth, ) # XXX: currently this checks for redactions but I'm not convinced that is # necessary? 
        # Fetch the full events for the IDs where the event's claimed auth set
        # and our calculated one disagree, so we can sanity-check them and
        # state-resolve between the two views.
        different_events = await self.store.get_events_as_list(different_auth)

        for d in different_events:
            if d.room_id != event.room_id:
                logger.warning(
                    "Event %s refers to auth_event %s which is in a different room",
                    event.event_id,
                    d.event_id,
                )

                # don't attempt to resolve the claimed auth events against our own
                # in this case: just use our own auth events.
                #
                # XXX: should we reject the event in this case? It feels like we should,
                # but then shouldn't we also do so if we've failed to fetch any of the
                # auth events?
                return context

        # now we state-resolve between our own idea of the auth events, and the remote's
        # idea of them.

        local_state = auth_events.values()
        remote_auth_events = dict(auth_events)
        remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
        remote_state = remote_auth_events.values()

        room_version = await self.store.get_room_version_id(event.room_id)
        new_state = await self.state_handler.resolve_events(
            room_version, (local_state, remote_state), event
        )

        logger.info(
            "After state res: updating auth_events with new state %s",
            {
                (d.type, d.state_key): d.event_id
                for d in new_state.values()
                if auth_events.get((d.type, d.state_key)) != d
            },
        )

        # NB: this mutates the caller-supplied map in place (see the note in
        # this function's docstring about adding entries to auth_events).
        auth_events.update(new_state)

        context = await self._update_context_for_auth_events(
            event, context, auth_events
        )

        return context

    async def _update_context_for_auth_events(
        self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
    ) -> EventContext:
        """Update the state_ids in an event context after auth event resolution,
        storing the changes as a new state group.

        Args:
            event: The event we're handling the context for

            context: initial event context

            auth_events: Events to update in the event context.

        Returns:
            new event context
        """
        # exclude the state key of the new event from the current_state in the context.
        if event.is_state():
            event_key = (event.type, event.state_key)  # type: Optional[Tuple[str, str]]
        else:
            event_key = None
        # Delta of state entries that auth resolution changed, keyed by
        # (type, state_key); the event's own key is excluded above.
        state_updates = {
            k: a.event_id for k, a in auth_events.items() if k != event_key
        }

        current_state_ids = await context.get_current_state_ids()
        current_state_ids = dict(current_state_ids)  # type: ignore

        current_state_ids.update(state_updates)

        prev_state_ids = await context.get_prev_state_ids()
        prev_state_ids = dict(prev_state_ids)

        prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})

        # create a new state group as a delta from the existing one.
        prev_group = context.state_group
        state_group = await self.state_store.store_state_group(
            event.event_id,
            event.room_id,
            prev_group=prev_group,
            delta_ids=state_updates,
            current_state_ids=current_state_ids,
        )

        return EventContext.with_state(
            state_group=state_group,
            state_group_before_event=context.state_group_before_event,
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
            prev_group=prev_group,
            delta_ids=state_updates,
        )

    async def construct_auth_difference(
        self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
    ) -> Dict:
        """
        Given a local and remote auth chain, find the differences. This
        assumes that we have already processed all events in remote_auth

        Params:
            local_auth (list)
            remote_auth (list)

        Returns:
            dict
        """

        logger.debug("construct_auth_difference Start!")

        # TODO: Make sure we are OK with local_auth or remote_auth having more
        # auth events in them than strictly necessary.

        # Sort key: (depth, event_id) gives a deterministic total order used by
        # the merge-style walk below.
        def sort_fun(ev):
            return ev.depth, ev.event_id

        logger.debug("construct_auth_difference after sort_fun!")

        # We find the differences by starting at the "bottom" of each list
        # and iterating up on both lists. The lists are ordered by depth and
        # then event_id, we iterate up both lists until we find the event ids
        # don't match. Then we look at depth/event_id to see which side is
        # missing that event, and iterate only up that list. Repeat.
remote_list = list(remote_auth) remote_list.sort(key=sort_fun) local_list = list(local_auth) local_list.sort(key=sort_fun) local_iter = iter(local_list) remote_iter = iter(remote_list) logger.debug("construct_auth_difference before get_next!") def get_next(it, opt=None): try: return next(it) except Exception: return opt current_local = get_next(local_iter) current_remote = get_next(remote_iter) logger.debug("construct_auth_difference before while") missing_remotes = [] missing_locals = [] while current_local or current_remote: if current_remote is None: missing_locals.append(current_local) current_local = get_next(local_iter) continue if current_local is None: missing_remotes.append(current_remote) current_remote = get_next(remote_iter) continue if current_local.event_id == current_remote.event_id: current_local = get_next(local_iter) current_remote = get_next(remote_iter) continue if current_local.depth < current_remote.depth: missing_locals.append(current_local) current_local = get_next(local_iter) continue if current_local.depth > current_remote.depth: missing_remotes.append(current_remote) current_remote = get_next(remote_iter) continue # They have the same depth, so we fall back to the event_id order if current_local.event_id < current_remote.event_id: missing_locals.append(current_local) current_local = get_next(local_iter) if current_local.event_id > current_remote.event_id: missing_remotes.append(current_remote) current_remote = get_next(remote_iter) continue logger.debug("construct_auth_difference after while") # missing locals should be sent to the server # We should find why we are missing remotes, as they will have been # rejected. # Remove events from missing_remotes if they are referencing a missing # remote. We only care about the "root" rejected ones. 
missing_remote_ids = [e.event_id for e in missing_remotes] base_remote_rejected = list(missing_remotes) for e in missing_remotes: for e_id in e.auth_event_ids(): if e_id in missing_remote_ids: try: base_remote_rejected.remove(e) except ValueError: pass reason_map = {} for e in base_remote_rejected: reason = await self.store.get_rejection_reason(e.event_id) if reason is None: # TODO: e is not in the current state, so we should # construct some proof of that. continue reason_map[e.event_id] = reason logger.debug("construct_auth_difference returning") return { "auth_chain": local_auth, "rejects": { e.event_id: {"reason": reason_map[e.event_id], "proof": None} for e in base_remote_rejected }, "missing": [e.event_id for e in missing_locals], } @log_function async def exchange_third_party_invite( self, sender_user_id, target_user_id, room_id, signed ): third_party_invite = {"signed": signed} event_dict = { "type": EventTypes.Member, "content": { "membership": Membership.INVITE, "third_party_invite": third_party_invite, }, "room_id": room_id, "sender": sender_user_id, "state_key": target_user_id, } if await self.auth.check_host_in_room(room_id, self.hs.hostname): room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new(room_version, event_dict) EventValidator().validate_builder(builder) event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) event, context = await self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) EventValidator().validate_new(event, self.config) # We need to tell the transaction queue to send this out, even # though the sender isn't a local user. 
event.internal_metadata.send_on_behalf_of = self.hs.hostname try: await self.auth.check_from_context(room_version, event, context) except AuthError as e: logger.warning("Denying new third party invite %r because %s", event, e) raise e await self._check_signature(event, context) # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() await member_handler.send_membership_event(None, event, context) else: destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)} await self.federation_client.forward_third_party_invite( destinations, room_id, event_dict ) async def on_exchange_third_party_invite_request( self, event_dict: JsonDict ) -> None: """Handle an exchange_third_party_invite request from a remote server The remote server will call this when it wants to turn a 3pid invite into a normal m.room.member invite. Args: event_dict: Dictionary containing the event body. """ assert_params_in_dict(event_dict, ["room_id"]) room_version = await self.store.get_room_version_id(event_dict["room_id"]) # NB: event_dict has a particular specced format we might need to fudge # if we change event formats too much. builder = self.event_builder_factory.new(room_version, event_dict) event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) event, context = await self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) try: await self.auth.check_from_context(room_version, event, context) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) raise e await self._check_signature(event, context) # We need to tell the transaction queue to send this out, even # though the sender isn't a local user. 
event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender) # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() await member_handler.send_membership_event(None, event, context) async def add_display_name_to_third_party_invite( self, room_version, event_dict, event, context ): key = ( EventTypes.ThirdPartyInvite, event.content["third_party_invite"]["signed"]["token"], ) original_invite = None prev_state_ids = await context.get_prev_state_ids() original_invite_id = prev_state_ids.get(key) if original_invite_id: original_invite = await self.store.get_event( original_invite_id, allow_none=True ) if original_invite: # If the m.room.third_party_invite event's content is empty, it means the # invite has been revoked. In this case, we don't have to raise an error here # because the auth check will fail on the invite (because it's not able to # fetch public keys from the m.room.third_party_invite event's content, which # is empty). display_name = original_invite.content.get("display_name") event_dict["content"]["third_party_invite"]["display_name"] = display_name else: logger.info( "Could not find invite event for third_party_invite: %r", event_dict ) # We don't discard here as this is not the appropriate place to do # auth checks. If we need the invite and don't have it then the # auth check code will explode appropriately. builder = self.event_builder_factory.new(room_version, event_dict) EventValidator().validate_builder(builder) event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) EventValidator().validate_new(event, self.config) return (event, context) async def _check_signature(self, event, context): """ Checks that the signature in the event is consistent with its invite. 
Args: event (Event): The m.room.member event to check context (EventContext): Raises: AuthError: if signature didn't match any keys, or key has been revoked, SynapseError: if a transient error meant a key couldn't be checked for revocation. """ signed = event.content["third_party_invite"]["signed"] token = signed["token"] prev_state_ids = await context.get_prev_state_ids() invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token)) invite_event = None if invite_event_id: invite_event = await self.store.get_event(invite_event_id, allow_none=True) if not invite_event: raise AuthError(403, "Could not find invite") logger.debug("Checking auth on event %r", event.content) last_exception = None # type: Optional[Exception] # for each public key in the 3pid invite event for public_key_object in self.hs.get_auth().get_public_keys(invite_event): try: # for each sig on the third_party_invite block of the actual invite for server, signature_block in signed["signatures"].items(): for key_name, encoded_signature in signature_block.items(): if not key_name.startswith("ed25519:"): continue logger.debug( "Attempting to verify sig with key %s from %r " "against pubkey %r", key_name, server, public_key_object, ) try: public_key = public_key_object["public_key"] verify_key = decode_verify_key_bytes( key_name, decode_base64(public_key) ) verify_signed_json(signed, server, verify_key) logger.debug( "Successfully verified sig with key %s from %r " "against pubkey %r", key_name, server, public_key_object, ) except Exception: logger.info( "Failed to verify sig with key %s from %r " "against pubkey %r", key_name, server, public_key_object, ) raise try: if "key_validity_url" in public_key_object: await self._check_key_revocation( public_key, public_key_object["key_validity_url"] ) except Exception: logger.info( "Failed to query key_validity_url %s", public_key_object["key_validity_url"], ) raise return except Exception as e: last_exception = e if last_exception is None: # we 
can only get here if get_public_keys() returned an empty list # TODO: make this better raise RuntimeError("no public key in invite event") raise last_exception async def _check_key_revocation(self, public_key, url): """ Checks whether public_key has been revoked. Args: public_key (str): base-64 encoded public key. url (str): Key revocation URL. Raises: AuthError: if they key has been revoked. SynapseError: if a transient error meant a key couldn't be checked for revocation. """ try: response = await self.http_client.get_json(url, {"public_key": public_key}) except Exception: raise SynapseError(502, "Third party certificate could not be checked") if "valid" not in response or not response["valid"]: raise AuthError(403, "Third party certificate was invalid") async def persist_events_and_notify( self, room_id: str, event_and_contexts: Sequence[Tuple[EventBase, EventContext]], backfilled: bool = False, ) -> int: """Persists events and tells the notifier/pushers about them, if necessary. Args: room_id: The room ID of events being persisted. event_and_contexts: Sequence of events with their associated context that should be persisted. All events must belong to the same room. backfilled: Whether these events are a result of backfilling or not """ instance = self.config.worker.events_shard_config.get_instance(room_id) if instance != self._instance_name: result = await self._send_events( instance_name=instance, store=self.store, room_id=room_id, event_and_contexts=event_and_contexts, backfilled=backfilled, ) return result["max_stream_id"] else: assert self.storage.persistence # Note that this returns the events that were persisted, which may not be # the same as were passed in if some were deduplicated due to transaction IDs. events, max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) if self._ephemeral_messages_enabled: for event in events: # If there's an expiry timestamp on the event, schedule its expiry. 
                    self._message_handler.maybe_schedule_expiry(event)

            if not backfilled:  # Never notify for backfilled events
                for event in events:
                    await self._notify_persisted_event(event, max_stream_token)

            return max_stream_token.stream

    async def _notify_persisted_event(
        self, event: EventBase, max_stream_token: RoomStreamToken
    ) -> None:
        """Checks to see if notifier/pushers should be notified about the
        event or not.

        Args:
            event: The event that was just persisted.
            max_stream_token: The max_stream_id returned by persist_events
        """

        extra_users = []
        if event.type == EventTypes.Member:
            target_user_id = event.state_key

            # We notify for memberships if its an invite for one of our
            # users
            if event.internal_metadata.is_outlier():
                if event.membership != Membership.INVITE:
                    if not self.is_mine_id(target_user_id):
                        return

            target_user = UserID.from_string(target_user_id)
            extra_users.append(target_user)
        elif event.internal_metadata.is_outlier():
            # Outlier non-membership events are not notified at all.
            return

        # the event has been persisted so it should have a stream ordering.
        assert event.internal_metadata.stream_ordering

        event_pos = PersistedEventPosition(
            self._instance_name, event.internal_metadata.stream_ordering
        )
        self.notifier.on_new_room_event(
            event, event_pos, max_stream_token, extra_users=extra_users
        )

    async def _clean_room_for_join(self, room_id: str) -> None:
        """Called to clean up any data in DB for a given room, ready for the
        server to join the room.

        Args:
            room_id: The room to purge local data for.
        """
        if self.config.worker_app:
            # NOTE(review): _clean_room_for_join_client presumably proxies the
            # cleanup to the process that owns the database — confirm against
            # where that attribute is assigned.
            await self._clean_room_for_join_client(room_id)
        else:
            await self.store.clean_room_for_join(room_id)

    async def get_room_complexity(
        self, remote_room_hosts: List[str], room_id: str
    ) -> Optional[dict]:
        """
        Fetch the complexity of a remote room over federation.

        Args:
            remote_room_hosts (list[str]): The remote servers to ask.
            room_id (str): The room ID to ask about.

        Returns:
            Dict contains the complexity metric versions, while None means we
            could not fetch the complexity.
        """
        # Try each candidate server in turn, returning the first answer.
        for host in remote_room_hosts:
            res = await self.federation_client.get_room_complexity(host, room_id)

            # We got a result, return it.
            if res:
                return res

        # We fell off the bottom, couldn't get the complexity from anyone. Oh
        # well.
        return None
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_7
crossvul-python_data_bad_1315_1
# -*- coding: utf-8 -*-
# See https://zulip.readthedocs.io/en/latest/subsystems/thumbnailing.html
import base64
import os
import sys
# Fix: import the submodule explicitly — `import urllib` alone does not
# guarantee `urllib.parse` is available, yet urljoin below relies on it.
import urllib.parse

from django.conf import settings
from libthumbor import CryptoURL

# Make the repository root importable so the zthumbor helpers below resolve.
# Fix: the original passed the *string* '__file__' to os.path.abspath, which
# resolves against the current working directory instead of this module's
# location, producing a bogus ZULIP_PATH.
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)

from zthumbor.loaders.helpers import (
    THUMBOR_S3_TYPE, THUMBOR_LOCAL_FILE_TYPE, THUMBOR_EXTERNAL_TYPE
)
from zerver.lib.camo import get_camo_url

def is_thumbor_enabled() -> bool:
    """Whether a thumbor server is configured (THUMBOR_URL non-empty)."""
    return settings.THUMBOR_URL != ''

def user_uploads_or_external(url: str) -> bool:
    """Return True for URLs we are willing to thumbnail: absolute http(s)
    URLs or local user-upload paths.

    NOTE(review): the bare ``startswith('http')`` prefix test accepts *any*
    caller-supplied absolute URL, so a crafted ``url`` can point thumbor at an
    arbitrary host (open-redirect/SSRF exposure, CWE-601). Callers should
    validate the URL before trusting this — flagged, not changed here, since
    rejecting external URLs would change behavior for legitimate callers.
    """
    return url.startswith('http') or url.lstrip('/').startswith('user_uploads/')

def get_source_type(url: str) -> str:
    """Classify where thumbor should load the image from: external URL,
    local-disk upload, or S3 upload (depending on LOCAL_UPLOADS_DIR)."""
    if not url.startswith('/user_uploads/'):
        return THUMBOR_EXTERNAL_TYPE

    local_uploads_dir = settings.LOCAL_UPLOADS_DIR
    if local_uploads_dir:
        return THUMBOR_LOCAL_FILE_TYPE
    return THUMBOR_S3_TYPE

def generate_thumbnail_url(path: str,
                           size: str='0x0',
                           is_camo_url: bool=False) -> str:
    """Return a URL serving a thumbnail of ``path``.

    Args:
        path: a user-upload path or an absolute http(s) image URL.
        size: '<width>x<height>'; '0x0' requests the original size.
        is_camo_url: True when generating a URL for the camo proxy
            (disables smart cropping, forces full quality).

    Returns:
        The input path unchanged when thumbor is disabled or the path is not
        thumbnail-able; otherwise a signed thumbor URL (relative when thumbor
        runs locally behind the nginx /thumbor proxy pass).

    Raises:
        ValueError: if ``size`` is not of the form '<int>x<int>'.
    """
    if not (path.startswith('https://') or path.startswith('http://')):
        path = '/' + path

    if not is_thumbor_enabled():
        # Without thumbor, plain-http images still get proxied through camo
        # to avoid mixed-content warnings.
        if path.startswith('http://'):
            return get_camo_url(path)
        return path

    if not user_uploads_or_external(path):
        return path

    source_type = get_source_type(path)
    # The image URL is base64-encoded into the thumbor request path.
    safe_url = base64.urlsafe_b64encode(path.encode()).decode('utf-8')
    image_url = '%s/source_type/%s' % (safe_url, source_type)
    width, height = map(int, size.split('x'))

    # CryptoURL signs the request with THUMBOR_KEY so thumbor rejects
    # URLs we did not generate.
    crypto = CryptoURL(key=settings.THUMBOR_KEY)

    smart_crop_enabled = True
    apply_filters = ['no_upscale()']
    if is_camo_url:
        smart_crop_enabled = False
        apply_filters.append('quality(100)')
    if size != '0x0':
        apply_filters.append('sharpen(0.5,0.2,true)')

    encrypted_url = crypto.generate(
        width=width,
        height=height,
        smart=smart_crop_enabled,
        filters=apply_filters,
        image_url=image_url
    )

    if settings.THUMBOR_URL == 'http://127.0.0.1:9995':
        # If THUMBOR_URL is the default then thumbor is hosted on same machine
        # as the Zulip server and we should serve a relative URL.
        # We add a /thumbor in front of the relative url because we make
        # use of a proxy pass to redirect request internally in Nginx to 9995
        # port where thumbor is running.
        thumbnail_url = '/thumbor' + encrypted_url
    else:
        thumbnail_url = urllib.parse.urljoin(settings.THUMBOR_URL, encrypted_url)
    return thumbnail_url
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1315_1
crossvul-python_data_good_4386_0
"""Tornado handlers for logging into the Jupyter Server."""

# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

import re
import os
import uuid

from urllib.parse import urlparse

from tornado.escape import url_escape

from .security import passwd_check, set_password

from ..base.handlers import JupyterHandler


class LoginHandler(JupyterHandler):
    """The basic tornado login handler

    authenticates with a hashed password from the configuration.
    """
    def _render(self, message=None):
        # Render the login page, preserving any requested post-login
        # redirect target in the `next` field (escaped for safety).
        self.write(self.render_template('login.html',
                next=url_escape(self.get_argument('next', default=self.base_url)),
                message=message,
        ))

    def _redirect_safe(self, url, default=None):
        """Redirect if url is on our PATH

        Full-domain redirects are allowed if they pass our CORS origin checks.

        Otherwise use default (self.base_url if unspecified).
        """
        if default is None:
            default = self.base_url
        # protect chrome users from mishandling unescaped backslashes.
        # \ is not valid in urls, but some browsers treat it as /
        # instead of %5C, causing `\\` to behave as `//`
        url = url.replace("\\", "%5C")
        parsed = urlparse(url)
        if parsed.netloc or not (parsed.path + "/").startswith(self.base_url):
            # require that next_url be absolute path within our path
            allow = False
            # OR pass our cross-origin check
            if parsed.netloc:
                # if full URL, run our cross-origin check:
                origin = '%s://%s' % (parsed.scheme, parsed.netloc)
                origin = origin.lower()
                if self.allow_origin:
                    allow = self.allow_origin == origin
                elif self.allow_origin_pat:
                    allow = bool(self.allow_origin_pat.match(origin))
            if not allow:
                # not allowed, use default
                self.log.warning("Not allowing login redirect to %r" % url)
                url = default
        self.redirect(url)

    def get(self):
        # Already-authenticated users skip the login page and are sent
        # straight to their (validated) destination.
        if self.current_user:
            next_url = self.get_argument('next', default=self.base_url)
            self._redirect_safe(next_url)
        else:
            self._render()

    @property
    def hashed_password(self):
        # Hashed password from the tornado settings ('' if none configured).
        return self.password_from_settings(self.settings)

    def passwd_check(self, a, b):
        # Thin wrapper over .security.passwd_check (hashed vs typed password).
        return passwd_check(a, b)

    def post(self):
        typed_password = self.get_argument('password', default=u'')
        new_password = self.get_argument('new_password', default=u'')

        if self.get_login_available(self.settings):
            if self.passwd_check(self.hashed_password, typed_password) and not new_password:
                self.set_login_cookie(self, uuid.uuid4().hex)
            elif self.token and self.token == typed_password:
                # Logging in with the token also permits setting a new
                # password (when allow_password_change is enabled).
                self.set_login_cookie(self, uuid.uuid4().hex)
                if new_password and self.settings.get("allow_password_change"):
                    config_dir = self.settings.get("config_dir")
                    config_file = os.path.join(
                        config_dir, "jupyter_notebook_config.json"
                    )
                    set_password(new_password, config_file=config_file)
                    self.log.info("Wrote hashed password to %s" % config_file)
            else:
                self.set_status(401)
                self._render(message={'error': 'Invalid credentials'})
                return

        next_url = self.get_argument('next', default=self.base_url)
        self._redirect_safe(next_url)

    @classmethod
    def set_login_cookie(cls, handler, user_id=None):
        """Call this on handlers to set the login cookie for success"""
        cookie_options = handler.settings.get('cookie_options', {})
        cookie_options.setdefault('httponly', True)
        # tornado <4.2 has a bug that considers secure==True as soon as
        # 'secure' kwarg is passed to set_secure_cookie
        if handler.settings.get('secure_cookie', handler.request.protocol == 'https'):
            cookie_options.setdefault('secure', True)
        cookie_options.setdefault('path', handler.base_url)
        handler.set_secure_cookie(handler.cookie_name, user_id, **cookie_options)
        return user_id

    # Matches "token <value>" Authorization headers (case-insensitive).
    auth_header_pat = re.compile('token\s+(.+)', re.IGNORECASE)

    @classmethod
    def get_token(cls, handler):
        """Get the user token from a request

        Default:

        - in URL parameters: ?token=<token>
        - in header: Authorization: token <token>
        """

        user_token = handler.get_argument('token', '')
        if not user_token:
            # get it from Authorization header
            m = cls.auth_header_pat.match(handler.request.headers.get('Authorization', ''))
            if m:
                user_token = m.group(1)
        return user_token

    @classmethod
    def should_check_origin(cls, handler):
        """Should the Handler check for CORS origin validation?

        Origin check should be skipped for token-authenticated requests.

        Returns:
        - True, if Handler must check for valid CORS origin.
        - False, if Handler should skip origin check since requests are token-authenticated.
        """
        return not cls.is_token_authenticated(handler)

    @classmethod
    def is_token_authenticated(cls, handler):
        """Returns True if handler has been token authenticated. Otherwise, False.

        Login with a token is used to signal certain things, such as:

        - permit access to REST API
        - xsrf protection
        - skip origin-checks for scripts
        """
        if getattr(handler, '_user_id', None) is None:
            # ensure get_user has been called, so we know if we're token-authenticated
            handler.get_current_user()
        return getattr(handler, '_token_authenticated', False)

    @classmethod
    def get_user(cls, handler):
        """Called by handlers.get_current_user for identifying the current user.

        See tornado.web.RequestHandler.get_current_user for details.
        """
        # Can't call this get_current_user because it will collide when
        # called on LoginHandler itself.
        if getattr(handler, '_user_id', None):
            return handler._user_id
        # Token auth takes precedence over the login cookie.
        user_id = cls.get_user_token(handler)
        if user_id is None:
            get_secure_cookie_kwargs = handler.settings.get('get_secure_cookie_kwargs', {})
            user_id = handler.get_secure_cookie(handler.cookie_name, **get_secure_cookie_kwargs)
        else:
            cls.set_login_cookie(handler, user_id)
            # Record that the current request has been authenticated with a token.
            # Used in is_token_authenticated above.
            handler._token_authenticated = True
        if user_id is None:
            # If an invalid cookie was sent, clear it to prevent unnecessary
            # extra warnings. But don't do this on a request with *no* cookie,
            # because that can erroneously log you out (see gh-3365)
            if handler.get_cookie(handler.cookie_name) is not None:
                handler.log.warning("Clearing invalid/expired login cookie %s", handler.cookie_name)
                handler.clear_login_cookie()
            if not handler.login_available:
                # Completely insecure! No authentication at all.
                # No need to warn here, though; validate_security will have already done that.
                user_id = 'anonymous'

        # cache value for future retrievals on the same request
        handler._user_id = user_id
        return user_id

    @classmethod
    def get_user_token(cls, handler):
        """Identify the user based on a token in the URL or Authorization header

        Returns:
        - uuid if authenticated
        - None if not
        """
        token = handler.token
        if not token:
            # Token auth disabled entirely.
            return
        # check login token from URL argument or Authorization header
        user_token = cls.get_token(handler)
        authenticated = False
        if user_token == token:
            # token-authenticated, set the login cookie
            handler.log.debug("Accepting token-authenticated connection from %s", handler.request.remote_ip)
            authenticated = True

        if authenticated:
            # A fresh opaque user id; the cookie is what persists the session.
            return uuid.uuid4().hex
        else:
            return None

    @classmethod
    def validate_security(cls, app, ssl_options=None):
        """Check the application's security.

        Show messages, or abort if necessary, based on the security configuration.
        """
        if not app.ip:
            warning = "WARNING: The Jupyter server is listening on all IP addresses"
            if ssl_options is None:
                app.log.warning(warning + " and not using encryption. This "
                                "is not recommended.")
            if not app.password and not app.token:
                app.log.warning(warning + " and not using authentication. "
                                "This is highly insecure and not recommended.")
        else:
            if not app.password and not app.token:
                app.log.warning(
                    "All authentication is disabled."
                    "  Anyone who can connect to this server will be able to run code.")

    @classmethod
    def password_from_settings(cls, settings):
        """Return the hashed password from the tornado settings.

        If there is no configured password, an empty string will be returned.
        """
        return settings.get('password', u'')

    @classmethod
    def get_login_available(cls, settings):
        """Whether this LoginHandler is needed - and therefore whether the login page should be displayed."""
        return bool(cls.password_from_settings(settings) or settings.get('token'))
./CrossVul/dataset_final_sorted/CWE-601/py/good_4386_0
crossvul-python_data_good_3250_2
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Common REST-API plumbing for security_monkey: CORS origin whitelist,
# flask-restful marshalling field maps, the AuthenticatedService base
# resource, and the after-request hook that issues the CSRF cookie.

from security_monkey import app, db
from flask_wtf.csrf import generate_csrf

from security_monkey.auth.models import RBACRole
from security_monkey.decorators import crossdomain

from flask_restful import fields, marshal, Resource, reqparse
from flask_login import current_user

# Origins allowed to make cross-domain requests. All HTTPS except the
# plain-port-80 entry at the end.
ORIGINS = [
    'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('WEB_PORT')),
    # Adding this next one so you can also access the dart UI by prepending /static to the path.
    'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('API_PORT')),
    'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('NGINX_PORT')),
    'https://{}:80'.format(app.config.get('FQDN'))
]

##### Marshal Datastructures #####

# Used by RevisionGet, RevisionList, ItemList
REVISION_FIELDS = {
    'id': fields.Integer,
    'date_created': fields.String,
    'date_last_ephemeral_change': fields.String,
    'active': fields.Boolean,
    'item_id': fields.Integer
}

# Used by RevisionList, ItemGet, ItemList
ITEM_FIELDS = {
    'id': fields.Integer,
    'region': fields.String,
    'name': fields.String
}

# Used by ItemList, Justify
AUDIT_FIELDS = {
    'id': fields.Integer,
    'score': fields.Integer,
    'issue': fields.String,
    'notes': fields.String,
    'justified': fields.Boolean,
    'justification': fields.String,
    'justified_date': fields.String,
    'item_id': fields.Integer
}

## Single Use Marshal Objects ##

# SINGLE USE - RevisionGet
REVISION_COMMENT_FIELDS = {
    'id': fields.Integer,
    'revision_id': fields.Integer,
    'date_created': fields.String,
    'text': fields.String
}

# SINGLE USE - ItemGet
ITEM_COMMENT_FIELDS = {
    'id': fields.Integer,
    'date_created': fields.String,
    'text': fields.String,
    'item_id': fields.Integer
}

# SINGLE USE - UserSettings
USER_SETTINGS_FIELDS = {
    # 'id': fields.Integer,
    'daily_audit_email': fields.Boolean,
    'change_reports': fields.String
}

# SINGLE USE - AccountGet
ACCOUNT_FIELDS = {
    'id': fields.Integer,
    'name': fields.String,
    'identifier': fields.String,
    'notes': fields.String,
    'active': fields.Boolean,
    'third_party': fields.Boolean,
    'account_type': fields.String
}

USER_FIELDS = {
    'id': fields.Integer,
    'active': fields.Boolean,
    'email': fields.String,
    'role': fields.String,
    'confirmed_at': fields.String,
    'daily_audit_email': fields.Boolean,
    'change_reports': fields.String,
    'last_login_at': fields.String,
    'current_login_at': fields.String,
    'login_count': fields.Integer,
    'last_login_ip': fields.String,
    'current_login_ip': fields.String
}

ROLE_FIELDS = {
    'id': fields.Integer,
    'name': fields.String,
    'description': fields.String,
}

WHITELIST_FIELDS = {
    'id': fields.Integer,
    'name': fields.String,
    'notes': fields.String,
    'cidr': fields.String
}

IGNORELIST_FIELDS = {
    'id': fields.Integer,
    'prefix': fields.String,
    'notes': fields.String,
}

AUDITORSETTING_FIELDS = {
    'id': fields.Integer,
    'disabled': fields.Boolean,
    'issue_text': fields.String
}

ITEM_LINK_FIELDS = {
    'id': fields.Integer,
    'name': fields.String
}


class AuthenticatedService(Resource):
    # Base class for API resources that need to know who the caller is.
    # Populates self.auth_dict with the caller's identity and effective roles
    # (including RBAC parent roles), or with a login URL when anonymous.
    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(AuthenticatedService, self).__init__()
        self.auth_dict = dict()
        if current_user.is_authenticated:
            roles_marshal = []
            for role in current_user.roles:
                roles_marshal.append(marshal(role.__dict__, ROLE_FIELDS))
            roles_marshal.append({"name": current_user.role})
            # Include every ancestor of the user's primary RBAC role so
            # permission checks can match on inherited roles.
            for role in RBACRole.roles[current_user.role].get_parents():
                roles_marshal.append({"name": role.name})
            self.auth_dict = {
                "authenticated": True,
                "user": current_user.email,
                "roles": roles_marshal
            }
        else:
            # Anonymous caller: point them at the login endpoint. The scheme/
            # port depend on whether nginx fronts the API.
            if app.config.get('FRONTED_BY_NGINX'):
                url = "https://{}:{}{}".format(app.config.get('FQDN'), app.config.get('NGINX_PORT'), '/login')
            else:
                url = "http://{}:{}{}".format(app.config.get('FQDN'), app.config.get('API_PORT'), '/login')
            self.auth_dict = {
                "authenticated": False,
                "user": None,
                "url": url
            }


@app.after_request
@crossdomain(allowed_origins=ORIGINS)
def after(response):
    # Refresh the CSRF token cookie on every response so the JS frontend can
    # echo it back in the XSRF header.
    response.set_cookie('XSRF-COOKIE', generate_csrf())
    return response
./CrossVul/dataset_final_sorted/CWE-601/py/good_3250_2
crossvul-python_data_good_1915_10
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from typing import List, Optional

from netaddr import AddrFormatError, IPAddress, IPSet
from zope.interface import implementer

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import (
    IProtocolFactory,
    IReactorCore,
    IStreamClientEndpoint,
)
from twisted.web.client import URI, Agent, HTTPConnectionPool
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer

from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http.client import BlacklistingAgentWrapper
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util import Clock

logger = logging.getLogger(__name__)


@implementer(IAgent)
class MatrixFederationAgent:
    """An Agent-like thing which provides a `request` method which correctly
    handles resolving matrix server names when using matrix://. Handles standard
    https URIs as normal.

    Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)

    Args:
        reactor: twisted reactor to use for underlying requests

        tls_client_options_factory:
            factory to use for fetching client tls options, or none to disable TLS.

        user_agent:
            The user agent header to use for federation requests.

        _srv_resolver:
            SrvResolver implementation to use for looking up SRV records. None
            to use a default implementation.

        _well_known_resolver:
            WellKnownResolver to use to perform well-known lookups. None to use a
            default implementation.
    """

    def __init__(
        self,
        reactor: IReactorCore,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        user_agent: bytes,
        ip_blacklist: IPSet,
        _srv_resolver: Optional[SrvResolver] = None,
        _well_known_resolver: Optional[WellKnownResolver] = None,
    ):
        self._reactor = reactor
        self._clock = Clock(reactor)
        self._pool = HTTPConnectionPool(reactor)
        self._pool.retryAutomatically = False
        self._pool.maxPersistentPerHost = 5
        self._pool.cachedConnectionTimeout = 2 * 60

        # The main agent routes connections through MatrixHostnameEndpoint,
        # which handles SRV-based server-name resolution.
        self._agent = Agent.usingEndpointFactory(
            self._reactor,
            MatrixHostnameEndpointFactory(
                reactor, tls_client_options_factory, _srv_resolver
            ),
            pool=self._pool,
        )
        self.user_agent = user_agent

        if _well_known_resolver is None:
            # Note that the name resolver has already been wrapped in a
            # IPBlacklistingResolver by MatrixFederationHttpClient.
            _well_known_resolver = WellKnownResolver(
                self._reactor,
                agent=BlacklistingAgentWrapper(
                    Agent(
                        self._reactor,
                        pool=self._pool,
                        contextFactory=tls_client_options_factory,
                    ),
                    self._reactor,
                    ip_blacklist=ip_blacklist,
                ),
                user_agent=self.user_agent,
            )

        self._well_known_resolver = _well_known_resolver

    @defer.inlineCallbacks
    def request(
        self,
        method: bytes,
        uri: bytes,
        headers: Optional[Headers] = None,
        bodyProducer: Optional[IBodyProducer] = None,
    ) -> defer.Deferred:
        """
        Args:
            method: HTTP method: GET/POST/etc
            uri: Absolute URI to be retrieved
            headers:
                HTTP headers to send with the request, or None to send no extra headers.
            bodyProducer:
                An object which can generate bytes to make up the body of this
                request (for example, the properly encoded contents of a file for
                a file upload). Or None if the request is to have no body.
        Returns:
            Deferred[twisted.web.iweb.IResponse]:
                fires when the header of the response has been received
                (regardless of the response status code). Fails if there
                is any problem which prevents that response from being received
                (including problems that prevent the request from being sent).
        """
        # We use urlparse as that will set `port` to None if there is no
        # explicit port.
        parsed_uri = urllib.parse.urlparse(uri)

        # There must be a valid hostname.
        assert parsed_uri.hostname

        # If this is a matrix:// URI check if the server has delegated matrix
        # traffic using well-known delegation.
        #
        # We have to do this here and not in the endpoint as we need to rewrite
        # the host header with the delegated server name.
        delegated_server = None
        if (
            parsed_uri.scheme == b"matrix"
            and not _is_ip_literal(parsed_uri.hostname)
            and not parsed_uri.port
        ):
            well_known_result = yield defer.ensureDeferred(
                self._well_known_resolver.get_well_known(parsed_uri.hostname)
            )
            delegated_server = well_known_result.delegated_server

        if delegated_server:
            # Ok, the server has delegated matrix traffic to somewhere else, so
            # lets rewrite the URL to replace the server with the delegated
            # server name.
            uri = urllib.parse.urlunparse(
                (
                    parsed_uri.scheme,
                    delegated_server,
                    parsed_uri.path,
                    parsed_uri.params,
                    parsed_uri.query,
                    parsed_uri.fragment,
                )
            )
            parsed_uri = urllib.parse.urlparse(uri)

        # We need to make sure the host header is set to the netloc of the
        # server and that a user-agent is provided.
        if headers is None:
            headers = Headers()
        else:
            headers = headers.copy()

        if not headers.hasHeader(b"host"):
            headers.addRawHeader(b"host", parsed_uri.netloc)
        if not headers.hasHeader(b"user-agent"):
            headers.addRawHeader(b"user-agent", self.user_agent)

        res = yield make_deferred_yieldable(
            self._agent.request(method, uri, headers, bodyProducer)
        )

        return res


@implementer(IAgentEndpointFactory)
class MatrixHostnameEndpointFactory:
    """Factory for MatrixHostnameEndpoint for parsing to an Agent.
    """

    def __init__(
        self,
        reactor: IReactorCore,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        srv_resolver: Optional[SrvResolver],
    ):
        self._reactor = reactor
        self._tls_client_options_factory = tls_client_options_factory

        if srv_resolver is None:
            srv_resolver = SrvResolver()

        self._srv_resolver = srv_resolver

    def endpointForURI(self, parsed_uri):
        # IAgentEndpointFactory interface: build an endpoint for this URI.
        return MatrixHostnameEndpoint(
            self._reactor,
            self._tls_client_options_factory,
            self._srv_resolver,
            parsed_uri,
        )


@implementer(IStreamClientEndpoint)
class MatrixHostnameEndpoint:
    """An endpoint that resolves matrix:// URLs using Matrix server name
    resolution (i.e. via SRV). Does not check for well-known delegation.

    Args:
        reactor: twisted reactor to use for underlying requests

        tls_client_options_factory:
            factory to use for fetching client tls options, or none to disable TLS.

        srv_resolver: The SRV resolver to use

        parsed_uri: The parsed URI that we're wanting to connect to.
    """

    def __init__(
        self,
        reactor: IReactorCore,
        tls_client_options_factory: Optional[FederationPolicyForHTTPS],
        srv_resolver: SrvResolver,
        parsed_uri: URI,
    ):
        self._reactor = reactor

        self._parsed_uri = parsed_uri

        # set up the TLS connection params
        #
        # XXX disabling TLS is really only supported here for the benefit of the
        # unit tests. We should make the UTs cope with TLS rather than having to make
        # the code support the unit tests.
        if tls_client_options_factory is None:
            self._tls_options = None
        else:
            self._tls_options = tls_client_options_factory.get_options(
                self._parsed_uri.host
            )

        self._srv_resolver = srv_resolver

    def connect(self, protocol_factory: IProtocolFactory) -> defer.Deferred:
        """Implements IStreamClientEndpoint interface
        """

        return run_in_background(self._do_connect, protocol_factory)

    async def _do_connect(self, protocol_factory: IProtocolFactory) -> None:
        # Try each resolved (host, port) in turn, remembering the first
        # failure so the most interesting error surfaces if all fail.
        first_exception = None

        server_list = await self._resolve_server()

        for server in server_list:
            host = server.host
            port = server.port

            try:
                logger.debug("Connecting to %s:%i", host.decode("ascii"), port)
                endpoint = HostnameEndpoint(self._reactor, host, port)
                if self._tls_options:
                    endpoint = wrapClientTLS(self._tls_options, endpoint)
                result = await make_deferred_yieldable(
                    endpoint.connect(protocol_factory)
                )

                return result
            except Exception as e:
                logger.info(
                    "Failed to connect to %s:%i: %s", host.decode("ascii"), port, e
                )
                if not first_exception:
                    first_exception = e

        # We return the first failure because that's probably the most interesting.
        if first_exception:
            raise first_exception

        # This shouldn't happen as we should always have at least one host/port
        # to try and if that doesn't work then we'll have an exception.
        raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,))

    async def _resolve_server(self) -> List[Server]:
        """Resolves the server name to a list of hosts and ports to attempt to
        connect to.
        """

        if self._parsed_uri.scheme != b"matrix":
            return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)]

        # Note: We don't do well-known lookup as that needs to have happened
        # before now, due to needing to rewrite the Host header of the HTTP
        # request.

        # We reparse the URI so that defaultPort is -1 rather than 80
        parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes())

        host = parsed_uri.hostname
        port = parsed_uri.port

        # If there is an explicit port or the host is an IP address we bypass
        # SRV lookups and just use the given host/port.
        if port or _is_ip_literal(host):
            return [Server(host, port or 8448)]

        server_list = await self._srv_resolver.resolve_service(b"_matrix._tcp." + host)

        if server_list:
            return server_list

        # No SRV records, so we fallback to host and 8448
        return [Server(host, 8448)]


def _is_ip_literal(host: bytes) -> bool:
    """Test if the given host name is either an IPv4 or IPv6 literal.

    Args:
        host: The host name to check

    Returns:
        True if the hostname is an IP address literal.
    """

    host_str = host.decode("ascii")

    try:
        IPAddress(host_str)
        return True
    except AddrFormatError:
        return False
crossvul-python_data_good_1915_6
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import urllib
from typing import Any, Dict, Optional

from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.urls import (
    FEDERATION_UNSTABLE_PREFIX,
    FEDERATION_V1_PREFIX,
    FEDERATION_V2_PREFIX,
)
from synapse.logging.utils import log_function

logger = logging.getLogger(__name__)


# NOTE(review): this chunk is truncated at the end of the excerpt — the final
# method (get_group_category) and the _create_v1_path/_create_v2_path helpers
# it references are cut off mid-definition.
class TransportLayerClient:
    """Sends federation HTTP requests to other servers"""

    def __init__(self, hs):
        self.server_name = hs.hostname
        self.client = hs.get_federation_http_client()

    @log_function
    def get_room_state_ids(self, destination, room_id, event_id):
        """ Requests all state for a given room from the given server at the
        given event. Returns the state's event_id's

        Args:
            destination (str): The host name of the remote homeserver we want
                to get the state from.
            context (str): The name of the context we want the state of
            event_id (str): The event we want the context at.

        Returns:
            Awaitable: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_room_state_ids dest=%s, room=%s", destination, room_id)

        path = _create_v1_path("/state_ids/%s", room_id)
        return self.client.get_json(
            destination,
            path=path,
            args={"event_id": event_id},
            try_trailing_slash_on_400=True,
        )

    @log_function
    def get_event(self, destination, event_id, timeout=None):
        """ Requests the pdu with give id and origin from the given server.

        Args:
            destination (str): The host name of the remote homeserver we want
                to get the state from.
            event_id (str): The id of the event being requested.
            timeout (int): How long to try (in ms) the destination for before
                giving up. None indicates no timeout.

        Returns:
            Awaitable: Results in a dict received from the remote homeserver.
        """
        logger.debug("get_pdu dest=%s, event_id=%s", destination, event_id)

        path = _create_v1_path("/event/%s", event_id)
        return self.client.get_json(
            destination, path=path, timeout=timeout, try_trailing_slash_on_400=True
        )

    @log_function
    def backfill(self, destination, room_id, event_tuples, limit):
        """ Requests `limit` previous PDUs in a given context before list of
        PDUs.

        Args:
            dest (str)
            room_id (str)
            event_tuples (list)
            limit (int)

        Returns:
            Awaitable: Results in a dict received from the remote homeserver.
        """
        logger.debug(
            "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
            destination,
            room_id,
            event_tuples,
            str(limit),
        )

        if not event_tuples:
            # TODO: raise?
            return

        path = _create_v1_path("/backfill/%s", room_id)

        args = {"v": event_tuples, "limit": [str(limit)]}

        return self.client.get_json(
            destination, path=path, args=args, try_trailing_slash_on_400=True
        )

    @log_function
    async def send_transaction(self, transaction, json_data_callback=None):
        """ Sends the given Transaction to its destination

        Args:
            transaction (Transaction)

        Returns:
            Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body.

            Fails with ``HTTPRequestException`` if we get an HTTP response
            code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
            to retry this server.

            Fails with ``FederationDeniedError`` if this destination
            is not on our federation whitelist
        """
        logger.debug(
            "send_data dest=%s, txid=%s",
            transaction.destination,
            transaction.transaction_id,
        )

        if transaction.destination == self.server_name:
            raise RuntimeError("Transport layer cannot send to itself!")

        # FIXME: This is only used by the tests. The actual json sent is
        # generated by the json_data_callback.
        json_data = transaction.get_dict()

        path = _create_v1_path("/send/%s", transaction.transaction_id)

        response = await self.client.put_json(
            transaction.destination,
            path=path,
            data=json_data,
            json_data_callback=json_data_callback,
            long_retries=True,
            backoff_on_404=True,  # If we get a 404 the other side has gone
            try_trailing_slash_on_400=True,
        )

        return response

    @log_function
    async def make_query(
        self, destination, query_type, args, retry_on_dns_fail, ignore_backoff=False
    ):
        # Generic federation /query/<type> request.
        path = _create_v1_path("/query/%s", query_type)

        content = await self.client.get_json(
            destination=destination,
            path=path,
            args=args,
            retry_on_dns_fail=retry_on_dns_fail,
            timeout=10000,
            ignore_backoff=ignore_backoff,
        )

        return content

    @log_function
    async def make_membership_event(
        self, destination, room_id, user_id, membership, params
    ):
        """Asks a remote server to build and sign us a membership event

        Note that this does not append any events to any graphs.

        Args:
            destination (str): address of remote homeserver
            room_id (str): room to join/leave
            user_id (str): user to be joined/left
            membership (str): one of join/leave
            params (dict[str, str|Iterable[str]]): Query parameters to include in the
                request.

        Returns:
            Succeeds when we get a 2xx HTTP response. The result
            will be the decoded JSON body (ie, the new event).

            Fails with ``HTTPRequestException`` if we get an HTTP response
            code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
            to retry this server.

            Fails with ``FederationDeniedError`` if the remote destination
            is not in our federation whitelist
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE}
        if membership not in valid_memberships:
            raise RuntimeError(
                "make_membership_event called with membership='%s', must be one of %s"
                % (membership, ",".join(valid_memberships))
            )
        path = _create_v1_path("/make_%s/%s/%s", membership, room_id, user_id)

        ignore_backoff = False
        retry_on_dns_fail = False

        if membership == Membership.LEAVE:
            # we particularly want to do our best to send leave events. The
            # problem is that if it fails, we won't retry it later, so if the
            # remote server was just having a momentary blip, the room will be
            # out of sync.
            ignore_backoff = True
            retry_on_dns_fail = True

        content = await self.client.get_json(
            destination=destination,
            path=path,
            args=params,
            retry_on_dns_fail=retry_on_dns_fail,
            timeout=20000,
            ignore_backoff=ignore_backoff,
        )

        return content

    @log_function
    async def send_join_v1(self, destination, room_id, event_id, content):
        path = _create_v1_path("/send_join/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=content
        )

        return response

    @log_function
    async def send_join_v2(self, destination, room_id, event_id, content):
        path = _create_v2_path("/send_join/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=content
        )

        return response

    @log_function
    async def send_leave_v1(self, destination, room_id, event_id, content):
        path = _create_v1_path("/send_leave/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination,
            path=path,
            data=content,
            # we want to do our best to send this through. The problem is
            # that if it fails, we won't retry it later, so if the remote
            # server was just having a momentary blip, the room will be out of
            # sync.
            ignore_backoff=True,
        )

        return response

    @log_function
    async def send_leave_v2(self, destination, room_id, event_id, content):
        path = _create_v2_path("/send_leave/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination,
            path=path,
            data=content,
            # we want to do our best to send this through. The problem is
            # that if it fails, we won't retry it later, so if the remote
            # server was just having a momentary blip, the room will be out of
            # sync.
            ignore_backoff=True,
        )

        return response

    @log_function
    async def send_invite_v1(self, destination, room_id, event_id, content):
        path = _create_v1_path("/invite/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

        return response

    @log_function
    async def send_invite_v2(self, destination, room_id, event_id, content):
        path = _create_v2_path("/invite/%s/%s", room_id, event_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

        return response

    @log_function
    async def get_public_rooms(
        self,
        remote_server: str,
        limit: Optional[int] = None,
        since_token: Optional[str] = None,
        search_filter: Optional[Dict] = None,
        include_all_networks: bool = False,
        third_party_instance_id: Optional[str] = None,
    ):
        """Get the list of public rooms from a remote homeserver

        See synapse.federation.federation_client.FederationClient.get_public_rooms for
        more information.
        """
        if search_filter:
            # this uses MSC2197 (Search Filtering over Federation)
            path = _create_v1_path("/publicRooms")

            data = {
                "include_all_networks": "true" if include_all_networks else "false"
            }  # type: Dict[str, Any]
            if third_party_instance_id:
                data["third_party_instance_id"] = third_party_instance_id
            if limit:
                data["limit"] = str(limit)
            if since_token:
                data["since"] = since_token

            data["filter"] = search_filter

            try:
                response = await self.client.post_json(
                    destination=remote_server, path=path, data=data, ignore_backoff=True
                )
            except HttpResponseException as e:
                if e.code == 403:
                    raise SynapseError(
                        403,
                        "You are not allowed to view the public rooms list of %s"
                        % (remote_server,),
                        errcode=Codes.FORBIDDEN,
                    )
                raise
        else:
            path = _create_v1_path("/publicRooms")

            args = {
                "include_all_networks": "true" if include_all_networks else "false"
            }  # type: Dict[str, Any]
            if third_party_instance_id:
                args["third_party_instance_id"] = (third_party_instance_id,)
            if limit:
                args["limit"] = [str(limit)]
            if since_token:
                args["since"] = [since_token]

            try:
                response = await self.client.get_json(
                    destination=remote_server, path=path, args=args, ignore_backoff=True
                )
            except HttpResponseException as e:
                if e.code == 403:
                    raise SynapseError(
                        403,
                        "You are not allowed to view the public rooms list of %s"
                        % (remote_server,),
                        errcode=Codes.FORBIDDEN,
                    )
                raise

        return response

    @log_function
    async def exchange_third_party_invite(self, destination, room_id, event_dict):
        path = _create_v1_path("/exchange_third_party_invite/%s", room_id)

        response = await self.client.put_json(
            destination=destination, path=path, data=event_dict
        )

        return response

    @log_function
    async def get_event_auth(self, destination, room_id, event_id):
        path = _create_v1_path("/event_auth/%s/%s", room_id, event_id)

        content = await self.client.get_json(destination=destination, path=path)

        return content

    @log_function
    async def query_client_keys(self, destination, query_content, timeout):
        """Query the device keys for a list of user ids hosted on a remote
        server.

        The request body maps user ids to lists of device ids; the response
        contains per-device keys plus (optionally) cross-signing master and
        self-signing keys.

        Args:
            destination(str): The server to query.
            query_content(dict): The user ids to query.
        Returns:
            A dict containing device and cross-signing keys.
        """
        path = _create_v1_path("/user/keys/query")

        content = await self.client.post_json(
            destination=destination, path=path, data=query_content, timeout=timeout
        )
        return content

    @log_function
    async def query_user_devices(self, destination, user_id, timeout):
        """Query the devices for a user id hosted on a remote server.

        The response contains a stream_id, the device list, and (optionally)
        cross-signing master and self-signing keys.

        Args:
            destination(str): The server to query.
            query_content(dict): The user ids to query.
        Returns:
            A dict containing device and cross-signing keys.
        """
        path = _create_v1_path("/user/devices/%s", user_id)

        content = await self.client.get_json(
            destination=destination, path=path, timeout=timeout
        )
        return content

    @log_function
    async def claim_client_keys(self, destination, query_content, timeout):
        """Claim one-time keys for a list of devices hosted on a remote server.

        The request maps user ids to device ids to the desired algorithm; the
        response maps the same back to `"<algorithm>:<key_id>": "<key_base64>"`.

        Args:
            destination(str): The server to query.
            query_content(dict): The user ids to query.
        Returns:
            A dict containing the one-time keys.
        """
        path = _create_v1_path("/user/keys/claim")

        content = await self.client.post_json(
            destination=destination, path=path, data=query_content, timeout=timeout
        )
        return content

    @log_function
    async def get_missing_events(
        self,
        destination,
        room_id,
        earliest_events,
        latest_events,
        limit,
        min_depth,
        timeout,
    ):
        path = _create_v1_path("/get_missing_events/%s", room_id)

        content = await self.client.post_json(
            destination=destination,
            path=path,
            data={
                "limit": int(limit),
                "min_depth": int(min_depth),
                "earliest_events": earliest_events,
                "latest_events": latest_events,
            },
            timeout=timeout,
        )

        return content

    @log_function
    def get_group_profile(self, destination, group_id, requester_user_id):
        """Get a group profile
        """
        path = _create_v1_path("/groups/%s/profile", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def update_group_profile(self, destination, group_id, requester_user_id, content):
        """Update a remote group profile

        Args:
            destination (str)
            group_id (str)
            requester_user_id (str)
            content (dict): The new profile of the group
        """
        path = _create_v1_path("/groups/%s/profile", group_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def get_group_summary(self, destination, group_id, requester_user_id):
        """Get a group summary
        """
        path = _create_v1_path("/groups/%s/summary", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_rooms_in_group(self, destination, group_id, requester_user_id):
        """Get all rooms in a group
        """
        path = _create_v1_path("/groups/%s/rooms", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    def add_room_to_group(
        self, destination, group_id, requester_user_id, room_id, content
    ):
        """Add a room to a group
        """
        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    def update_room_in_group(
        self, destination, group_id, requester_user_id, room_id, config_key, content
    ):
        """Update room in group
        """
        path = _create_v1_path(
            "/groups/%s/room/%s/config/%s", group_id, room_id, config_key
        )

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
        """Remove a room from a group
        """
        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)

        return self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_users_in_group(self, destination, group_id, requester_user_id):
        """Get users in a group
        """
        path = _create_v1_path("/groups/%s/users", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_invited_users_in_group(self, destination, group_id, requester_user_id):
        """Get users that have been invited to a group
        """
        path = _create_v1_path("/groups/%s/invited_users", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def accept_group_invite(self, destination, group_id, user_id, content):
        """Accept a group invite
        """
        path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id)

        return self.client.post_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

    @log_function
    def join_group(self, destination, group_id, user_id, content):
        """Attempts to join a group
        """
        path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)

        return self.client.post_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

    @log_function
    def invite_to_group(
        self, destination, group_id, user_id, requester_user_id, content
    ):
        """Invite a user to a group
        """
        path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def invite_to_group_notification(self, destination, group_id, user_id, content):
        """Sent by group server to inform a user's server that they have been
        invited.
        """

        path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)

        return self.client.post_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

    @log_function
    def remove_user_from_group(
        self, destination, group_id, requester_user_id, user_id, content
    ):
        """Remove a user from a group
        """
        path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def remove_user_from_group_notification(
        self, destination, group_id, user_id, content
    ):
        """Sent by group server to inform a user's server that they have been
        kicked from the group.
        """

        path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)

        return self.client.post_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

    @log_function
    def renew_group_attestation(self, destination, group_id, user_id, content):
        """Sent by either a group server or a user's server to periodically update
        the attestations
        """

        path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)

        return self.client.post_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

    @log_function
    def update_group_summary_room(
        self, destination, group_id, user_id, room_id, category_id, content
    ):
        """Update a room entry in a group summary
        """
        if category_id:
            path = _create_v1_path(
                "/groups/%s/summary/categories/%s/rooms/%s",
                group_id,
                category_id,
                room_id,
            )
        else:
            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def delete_group_summary_room(
        self, destination, group_id, user_id, room_id, category_id
    ):
        """Delete a room entry in a group summary
        """
        if category_id:
            path = _create_v1_path(
                "/groups/%s/summary/categories/%s/rooms/%s",
                group_id,
                category_id,
                room_id,
            )
        else:
            path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id)

        return self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_group_categories(self, destination, group_id, requester_user_id):
        """Get all categories in a group
        """
        path = _create_v1_path("/groups/%s/categories", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_group_category(self, destination, group_id, requester_user_id, category_id):
        """Get category info in a group
        """
        path =  # NOTE(review): chunk truncated here — the statement continues past this excerpt
_create_v1_path("/groups/%s/categories/%s", group_id, category_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def update_group_category( self, destination, group_id, requester_user_id, category_id, content ): """Update a category in a group """ path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def delete_group_category( self, destination, group_id, requester_user_id, category_id ): """Delete a category in a group """ path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) return self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def get_group_roles(self, destination, group_id, requester_user_id): """Get all roles in a group """ path = _create_v1_path("/groups/%s/roles", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def get_group_role(self, destination, group_id, requester_user_id, role_id): """Get a roles info """ path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def update_group_role( self, destination, group_id, requester_user_id, role_id, content ): """Update a role in a group """ path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def delete_group_role(self, destination, group_id, requester_user_id, role_id): 
"""Delete a role in a group """ path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) return self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def update_group_summary_user( self, destination, group_id, requester_user_id, user_id, role_id, content ): """Update a users entry in a group """ if role_id: path = _create_v1_path( "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id ) else: path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def set_group_join_policy(self, destination, group_id, requester_user_id, content): """Sets the join policy for a group """ path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id) return self.client.put_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def delete_group_summary_user( self, destination, group_id, requester_user_id, user_id, role_id ): """Delete a users entry in a group """ if role_id: path = _create_v1_path( "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id ) else: path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id) return self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) def bulk_get_publicised_groups(self, destination, user_ids): """Get the groups a list of users are publicising """ path = _create_v1_path("/get_groups_publicised") content = {"user_ids": user_ids} return self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) def get_room_complexity(self, destination, room_id): """ Args: destination (str): The remote server room_id (str): The room ID to ask 
about. """ path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id) return self.client.get_json(destination=destination, path=path) def _create_path(federation_prefix, path, *args): """ Ensures that all args are url encoded. """ return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) def _create_v1_path(path, *args): """Creates a path against V1 federation API from the path template and args. Ensures that all args are url encoded. Example: _create_v1_path("/event/%s", event_id) Args: path (str): String template for the path args: ([str]): Args to insert into path. Each arg will be url encoded Returns: str """ return _create_path(FEDERATION_V1_PREFIX, path, *args) def _create_v2_path(path, *args): """Creates a path against V2 federation API from the path template and args. Ensures that all args are url encoded. Example: _create_v2_path("/event/%s", event_id) Args: path (str): String template for the path args: ([str]): Args to insert into path. Each arg will be url encoded Returns: str """ return _create_path(FEDERATION_V2_PREFIX, path, *args)
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_6
crossvul-python_data_bad_1915_6
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import urllib from typing import Any, Dict, Optional from synapse.api.constants import Membership from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.api.urls import ( FEDERATION_UNSTABLE_PREFIX, FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX, ) from synapse.logging.utils import log_function logger = logging.getLogger(__name__) class TransportLayerClient: """Sends federation HTTP requests to other servers""" def __init__(self, hs): self.server_name = hs.hostname self.client = hs.get_http_client() @log_function def get_room_state_ids(self, destination, room_id, event_id): """ Requests all state for a given room from the given server at the given event. Returns the state's event_id's Args: destination (str): The host name of the remote homeserver we want to get the state from. context (str): The name of the context we want the state of event_id (str): The event we want the context at. Returns: Awaitable: Results in a dict received from the remote homeserver. 
""" logger.debug("get_room_state_ids dest=%s, room=%s", destination, room_id) path = _create_v1_path("/state_ids/%s", room_id) return self.client.get_json( destination, path=path, args={"event_id": event_id}, try_trailing_slash_on_400=True, ) @log_function def get_event(self, destination, event_id, timeout=None): """ Requests the pdu with give id and origin from the given server. Args: destination (str): The host name of the remote homeserver we want to get the state from. event_id (str): The id of the event being requested. timeout (int): How long to try (in ms) the destination for before giving up. None indicates no timeout. Returns: Awaitable: Results in a dict received from the remote homeserver. """ logger.debug("get_pdu dest=%s, event_id=%s", destination, event_id) path = _create_v1_path("/event/%s", event_id) return self.client.get_json( destination, path=path, timeout=timeout, try_trailing_slash_on_400=True ) @log_function def backfill(self, destination, room_id, event_tuples, limit): """ Requests `limit` previous PDUs in a given context before list of PDUs. Args: dest (str) room_id (str) event_tuples (list) limit (int) Returns: Awaitable: Results in a dict received from the remote homeserver. """ logger.debug( "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s", destination, room_id, event_tuples, str(limit), ) if not event_tuples: # TODO: raise? return path = _create_v1_path("/backfill/%s", room_id) args = {"v": event_tuples, "limit": [str(limit)]} return self.client.get_json( destination, path=path, args=args, try_trailing_slash_on_400=True ) @log_function async def send_transaction(self, transaction, json_data_callback=None): """ Sends the given Transaction to its destination Args: transaction (Transaction) Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Fails with ``HTTPRequestException`` if we get an HTTP response code >= 300. 
Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ logger.debug( "send_data dest=%s, txid=%s", transaction.destination, transaction.transaction_id, ) if transaction.destination == self.server_name: raise RuntimeError("Transport layer cannot send to itself!") # FIXME: This is only used by the tests. The actual json sent is # generated by the json_data_callback. json_data = transaction.get_dict() path = _create_v1_path("/send/%s", transaction.transaction_id) response = await self.client.put_json( transaction.destination, path=path, data=json_data, json_data_callback=json_data_callback, long_retries=True, backoff_on_404=True, # If we get a 404 the other side has gone try_trailing_slash_on_400=True, ) return response @log_function async def make_query( self, destination, query_type, args, retry_on_dns_fail, ignore_backoff=False ): path = _create_v1_path("/query/%s", query_type) content = await self.client.get_json( destination=destination, path=path, args=args, retry_on_dns_fail=retry_on_dns_fail, timeout=10000, ignore_backoff=ignore_backoff, ) return content @log_function async def make_membership_event( self, destination, room_id, user_id, membership, params ): """Asks a remote server to build and sign us a membership event Note that this does not append any events to any graphs. Args: destination (str): address of remote homeserver room_id (str): room to join/leave user_id (str): user to be joined/left membership (str): one of join/leave params (dict[str, str|Iterable[str]]): Query parameters to include in the request. Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body (ie, the new event). Fails with ``HTTPRequestException`` if we get an HTTP response code >= 300. Fails with ``NotRetryingDestination`` if we are not yet ready to retry this server. 
Fails with ``FederationDeniedError`` if the remote destination is not in our federation whitelist """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: raise RuntimeError( "make_membership_event called with membership='%s', must be one of %s" % (membership, ",".join(valid_memberships)) ) path = _create_v1_path("/make_%s/%s/%s", membership, room_id, user_id) ignore_backoff = False retry_on_dns_fail = False if membership == Membership.LEAVE: # we particularly want to do our best to send leave events. The # problem is that if it fails, we won't retry it later, so if the # remote server was just having a momentary blip, the room will be # out of sync. ignore_backoff = True retry_on_dns_fail = True content = await self.client.get_json( destination=destination, path=path, args=params, retry_on_dns_fail=retry_on_dns_fail, timeout=20000, ignore_backoff=ignore_backoff, ) return content @log_function async def send_join_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/send_join/%s/%s", room_id, event_id) response = await self.client.put_json( destination=destination, path=path, data=content ) return response @log_function async def send_join_v2(self, destination, room_id, event_id, content): path = _create_v2_path("/send_join/%s/%s", room_id, event_id) response = await self.client.put_json( destination=destination, path=path, data=content ) return response @log_function async def send_leave_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/send_leave/%s/%s", room_id, event_id) response = await self.client.put_json( destination=destination, path=path, data=content, # we want to do our best to send this through. The problem is # that if it fails, we won't retry it later, so if the remote # server was just having a momentary blip, the room will be out of # sync. 
ignore_backoff=True, ) return response @log_function async def send_leave_v2(self, destination, room_id, event_id, content): path = _create_v2_path("/send_leave/%s/%s", room_id, event_id) response = await self.client.put_json( destination=destination, path=path, data=content, # we want to do our best to send this through. The problem is # that if it fails, we won't retry it later, so if the remote # server was just having a momentary blip, the room will be out of # sync. ignore_backoff=True, ) return response @log_function async def send_invite_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/invite/%s/%s", room_id, event_id) response = await self.client.put_json( destination=destination, path=path, data=content, ignore_backoff=True ) return response @log_function async def send_invite_v2(self, destination, room_id, event_id, content): path = _create_v2_path("/invite/%s/%s", room_id, event_id) response = await self.client.put_json( destination=destination, path=path, data=content, ignore_backoff=True ) return response @log_function async def get_public_rooms( self, remote_server: str, limit: Optional[int] = None, since_token: Optional[str] = None, search_filter: Optional[Dict] = None, include_all_networks: bool = False, third_party_instance_id: Optional[str] = None, ): """Get the list of public rooms from a remote homeserver See synapse.federation.federation_client.FederationClient.get_public_rooms for more information. 
""" if search_filter: # this uses MSC2197 (Search Filtering over Federation) path = _create_v1_path("/publicRooms") data = { "include_all_networks": "true" if include_all_networks else "false" } # type: Dict[str, Any] if third_party_instance_id: data["third_party_instance_id"] = third_party_instance_id if limit: data["limit"] = str(limit) if since_token: data["since"] = since_token data["filter"] = search_filter try: response = await self.client.post_json( destination=remote_server, path=path, data=data, ignore_backoff=True ) except HttpResponseException as e: if e.code == 403: raise SynapseError( 403, "You are not allowed to view the public rooms list of %s" % (remote_server,), errcode=Codes.FORBIDDEN, ) raise else: path = _create_v1_path("/publicRooms") args = { "include_all_networks": "true" if include_all_networks else "false" } # type: Dict[str, Any] if third_party_instance_id: args["third_party_instance_id"] = (third_party_instance_id,) if limit: args["limit"] = [str(limit)] if since_token: args["since"] = [since_token] try: response = await self.client.get_json( destination=remote_server, path=path, args=args, ignore_backoff=True ) except HttpResponseException as e: if e.code == 403: raise SynapseError( 403, "You are not allowed to view the public rooms list of %s" % (remote_server,), errcode=Codes.FORBIDDEN, ) raise return response @log_function async def exchange_third_party_invite(self, destination, room_id, event_dict): path = _create_v1_path("/exchange_third_party_invite/%s", room_id) response = await self.client.put_json( destination=destination, path=path, data=event_dict ) return response @log_function async def get_event_auth(self, destination, room_id, event_id): path = _create_v1_path("/event_auth/%s/%s", room_id, event_id) content = await self.client.get_json(destination=destination, path=path) return content @log_function async def query_client_keys(self, destination, query_content, timeout): """Query the device keys for a list of user ids 
hosted on a remote server. Request: { "device_keys": { "<user_id>": ["<device_id>"] } } Response: { "device_keys": { "<user_id>": { "<device_id>": {...} } }, "master_key": { "<user_id>": {...} } }, "self_signing_key": { "<user_id>": {...} } } Args: destination(str): The server to query. query_content(dict): The user ids to query. Returns: A dict containing device and cross-signing keys. """ path = _create_v1_path("/user/keys/query") content = await self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) return content @log_function async def query_user_devices(self, destination, user_id, timeout): """Query the devices for a user id hosted on a remote server. Response: { "stream_id": "...", "devices": [ { ... } ], "master_key": { "user_id": "<user_id>", "usage": [...], "keys": {...}, "signatures": { "<user_id>": {...} } }, "self_signing_key": { "user_id": "<user_id>", "usage": [...], "keys": {...}, "signatures": { "<user_id>": {...} } } } Args: destination(str): The server to query. query_content(dict): The user ids to query. Returns: A dict containing device and cross-signing keys. """ path = _create_v1_path("/user/devices/%s", user_id) content = await self.client.get_json( destination=destination, path=path, timeout=timeout ) return content @log_function async def claim_client_keys(self, destination, query_content, timeout): """Claim one-time keys for a list of devices hosted on a remote server. Request: { "one_time_keys": { "<user_id>": { "<device_id>": "<algorithm>" } } } Response: { "device_keys": { "<user_id>": { "<device_id>": { "<algorithm>:<key_id>": "<key_base64>" } } } } Args: destination(str): The server to query. query_content(dict): The user ids to query. Returns: A dict containing the one-time keys. 
""" path = _create_v1_path("/user/keys/claim") content = await self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) return content @log_function async def get_missing_events( self, destination, room_id, earliest_events, latest_events, limit, min_depth, timeout, ): path = _create_v1_path("/get_missing_events/%s", room_id) content = await self.client.post_json( destination=destination, path=path, data={ "limit": int(limit), "min_depth": int(min_depth), "earliest_events": earliest_events, "latest_events": latest_events, }, timeout=timeout, ) return content @log_function def get_group_profile(self, destination, group_id, requester_user_id): """Get a group profile """ path = _create_v1_path("/groups/%s/profile", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def update_group_profile(self, destination, group_id, requester_user_id, content): """Update a remote group profile Args: destination (str) group_id (str) requester_user_id (str) content (dict): The new profile of the group """ path = _create_v1_path("/groups/%s/profile", group_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def get_group_summary(self, destination, group_id, requester_user_id): """Get a group summary """ path = _create_v1_path("/groups/%s/summary", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def get_rooms_in_group(self, destination, group_id, requester_user_id): """Get all rooms in a group """ path = _create_v1_path("/groups/%s/rooms", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) def add_room_to_group( self, 
destination, group_id, requester_user_id, room_id, content ): """Add a room to a group """ path = _create_v1_path("/groups/%s/room/%s", group_id, room_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) def update_room_in_group( self, destination, group_id, requester_user_id, room_id, config_key, content ): """Update room in group """ path = _create_v1_path( "/groups/%s/room/%s/config/%s", group_id, room_id, config_key ) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) def remove_room_from_group(self, destination, group_id, requester_user_id, room_id): """Remove a room from a group """ path = _create_v1_path("/groups/%s/room/%s", group_id, room_id) return self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def get_users_in_group(self, destination, group_id, requester_user_id): """Get users in a group """ path = _create_v1_path("/groups/%s/users", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def get_invited_users_in_group(self, destination, group_id, requester_user_id): """Get users that have been invited to a group """ path = _create_v1_path("/groups/%s/invited_users", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def accept_group_invite(self, destination, group_id, user_id, content): """Accept a group invite """ path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id) return self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function def join_group(self, destination, group_id, 
user_id, content): """Attempts to join a group """ path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id) return self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function def invite_to_group( self, destination, group_id, user_id, requester_user_id, content ): """Invite a user to a group """ path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def invite_to_group_notification(self, destination, group_id, user_id, content): """Sent by group server to inform a user's server that they have been invited. """ path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id) return self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function def remove_user_from_group( self, destination, group_id, requester_user_id, user_id, content ): """Remove a user from a group """ path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @log_function def remove_user_from_group_notification( self, destination, group_id, user_id, content ): """Sent by group server to inform a user's server that they have been kicked from the group. 
""" path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id) return self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function def renew_group_attestation(self, destination, group_id, user_id, content): """Sent by either a group server or a user's server to periodically update the attestations """ path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id) return self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function def update_group_summary_room( self, destination, group_id, user_id, room_id, category_id, content ): """Update a room entry in a group summary """ if category_id: path = _create_v1_path( "/groups/%s/summary/categories/%s/rooms/%s", group_id, category_id, room_id, ) else: path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id) return self.client.post_json( destination=destination, path=path, args={"requester_user_id": user_id}, data=content, ignore_backoff=True, ) @log_function def delete_group_summary_room( self, destination, group_id, user_id, room_id, category_id ): """Delete a room entry in a group summary """ if category_id: path = _create_v1_path( "/groups/%s/summary/categories/%s/rooms/%s", group_id, category_id, room_id, ) else: path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id) return self.client.delete_json( destination=destination, path=path, args={"requester_user_id": user_id}, ignore_backoff=True, ) @log_function def get_group_categories(self, destination, group_id, requester_user_id): """Get all categories in a group """ path = _create_v1_path("/groups/%s/categories", group_id) return self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @log_function def get_group_category(self, destination, group_id, requester_user_id, category_id): """Get category info in a group """ path = 
_create_v1_path("/groups/%s/categories/%s", group_id, category_id)

        # NOTE(review): this chunk begins mid-method; the "path =" assignment
        # for the call above is on the preceding (unseen) lines.
        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def update_group_category(
        self, destination, group_id, requester_user_id, category_id, content
    ):
        """Update a category in a group

        Args:
            destination (str): remote server to send the request to
            group_id (str): the group being modified
            requester_user_id (str): user on whose behalf the request is made
            category_id (str): the category to update
            content (dict): new category data, sent as the request body
        """
        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def delete_group_category(
        self, destination, group_id, requester_user_id, category_id
    ):
        """Delete a category in a group
        """
        path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)

        return self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_group_roles(self, destination, group_id, requester_user_id):
        """Get all roles in a group
        """
        path = _create_v1_path("/groups/%s/roles", group_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def get_group_role(self, destination, group_id, requester_user_id, role_id):
        """Get a roles info
        """
        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)

        return self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def update_group_role(
        self, destination, group_id, requester_user_id, role_id, content
    ):
        """Update a role in a group
        """
        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def delete_group_role(self, destination, group_id, requester_user_id, role_id):
        """Delete a role in a group
        """
        path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)

        return self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    @log_function
    def update_group_summary_user(
        self, destination, group_id, requester_user_id, user_id, role_id, content
    ):
        """Update a users entry in a group
        """
        # A summary entry may optionally be scoped to a role; pick the
        # role-qualified path only when a role_id was supplied.
        if role_id:
            path = _create_v1_path(
                "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
            )
        else:
            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def set_group_join_policy(self, destination, group_id, requester_user_id, content):
        """Sets the join policy for a group
        """
        path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id)

        return self.client.put_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    @log_function
    def delete_group_summary_user(
        self, destination, group_id, requester_user_id, user_id, role_id
    ):
        """Delete a users entry in a group
        """
        # Mirror of update_group_summary_user: role-scoped path when a
        # role_id is given, plain summary path otherwise.
        if role_id:
            path = _create_v1_path(
                "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
            )
        else:
            path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id)

        return self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    def bulk_get_publicised_groups(self, destination, user_ids):
        """Get the groups a list of users are publicising
        """
        path = _create_v1_path("/get_groups_publicised")

        content = {"user_ids": user_ids}

        return self.client.post_json(
            destination=destination, path=path, data=content, ignore_backoff=True
        )

    def get_room_complexity(self, destination, room_id):
        """
        Args:
            destination (str): The remote server
            room_id (str): The room ID to ask about.
        """
        # NOTE(review): this endpoint lives on the unstable prefix, unlike
        # the group endpoints above which are on the v1 prefix.
        path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id)

        return self.client.get_json(destination=destination, path=path)


def _create_path(federation_prefix, path, *args):
    """
    Ensures that all args are url encoded.
    """
    # quote() with safe="" percent-encodes every reserved character,
    # including "/", so a caller-supplied arg cannot inject extra path
    # segments or query parameters into the request path.
    return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)


def _create_v1_path(path, *args):
    """Creates a path against V1 federation API from the path template and
    args. Ensures that all args are url encoded.

    Example:

        _create_v1_path("/event/%s", event_id)

    Args:
        path (str): String template for the path
        args: ([str]): Args to insert into path. Each arg will be url encoded

    Returns:
        str
    """
    return _create_path(FEDERATION_V1_PREFIX, path, *args)


def _create_v2_path(path, *args):
    """Creates a path against V2 federation API from the path template and
    args. Ensures that all args are url encoded.

    Example:

        _create_v2_path("/event/%s", event_id)

    Args:
        path (str): String template for the path
        args: ([str]): Args to insert into path. Each arg will be url encoded

    Returns:
        str
    """
    return _create_path(FEDERATION_V2_PREFIX, path, *args)
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_6
crossvul-python_data_bad_1315_0
# Zulip's main markdown implementation.  See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
from typing import (Any, Callable, Dict, Iterable, List, NamedTuple,
                    Optional, Set, Tuple, TypeVar, Union, cast)
from mypy_extensions import TypedDict
from typing.re import Match, Pattern

import markdown
import logging
import traceback
import urllib
import re
import os
import html
import time
import functools
import ujson
import xml.etree.cElementTree as etree
from xml.etree.cElementTree import Element

from collections import deque, defaultdict

import requests

from django.conf import settings
from django.db.models import Q

from markdown.extensions import codehilite, nl2br, tables
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import translate_emoticons, emoticon_regex
from zerver.lib.mention import possible_mentions, \
    possible_user_group_mentions, extract_user_group
from zerver.lib.url_encoding import encode_stream
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, NotFoundInCache
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
    all_realm_filters,
    get_active_streams,
    MAX_MESSAGE_LENGTH,
    Message,
    Realm,
    realm_filters_for_realm,
    UserProfile,
    UserGroup,
    UserGroupMembership,
)
import zerver.lib.mention as mention
from zerver.lib.tex import render_tex
from zerver.lib.exceptions import BugdownRenderingException

ReturnT = TypeVar('ReturnT')

def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
    '''
        Use this decorator with extreme caution.
        The function you wrap should have no dependency
        on any arguments (no args, no kwargs) nor should
        it depend on any global state.
    '''
    # Memoized result; computed on first call, reused forever after.
    val = None

    def cache_wrapper() -> ReturnT:
        nonlocal val
        if val is None:
            val = method()
        return val
    return cache_wrapper

FullNameInfo = TypedDict('FullNameInfo', {
    'id': int,
    'email': str,
    'full_name': str,
})

DbData = Dict[str, Any]

# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1

_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]

AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'

def verbose_compile(pattern: str) -> Any:
    # Wraps `pattern` so the compiled regex captures the text before and
    # after the match; VERBOSE allows whitespace/comments in the pattern.
    return re.compile(
        "^(.*?)%s(.*?)$" % pattern,
        re.DOTALL | re.UNICODE | re.VERBOSE
    )

def normal_compile(pattern: str) -> Any:
    # Like verbose_compile but for non-verbose patterns; note the trailing
    # group is greedy (.*) here rather than lazy.
    return re.compile(
        r"^(.*?)%s(.*)$" % pattern,
        re.DOTALL | re.UNICODE
    )

STREAM_LINK_REGEX = r"""
                     (?<![^\s'"\(,:<])            # Start after whitespace or specified chars
                     \#\*\*                       # and after hash sign followed by double asterisks
                         (?P<stream_name>[^\*]+)  # stream name can contain anything
                     \*\*                         # ends by double asterisks
                    """

@one_time
def get_compiled_stream_link_regex() -> Pattern:
    return verbose_compile(STREAM_LINK_REGEX)

LINK_REGEX = None  # type: Pattern

def get_web_link_regex() -> str:
    # We create this one time, but not at startup.  So the
    # first message rendered in any process will have some
    # extra costs.  It's roughly 75ms to run this code, so
    # caching the value in LINK_REGEX is super important here.
    global LINK_REGEX
    if LINK_REGEX is not None:
        return LINK_REGEX

    tlds = '|'.join(list_of_tlds())

    # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
    #
    # We detect a url either by the `https?://` or by building around the TLD.

    # In lieu of having a recursive regex (which python doesn't support) to match
    # arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six
    # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
    # and the paren_group matches text with, optionally, a matching set of parens
    inner_paren_contents = r"[^\s()\"]*"
    paren_group = r"""
                    [^\s()\"]*?            # Containing characters that won't end the URL
                    (?: \( %s \)           # and more characters in matched parens
                        [^\s()\"]*?        # followed by more characters
                    )*                     # zero-or-more sets of paired parens
                   """
    nested_paren_chunk = paren_group
    for i in range(6):
        nested_paren_chunk = nested_paren_chunk % (paren_group,)
    nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)

    file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
    REGEX = r"""
        (?<![^\s'"\(,:<])    # Start after whitespace or specified chars
                             # (Double-negative lookbehind to allow start-of-string)
        (?P<url>             # Main group
            (?:(?:           # Domain part
                https?://[\w.:@-]+?   # If it has a protocol, anything goes.
               |(?:                   # Or, if not, be more strict to avoid false-positives
                    (?:[\w-]+\.)+     # One or more domain components, separated by dots
                    (?:%s)            # TLDs (filled in via format from tlds-alpha-by-domain.txt)
                )
            )
            (?:/             # A path, beginning with /
                %s           # zero-to-6 sets of paired parens
            )?)              # Path is optional
            | (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
            %s               # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
            | (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34})  # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
        )
        (?=                            # URL must be followed by (not included in group)
            [!:;\?\),\.\'\"\>]*         # Optional punctuation characters
            (?:\Z|\s)                  # followed by whitespace or end of string
        )
        """ % (tlds, nested_paren_chunk, file_links)
    LINK_REGEX = verbose_compile(REGEX)
    return LINK_REGEX

def clear_state_for_testing() -> None:
    # The link regex never changes in production, but our tests
    # try out both sides of ENABLE_FILE_LINKS, so we need
    # a way to clear it.
    global LINK_REGEX
    LINK_REGEX = None

# NOTE(review): this grabs the *root* logger, not a module logger;
# logging.getLogger(__name__) would be the conventional choice — confirm
# whether other code relies on the root-logger configuration here.
bugdown_logger = logging.getLogger()

def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
    """ If the link points to a local destination we can just switch to that
    instead of opening a new tab. """

    if db_data:
        realm_uri_prefix = db_data['realm_uri'] + "/"
        if link.startswith(realm_uri_prefix):
            # +1 to skip the `/` before the hash link.
            return link[len(realm_uri_prefix):]

    return link

def url_embed_preview_enabled(message: Optional[Message]=None,
                              realm: Optional[Realm]=None,
                              no_previews: Optional[bool]=False) -> bool:
    # Global kill switch first, then the per-call opt-out.
    if not settings.INLINE_URL_EMBED_PREVIEW:
        return False

    if no_previews:
        return False

    if realm is None:
        if message is not None:
            realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_url_embed_preview

def image_preview_enabled(message: Optional[Message]=None,
                          realm: Optional[Realm]=None,
                          no_previews: Optional[bool]=False) -> bool:
    # Mirrors url_embed_preview_enabled, but for inline image previews.
    if not settings.INLINE_IMAGE_PREVIEW:
        return False

    if no_previews:
        return False

    if realm is None:
        if message is not None:
            realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_image_preview

def list_of_tlds() -> List[str]:
    # HACK we manually blacklist a few domains
    blacklist = ['PY\n', "MD\n"]

    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    # NOTE(review): the blacklist comparison depends on the raw line (with
    # its trailing newline) matching exactly, and `tld[0].startswith('#')`
    # only inspects the first character — both look fragile; verify against
    # the file format before touching.
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')
            if tld not in blacklist and not tld[0].startswith('#')]
    # Longest-first so regex alternation prefers the most specific TLD.
    tlds.sort(key=len, reverse=True)
    return tlds

def walk_tree(root: Element,
              processor: Callable[[Element], Optional[_T]],
              stop_after_first: bool=False) -> List[_T]:
    # Breadth-first walk over the element tree (root itself is not
    # processed), collecting non-None processor results.
    results = []
    queue = deque([root])

    while queue:
        currElement = queue.popleft()
        for child in currElement.getchildren():
            if child.getchildren():
                queue.append(child)

            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results

    return results

ElementFamily = NamedTuple('ElementFamily', [
    ('grandparent', Optional[Element]),
    ('parent', Element),
    ('child', Element)
])

ResultWithFamily = NamedTuple('ResultWithFamily', [
    ('family', ElementFamily),
    ('result', Any)
])
ElementPair = NamedTuple('ElementPair', [ ('parent', Optional[Element]), ('value', Element) ]) def walk_tree_with_family(root: Element, processor: Callable[[Element], Optional[_T]] ) -> List[ResultWithFamily]: results = [] queue = deque([ElementPair(parent=None, value=root)]) while queue: currElementPair = queue.popleft() for child in currElementPair.value.getchildren(): if child.getchildren(): queue.append(ElementPair(parent=currElementPair, value=child)) # type: ignore # Lack of Deque support in typing module for Python 3.4.3 result = processor(child) if result is not None: if currElementPair.parent is not None: grandparent_element = cast(ElementPair, currElementPair.parent) grandparent = grandparent_element.value else: grandparent = None family = ElementFamily( grandparent=grandparent, parent=currElementPair.value, child=child ) results.append(ResultWithFamily( family=family, result=result )) return results # height is not actually used def add_a( root: Element, url: str, link: str, title: Optional[str]=None, desc: Optional[str]=None, class_attr: str="message_inline_image", data_id: Optional[str]=None, insertion_index: Optional[int]=None, already_thumbnailed: Optional[bool]=False ) -> None: title = title if title is not None else url_filename(link) title = title if title else "" desc = desc if desc is not None else "" if insertion_index is not None: div = markdown.util.etree.Element("div") root.insert(insertion_index, div) else: div = markdown.util.etree.SubElement(root, "div") div.set("class", class_attr) a = markdown.util.etree.SubElement(div, "a") a.set("href", link) a.set("target", "_blank") a.set("title", title) if data_id is not None: a.set("data-id", data_id) img = markdown.util.etree.SubElement(a, "img") if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url): # See docs/thumbnailing.md for some high-level documentation. 
# # We strip leading '/' from relative URLs here to ensure # consistency in what gets passed to /thumbnail url = url.lstrip('/') img.set("src", "/thumbnail?url={0}&size=thumbnail".format( urllib.parse.quote(url, safe='') )) img.set('data-src-fullsize', "/thumbnail?url={0}&size=full".format( urllib.parse.quote(url, safe='') )) else: img.set("src", url) if class_attr == "message_inline_ref": summary_div = markdown.util.etree.SubElement(div, "div") title_div = markdown.util.etree.SubElement(summary_div, "div") title_div.set("class", "message_inline_image_title") title_div.text = title desc_div = markdown.util.etree.SubElement(summary_div, "desc") desc_div.set("class", "message_inline_image_desc") def add_embed(root: Element, link: str, extracted_data: Dict[str, Any]) -> None: container = markdown.util.etree.SubElement(root, "div") container.set("class", "message_embed") img_link = extracted_data.get('image') if img_link: parsed_img_link = urllib.parse.urlparse(img_link) # Append domain where relative img_link url is given if not parsed_img_link.netloc: parsed_url = urllib.parse.urlparse(link) domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url) img_link = urllib.parse.urljoin(domain, img_link) img = markdown.util.etree.SubElement(container, "a") img.set("style", "background-image: url(" + img_link + ")") img.set("href", link) img.set("target", "_blank") img.set("class", "message_embed_image") data_container = markdown.util.etree.SubElement(container, "div") data_container.set("class", "data-container") title = extracted_data.get('title') if title: title_elm = markdown.util.etree.SubElement(data_container, "div") title_elm.set("class", "message_embed_title") a = markdown.util.etree.SubElement(title_elm, "a") a.set("href", link) a.set("target", "_blank") a.set("title", title) a.text = title description = extracted_data.get('description') if description: description_elm = markdown.util.etree.SubElement(data_container, "div") description_elm.set("class", 
"message_embed_description") description_elm.text = description @cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data") def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]: if settings.TEST_SUITE: from . import testing_mocks res = testing_mocks.twitter(tweet_id) else: creds = { 'consumer_key': settings.TWITTER_CONSUMER_KEY, 'consumer_secret': settings.TWITTER_CONSUMER_SECRET, 'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY, 'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET, } if not all(creds.values()): return None # We lazily import twitter here because its import process is # surprisingly slow, and doing so has a significant impact on # the startup performance of `manage.py` commands. import twitter try: api = twitter.Api(tweet_mode='extended', **creds) # Sometimes Twitter hangs on responses. Timing out here # will cause the Tweet to go through as-is with no inline # preview, rather than having the message be rejected # entirely. This timeout needs to be less than our overall # formatting timeout. tweet = timeout(3, api.GetStatus, tweet_id) res = tweet.AsDict() except AttributeError: bugdown_logger.error('Unable to load twitter api, you may have the wrong ' 'library installed, see https://github.com/zulip/zulip/issues/86') return None except TimeoutExpired: # We'd like to try again later and not cache the bad result, # so we need to re-raise the exception (just as though # we were being rate-limited) raise except twitter.TwitterError as e: t = e.args[0] if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34): # Code 34 means that the message doesn't exist; return # None so that we will cache the error return None elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or t[0]['code'] == 130): # Code 88 means that we were rate-limited and 130 # means Twitter is having capacity issues; either way # just raise the error so we don't cache None and will # try again later. 
raise else: # It's not clear what to do in cases of other errors, # but for now it seems reasonable to log at error # level (so that we get notified), but then cache the # failure to proceed with our usual work bugdown_logger.error(traceback.format_exc()) return None return res HEAD_START_RE = re.compile('^head[ >]') HEAD_END_RE = re.compile('^/head[ >]') META_START_RE = re.compile('^meta[ >]') META_END_RE = re.compile('^/meta[ >]') def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]: in_head = False # HTML will auto close meta tags, when we start the next tag add # a closing tag if it has not been closed yet. last_closed = True head = [] # TODO: What if response content is huge? Should we get headers first? try: content = requests.get(url, timeout=1).text except Exception: return None # Extract the head and meta tags # All meta tags are self closing, have no children or are closed # automatically. for part in content.split('<'): if not in_head and HEAD_START_RE.match(part): # Started the head node output it to have a document root in_head = True head.append('<head>') elif in_head and HEAD_END_RE.match(part): # Found the end of the head close any remaining tag then stop # processing in_head = False if not last_closed: last_closed = True head.append('</meta>') head.append('</head>') break elif in_head and META_START_RE.match(part): # Found a meta node copy it if not last_closed: head.append('</meta>') last_closed = True head.append('<') head.append(part) if '/>' not in part: last_closed = False elif in_head and META_END_RE.match(part): # End of a meta node just copy it to close the tag head.append('<') head.append(part) last_closed = True try: doc = etree.fromstring(''.join(head)) except etree.ParseError: return None og_image = doc.find('meta[@property="og:image"]') og_title = doc.find('meta[@property="og:title"]') og_desc = doc.find('meta[@property="og:description"]') title = None desc = None if og_image is not None: image = og_image.get('content') 
else: return None if og_title is not None: title = og_title.get('content') if og_desc is not None: desc = og_desc.get('content') return {'image': image, 'title': title, 'desc': desc} def get_tweet_id(url: str) -> Optional[str]: parsed_url = urllib.parse.urlparse(url) if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')): return None to_match = parsed_url.path # In old-style twitter.com/#!/wdaher/status/1231241234-style URLs, # we need to look at the fragment instead if parsed_url.path == '/' and len(parsed_url.fragment) > 5: to_match = parsed_url.fragment tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match) if not tweet_id_match: return None return tweet_id_match.group("tweetid") class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor): def run(self, root: Element) -> None: # Get all URLs from the blob found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None) for img in found_imgs: url = img.get("src") if not url.startswith("http://"): # Don't rewrite images on our own site (e.g. emoji). continue img.set("src", get_camo_url(url)) class BacktickPattern(markdown.inlinepatterns.Pattern): """ Return a `<code>` element containing the matching text. 
""" def __init__(self, pattern: str) -> None: markdown.inlinepatterns.Pattern.__init__(self, pattern) self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\'), markdown.util.ETX) self.tag = 'code' def handleMatch(self, m: Match[str]) -> Union[str, Element]: if m.group(4): el = markdown.util.etree.Element(self.tag) # Modified to not strip whitespace el.text = markdown.util.AtomicString(m.group(4)) return el else: return m.group(2).replace('\\\\', self.ESCAPED_BSLASH) class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor): TWITTER_MAX_IMAGE_HEIGHT = 400 TWITTER_MAX_TO_PREVIEW = 3 INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5 def __init__(self, md: markdown.Markdown) -> None: markdown.treeprocessors.Treeprocessor.__init__(self, md) def get_actual_image_url(self, url: str) -> str: # Add specific per-site cases to convert image-preview urls to image urls. # See https://github.com/zulip/zulip/issues/4658 for more information parsed_url = urllib.parse.urlparse(url) if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')): # https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png -> # https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png split_path = parsed_url.path.split('/') if len(split_path) > 3 and split_path[3] == "blob": return urllib.parse.urljoin('https://raw.githubusercontent.com', '/'.join(split_path[0:3] + split_path[4:])) return url def is_image(self, url: str) -> bool: if not self.markdown.image_preview_enabled: return False parsed_url = urllib.parse.urlparse(url) # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093 for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]: if parsed_url.path.lower().endswith(ext): return True return False def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]: # TODO: The returned Dict could possibly be a TypedDict in future. 
parsed_url = urllib.parse.urlparse(url) if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')): is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/') # Only allow preview Dropbox shared links if not (parsed_url.path.startswith('/s/') or parsed_url.path.startswith('/sh/') or is_album): return None # Try to retrieve open graph protocol info for a preview # This might be redundant right now for shared links for images. # However, we might want to make use of title and description # in the future. If the actual image is too big, we might also # want to use the open graph image. image_info = fetch_open_graph_image(url) is_image = is_album or self.is_image(url) # If it is from an album or not an actual image file, # just use open graph image. if is_album or not is_image: # Failed to follow link to find an image preview so # use placeholder image and guess filename if image_info is None: return None image_info["is_image"] = is_image return image_info # Otherwise, try to retrieve the actual image. # This is because open graph image from Dropbox may have padding # and gifs do not work. # TODO: What if image is huge? Should we get headers first? if image_info is None: image_info = dict() image_info['is_image'] = True parsed_url_list = list(parsed_url) parsed_url_list[4] = "dl=1" # Replaces query image_info["image"] = urllib.parse.urlunparse(parsed_url_list) return image_info return None def youtube_id(self, url: str) -> Optional[str]: if not self.markdown.image_preview_enabled: return None # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s # Slightly modified to support URLs of the form youtu.be/<id> # If it matches, match.group(2) is the video id. 
schema_re = r'(?:https?://)' host_re = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)' param_re = r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))' id_re = r'([0-9A-Za-z_-]+)' youtube_re = r'^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$' youtube_re = youtube_re.format(schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re) match = re.match(youtube_re, url) if match is None: return None return match.group(2) def youtube_image(self, url: str) -> Optional[str]: yt_id = self.youtube_id(url) if yt_id is not None: return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,) return None def vimeo_id(self, url: str) -> Optional[str]: if not self.markdown.image_preview_enabled: return None #(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?) # If it matches, match.group('id') is the video id. vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \ r'(?:channels\/(?:\w+\/)?|groups\/' + \ r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$' match = re.match(vimeo_re, url) if match is None: return None return match.group(5) def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]: title = extracted_data.get("title") if title is not None: return "Vimeo - {}".format(title) return None def twitter_text(self, text: str, urls: List[Dict[str, str]], user_mentions: List[Dict[str, Any]], media: List[Dict[str, Any]]) -> Element: """ Use data from the twitter API to turn links, mentions and media into A tags. Also convert unicode emojis to images. This works by using the urls, user_mentions and media data from the twitter API and searching for unicode emojis in the text using `unicode_emoji_regex`. The first step is finding the locations of the URLs, mentions, media and emoji in the text. 
For each match we build a dictionary with type, the start location, end location, the URL to link to, and the text(codepoint and title in case of emojis) to be used in the link(image in case of emojis). Next we sort the matches by start location. And for each we add the text from the end of the last link to the start of the current link to the output. The text needs to added to the text attribute of the first node (the P tag) or the tail the last link created. Finally we add any remaining text to the last node. """ to_process = [] # type: List[Dict[str, Any]] # Build dicts for URLs for url_data in urls: short_url = url_data["url"] full_url = url_data["expanded_url"] for match in re.finditer(re.escape(short_url), text, re.IGNORECASE): to_process.append({ 'type': 'url', 'start': match.start(), 'end': match.end(), 'url': short_url, 'text': full_url, }) # Build dicts for mentions for user_mention in user_mentions: screen_name = user_mention['screen_name'] mention_string = '@' + screen_name for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE): to_process.append({ 'type': 'mention', 'start': match.start(), 'end': match.end(), 'url': 'https://twitter.com/' + urllib.parse.quote(screen_name), 'text': mention_string, }) # Build dicts for media for media_item in media: short_url = media_item['url'] expanded_url = media_item['expanded_url'] for match in re.finditer(re.escape(short_url), text, re.IGNORECASE): to_process.append({ 'type': 'media', 'start': match.start(), 'end': match.end(), 'url': short_url, 'text': expanded_url, }) # Build dicts for emojis for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE): orig_syntax = match.group('syntax') codepoint = unicode_emoji_to_codepoint(orig_syntax) if codepoint in codepoint_to_name: display_string = ':' + codepoint_to_name[codepoint] + ':' to_process.append({ 'type': 'emoji', 'start': match.start(), 'end': match.end(), 'codepoint': codepoint, 'title': display_string, }) to_process.sort(key=lambda 
x: x['start']) p = current_node = markdown.util.etree.Element('p') def set_text(text: str) -> None: """ Helper to set the text or the tail of the current_node """ if current_node == p: current_node.text = text else: current_node.tail = text db_data = self.markdown.zulip_db_data current_index = 0 for item in to_process: # The text we want to link starts in already linked text skip it if item['start'] < current_index: continue # Add text from the end of last link to the start of the current # link set_text(text[current_index:item['start']]) current_index = item['end'] if item['type'] != 'emoji': current_node = elem = url_to_a(db_data, item['url'], item['text']) else: current_node = elem = make_emoji(item['codepoint'], item['title']) p.append(elem) # Add any unused text set_text(text[current_index:]) return p def twitter_link(self, url: str) -> Optional[Element]: tweet_id = get_tweet_id(url) if tweet_id is None: return None try: res = fetch_tweet_data(tweet_id) if res is None: return None user = res['user'] # type: Dict[str, Any] tweet = markdown.util.etree.Element("div") tweet.set("class", "twitter-tweet") img_a = markdown.util.etree.SubElement(tweet, 'a') img_a.set("href", url) img_a.set("target", "_blank") profile_img = markdown.util.etree.SubElement(img_a, 'img') profile_img.set('class', 'twitter-avatar') # For some reason, for, e.g. tweet 285072525413724161, # python-twitter does not give us a # profile_image_url_https, but instead puts that URL in # profile_image_url. So use _https if available, but fall # back gracefully. 
image_url = user.get('profile_image_url_https', user['profile_image_url']) profile_img.set('src', image_url) text = html.unescape(res['full_text']) urls = res.get('urls', []) user_mentions = res.get('user_mentions', []) media = res.get('media', []) # type: List[Dict[str, Any]] p = self.twitter_text(text, urls, user_mentions, media) tweet.append(p) span = markdown.util.etree.SubElement(tweet, 'span') span.text = "- %s (@%s)" % (user['name'], user['screen_name']) # Add image previews for media_item in media: # Only photos have a preview image if media_item['type'] != 'photo': continue # Find the image size that is smaller than # TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest size_name_tuples = list(media_item['sizes'].items()) size_name_tuples.sort(reverse=True, key=lambda x: x[1]['h']) for size_name, size in size_name_tuples: if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT: break media_url = '%s:%s' % (media_item['media_url_https'], size_name) img_div = markdown.util.etree.SubElement(tweet, 'div') img_div.set('class', 'twitter-image') img_a = markdown.util.etree.SubElement(img_div, 'a') img_a.set('href', media_item['url']) img_a.set('target', '_blank') img_a.set('title', media_item['url']) img = markdown.util.etree.SubElement(img_a, 'img') img.set('src', media_url) return tweet except Exception: # We put this in its own try-except because it requires external # connectivity. If Twitter flakes out, we don't want to not-render # the entire message; we just want to not show the Twitter preview. 
bugdown_logger.warning(traceback.format_exc()) return None def get_url_data(self, e: Element) -> Optional[Tuple[str, str]]: if e.tag == "a": if e.text is not None: return (e.get("href"), e.text) return (e.get("href"), e.get("href")) return None def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None: grandparent = found_url.family.grandparent parent = found_url.family.parent ahref_element = found_url.family.child (url, text) = found_url.result actual_url = self.get_actual_image_url(url) # url != text usually implies a named link, which we opt not to remove url_eq_text = (url == text) if parent.tag == 'li': add_a(parent, self.get_actual_image_url(url), url, title=text) if not parent.text and not ahref_element.tail and url_eq_text: parent.remove(ahref_element) elif parent.tag == 'p': parent_index = None for index, uncle in enumerate(grandparent.getchildren()): if uncle is parent: parent_index = index break if parent_index is not None: ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index) add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index) else: # We're not inserting after parent, since parent not found. 
# Append to end of list of grandparent's children as normal add_a(grandparent, actual_url, url, title=text) # If link is alone in a paragraph, delete paragraph containing it if (len(parent.getchildren()) == 1 and (not parent.text or parent.text == "\n") and not ahref_element.tail and url_eq_text): grandparent.remove(parent) else: # If none of the above criteria match, fall back to old behavior add_a(root, actual_url, url, title=text) def find_proper_insertion_index(self, grandparent: Element, parent: Element, parent_index_in_grandparent: int) -> int: # If there are several inline images from same paragraph, ensure that # they are in correct (and not opposite) order by inserting after last # inline image from paragraph 'parent' uncles = grandparent.getchildren() parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")] insertion_index = parent_index_in_grandparent while True: insertion_index += 1 if insertion_index >= len(uncles): return insertion_index uncle = uncles[insertion_index] inline_image_classes = ['message_inline_image', 'message_inline_ref'] if ( uncle.tag != 'div' or 'class' not in uncle.keys() or uncle.attrib['class'] not in inline_image_classes ): return insertion_index uncle_link = list(uncle.iter(tag="a"))[0].attrib['href'] if uncle_link not in parent_links: return insertion_index def is_absolute_url(self, url: str) -> bool: return bool(urllib.parse.urlparse(url).netloc) def run(self, root: Element) -> None: # Get all URLs from the blob found_urls = walk_tree_with_family(root, self.get_url_data) if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE: return rendered_tweet_count = 0 for found_url in found_urls: (url, text) = found_url.result if not self.is_absolute_url(url): if self.is_image(url): self.handle_image_inlining(root, found_url) # We don't have a strong use case for doing url preview for relative links. 
continue dropbox_image = self.dropbox_image(url) if dropbox_image is not None: class_attr = "message_inline_ref" is_image = dropbox_image["is_image"] if is_image: class_attr = "message_inline_image" # Not making use of title and description of images add_a(root, dropbox_image['image'], url, title=dropbox_image.get('title', ""), desc=dropbox_image.get('desc', ""), class_attr=class_attr, already_thumbnailed=True) continue if self.is_image(url): self.handle_image_inlining(root, found_url) continue if get_tweet_id(url) is not None: if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW: # Only render at most one tweet per message continue twitter_data = self.twitter_link(url) if twitter_data is None: # This link is not actually a tweet known to twitter continue rendered_tweet_count += 1 div = markdown.util.etree.SubElement(root, "div") div.set("class", "inline-preview-twitter") div.insert(0, twitter_data) continue youtube = self.youtube_image(url) if youtube is not None: yt_id = self.youtube_id(url) add_a(root, youtube, url, None, None, "youtube-video message_inline_image", yt_id, already_thumbnailed=True) continue db_data = self.markdown.zulip_db_data if db_data and db_data['sent_by_bot']: continue if not self.markdown.url_embed_preview_enabled: continue try: extracted_data = link_preview.link_embed_data_from_cache(url) except NotFoundInCache: self.markdown.zulip_message.links_for_preview.add(url) continue if extracted_data: vm_id = self.vimeo_id(url) if vm_id is not None: vimeo_image = extracted_data.get('image') vimeo_title = self.vimeo_title(extracted_data) if vimeo_image is not None: add_a(root, vimeo_image, url, vimeo_title, None, "vimeo-video message_inline_image", vm_id, already_thumbnailed=True) if vimeo_title is not None: found_url.family.child.text = vimeo_title else: add_embed(root, url, extracted_data) class Avatar(markdown.inlinepatterns.Pattern): def handleMatch(self, match: Match[str]) -> Optional[Element]: img = markdown.util.etree.Element('img') 
        email_address = match.group('email')
        email = email_address.strip().lower()
        profile_id = None

        # Prefer a stable user-id-based avatar URL when the email resolves to
        # a known user in this realm; fall back to the raw email otherwise.
        db_data = self.markdown.zulip_db_data
        if db_data is not None:
            user_dict = db_data['email_info'].get(email)
            if user_dict is not None:
                profile_id = user_dict['id']

        img.set('class', 'message_body_gravatar')
        img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
        img.set('title', email)
        img.set('alt', email)
        return img

def possible_avatar_emails(content: str) -> Set[str]:
    """Return all email addresses referenced by avatar/gravatar syntax in content."""
    emails = set()
    for REGEX in [AVATAR_REGEX, GRAVATAR_REGEX]:
        matches = re.findall(REGEX, content)
        for email in matches:
            if email:
                emails.add(email)

    return emails

# Load the generated emoji name <-> codepoint tables once at import time.
path_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "name_to_codepoint.json")
with open(path_to_name_to_codepoint) as name_to_codepoint_file:
    name_to_codepoint = ujson.load(name_to_codepoint_file)

path_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "codepoint_to_name.json")
with open(path_to_codepoint_to_name) as codepoint_to_name_file:
    codepoint_to_name = ujson.load(codepoint_to_name_file)

# All of our emojis(non ZWJ sequences) belong to one of these unicode blocks:
#    \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
#    \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
#    \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
#    \U0001f600-\U0001f64f - Emoticons (Emoji)
#    \U0001f680-\U0001f6ff - Transport and Map Symbols
#    \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
#    \u2000-\u206f         - General Punctuation
#    \u2300-\u23ff         - Miscellaneous Technical
#    \u2400-\u243f         - Control Pictures
#    \u2440-\u245f         - Optical Character Recognition
#    \u2460-\u24ff         - Enclosed Alphanumerics
#    \u2500-\u257f         - Box Drawing
#    \u2580-\u259f         - Block Elements
#    \u25a0-\u25ff         - Geometric Shapes
#    \u2600-\u26ff         - Miscellaneous Symbols
#    \u2700-\u27bf         - Dingbats
#    \u2900-\u297f         - Supplemental Arrows-B
#    \u2b00-\u2bff         - Miscellaneous Symbols and Arrows
#    \u3000-\u303f         - CJK Symbols and Punctuation
#    \u3200-\u32ff         - Enclosed CJK Letters and Months
unicode_emoji_regex = '(?P<syntax>['\
    '\U0001F100-\U0001F64F' \
    '\U0001F680-\U0001F6FF' \
    '\U0001F900-\U0001F9FF' \
    '\u2000-\u206F' \
    '\u2300-\u27BF' \
    '\u2900-\u297F' \
    '\u2B00-\u2BFF' \
    '\u3000-\u303F' \
    '\u3200-\u32FF' \
    '])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
#    http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript

def make_emoji(codepoint: str, display_string: str) -> Element:
    """Build the <span> element for a unicode emoji with the given codepoint."""
    # Replace underscore in emoji's title with space
    title = display_string[1:-1].replace("_", " ")
    span = markdown.util.etree.Element('span')
    span.set('class', 'emoji emoji-%s' % (codepoint,))
    span.set('title', title)
    span.set('role', 'img')
    span.set('aria-label', title)
    span.text = display_string
    return span

def make_realm_emoji(src: str, display_string: str) -> Element:
    """Build the <img> element for a realm (custom) emoji served from src."""
    elt = markdown.util.etree.Element('img')
    elt.set('src', src)
    elt.set('class', 'emoji')
    elt.set("alt", display_string)
    elt.set("title", display_string[1:-1].replace("_", " "))
    return elt

def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
    """Return the lowercase hex codepoint (no 0x prefix) for a single emoji character."""
    codepoint = hex(ord(unicode_emoji))[2:]
    # Unicode codepoints are minimum of length 4, padded
    # with zeroes if the length is less than four.
while len(codepoint) < 4: codepoint = '0' + codepoint return codepoint class EmoticonTranslation(markdown.inlinepatterns.Pattern): """ Translates emoticons like `:)` into emoji like `:smile:`. """ def handleMatch(self, match: Match[str]) -> Optional[Element]: db_data = self.markdown.zulip_db_data if db_data is None or not db_data['translate_emoticons']: return None emoticon = match.group('emoticon') translated = translate_emoticons(emoticon) name = translated[1:-1] return make_emoji(name_to_codepoint[name], translated) class UnicodeEmoji(markdown.inlinepatterns.Pattern): def handleMatch(self, match: Match[str]) -> Optional[Element]: orig_syntax = match.group('syntax') codepoint = unicode_emoji_to_codepoint(orig_syntax) if codepoint in codepoint_to_name: display_string = ':' + codepoint_to_name[codepoint] + ':' return make_emoji(codepoint, display_string) else: return None class Emoji(markdown.inlinepatterns.Pattern): def handleMatch(self, match: Match[str]) -> Optional[Element]: orig_syntax = match.group("syntax") name = orig_syntax[1:-1] active_realm_emoji = {} # type: Dict[str, Dict[str, str]] db_data = self.markdown.zulip_db_data if db_data is not None: active_realm_emoji = db_data['active_realm_emoji'] if self.markdown.zulip_message and name in active_realm_emoji: return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax) elif name == 'zulip': return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax) elif name in name_to_codepoint: return make_emoji(name_to_codepoint[name], orig_syntax) else: return None def content_has_emoji_syntax(content: str) -> bool: return re.search(EMOJI_REGEX, content) is not None class ModalLink(markdown.inlinepatterns.Pattern): """ A pattern that allows including in-app modal links in messages. 
""" def handleMatch(self, match: Match[str]) -> Element: relative_url = match.group('relative_url') text = match.group('text') a_tag = markdown.util.etree.Element("a") a_tag.set("href", relative_url) a_tag.set("title", relative_url) a_tag.text = text return a_tag class Tex(markdown.inlinepatterns.Pattern): def handleMatch(self, match: Match[str]) -> Element: rendered = render_tex(match.group('body'), is_inline=True) if rendered is not None: return etree.fromstring(rendered.encode('utf-8')) else: # Something went wrong while rendering span = markdown.util.etree.Element('span') span.set('class', 'tex-error') span.text = '$$' + match.group('body') + '$$' return span upload_title_re = re.compile("^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$") def url_filename(url: str) -> str: """Extract the filename if a URL is an uploaded file, or return the original URL""" match = upload_title_re.match(url) if match: return match.group('filename') else: return url def fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None: """Set certain attributes we want on every link.""" if target_blank: link.set('target', '_blank') link.set('title', url_filename(link.get('href'))) def sanitize_url(url: str) -> Optional[str]: """ Sanitize a url against xss attacks. See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url. """ try: parts = urllib.parse.urlparse(url.replace(' ', '%20')) scheme, netloc, path, params, query, fragment = parts except ValueError: # Bad url - so bad it couldn't be parsed. 
return '' # If there is no scheme or netloc and there is a '@' in the path, # treat it as a mailto: and set the appropriate scheme if scheme == '' and netloc == '' and '@' in path: scheme = 'mailto' elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/': # Allow domain-relative links return urllib.parse.urlunparse(('', '', path, params, query, fragment)) elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0: # Allow fragment links return urllib.parse.urlunparse(('', '', '', '', '', fragment)) # Zulip modification: If scheme is not specified, assume http:// # We re-enter sanitize_url because netloc etc. need to be re-parsed. if not scheme: return sanitize_url('http://' + url) locless_schemes = ['mailto', 'news', 'file', 'bitcoin'] if netloc == '' and scheme not in locless_schemes: # This fails regardless of anything else. # Return immediately to save additional processing return None # Upstream code will accept a URL like javascript://foo because it # appears to have a netloc. Additionally there are plenty of other # schemes that do weird things like launch external programs. To be # on the safe side, we whitelist the scheme. if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'): return None # Upstream code scans path, parameters, and query for colon characters # because # # some aliases [for javascript:] will appear to urllib.parse to have # no scheme. On top of that relative links (i.e.: "foo/bar.html") # have no scheme. # # We already converted an empty scheme to http:// above, so we skip # the colon check, which would also forbid a lot of legitimate URLs. # Url passes all tests. Return url as-is. 
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment)) def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]: a = markdown.util.etree.Element('a') href = sanitize_url(url) target_blank = True if href is None: # Rejected by sanitize_url; render it as plain text. return url if text is None: text = markdown.util.AtomicString(url) href = rewrite_local_links_to_relative(db_data, href) target_blank = not href.startswith("#narrow") and not href.startswith('mailto:') a.set('href', href) a.text = text fixup_link(a, target_blank) return a class CompiledPattern(markdown.inlinepatterns.Pattern): def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None: # This is similar to the superclass's small __init__ function, # but we skip the compilation step and let the caller give us # a compiled regex. self.compiled_re = compiled_re self.md = md class AutoLink(CompiledPattern): def handleMatch(self, match: Match[str]) -> ElementStringNone: url = match.group('url') db_data = self.markdown.zulip_db_data return url_to_a(db_data, url) class UListProcessor(markdown.blockprocessors.UListProcessor): """ Process unordered list blocks. Based on markdown.blockprocessors.UListProcessor, but does not accept '+' or '-' as a bullet character.""" TAG = 'ul' RE = re.compile('^[ ]{0,3}[*][ ]+(.*)') def __init__(self, parser: Any) -> None: # HACK: Set the tab length to 2 just for the initialization of # this class, so that bulleted lists (and only bulleted lists) # work off 2-space indentation. parser.markdown.tab_length = 2 super().__init__(parser) parser.markdown.tab_length = 4 class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor): """ Process unordered list blocks. 
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent """ def __init__(self, parser: Any) -> None: # HACK: Set the tab length to 2 just for the initialization of # this class, so that bulleted lists (and only bulleted lists) # work off 2-space indentation. parser.markdown.tab_length = 2 super().__init__(parser) parser.markdown.tab_length = 4 class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor): """ Process BlockQuotes. Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent """ # Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)') RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))' r'[ ]{0,3}>[ ]?(.*)') mention_re = re.compile(mention.find_mentions) def clean(self, line: str) -> str: # Silence all the mentions inside blockquotes line = re.sub(self.mention_re, lambda m: "@_{}".format(m.group('match')), line) # And then run the upstream processor's code for removing the '>' return super().clean(line) class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor): """ Allows unordered list blocks that come directly after a paragraph to be rendered as an unordered list Detects paragraphs that have a matching list item that comes directly after a line of text, and inserts a newline between to satisfy Markdown""" LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE) HANGING_ULIST_RE = re.compile('^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE) def run(self, lines: List[str]) -> List[str]: """ Insert a newline between a paragraph and ulist if missing """ inserts = 0 fence = None copy = lines[:] for i in range(len(lines) - 1): # Ignore anything that is inside a fenced code block m = FENCE_RE.match(lines[i]) if not fence and m: fence = m.group('fence') elif fence and m and fence == m.group('fence'): fence = None # If we're not in a fenced block and we detect an upcoming list # hanging off a paragraph, add a newline if (not fence and lines[i] and 
self.LI_RE.match(lines[i+1]) and not self.LI_RE.match(lines[i])): copy.insert(i+inserts+1, '') inserts += 1 return copy class AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor): """ Finds a sequence of lines numbered by the same number""" RE = re.compile(r'^([ ]*)(\d+)\.[ ]+(.*)') TAB_LENGTH = 2 def run(self, lines: List[str]) -> List[str]: new_lines = [] # type: List[str] current_list = [] # type: List[Match[str]] current_indent = 0 for line in lines: m = self.RE.match(line) # Remember if this line is a continuation of already started list is_next_item = (m and current_list and current_indent == len(m.group(1)) // self.TAB_LENGTH) if not is_next_item: # There is no more items in the list we were processing new_lines.extend(self.renumber(current_list)) current_list = [] if not m: # Ordinary line new_lines.append(line) elif is_next_item: # Another list item current_list.append(m) else: # First list item current_list = [m] current_indent = len(m.group(1)) // self.TAB_LENGTH new_lines.extend(self.renumber(current_list)) return new_lines def renumber(self, mlist: List[Match[str]]) -> List[str]: if not mlist: return [] start_number = int(mlist[0].group(2)) # Change numbers only if every one is the same change_numbers = True for m in mlist: if int(m.group(2)) != start_number: change_numbers = False break lines = [] # type: List[str] counter = start_number for m in mlist: number = str(counter) if change_numbers else m.group(2) lines.append('%s%s. %s' % (m.group(1), number, m.group(3))) counter += 1 return lines # We need the following since upgrade from py-markdown 2.6.11 to 3.0.1 # modifies the link handling significantly. The following is taken from # py-markdown 2.6.11 markdown/inlinepatterns.py. @one_time def get_link_re() -> str: ''' Very important--if you need to change this code to depend on any arguments, you must eliminate the "one_time" decorator and consider performance implications. We only want to compute this value once. 
    '''
    NOBRACKET = r'[^\]\[]*'
    BRK = (
        r'\[(' +
        (NOBRACKET + r'(\[')*6 +
        (NOBRACKET + r'\])*')*6 +
        NOBRACKET + r')\]'
    )
    NOIMG = r'(?<!\!)'

    # [text](url) or [text](<url>) or [text](url "title")
    LINK_RE = NOIMG + BRK + \
        r'''\(\s*(<(?:[^<>\\]|\\.)*>|(\([^()]*\)|[^()])*?)\s*(('(?:[^'\\]|\\.)*'|"(?:[^"\\]|\\.)*")\s*)?\)'''
    return normal_compile(LINK_RE)

def prepare_realm_pattern(source: str) -> str:
    """ Augment a realm filter so it only matches after start-of-string,
        whitespace, or opening delimiters, won't match if there are word
        characters directly after, and saves what was matched as "name". """
    return r"""(?<![^\s'"\(,:<])(?P<name>""" + source + r')(?!\w)'

# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
    """ Applied a given realm filter to the input """

    def __init__(self, source_pattern: str,
                 format_string: str,
                 markdown_instance: Optional[markdown.Markdown]=None) -> None:
        self.pattern = prepare_realm_pattern(source_pattern)
        self.format_string = format_string
        markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)

    def handleMatch(self, m: Match[str]) -> Union[Element, str]:
        db_data = self.markdown.zulip_db_data
        # The URL is built by substituting the match's named groups into the
        # realm filter's format string; the matched text becomes the link text.
        return url_to_a(db_data,
                        self.format_string % m.groupdict(),
                        m.group("name"))

class UserMentionPattern(markdown.inlinepatterns.Pattern):
    """Renders @**name** and silent @_**name** mentions as user-mention spans."""
    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group('match')
        silent = m.group('silent') == '_'

        db_data = self.markdown.zulip_db_data
        if self.markdown.zulip_message and db_data is not None:
            if match.startswith("**") and match.endswith("**"):
                name = match[2:-2]
            else:
                # Not a fully-delimited mention; leave it unrendered.
                return None

            wildcard = mention.user_mention_matches_wildcard(name)

            # `@**name|id**` syntax: resolve by the explicit user id.
            id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
            if id_syntax_match:
                id = id_syntax_match.group("user_id")
                user = db_data['mention_data'].get_user_by_id(id)
            else:
                user = db_data['mention_data'].get_user_by_name(name)

            if wildcard:
                self.markdown.zulip_message.mentions_wildcard = True
                user_id = "*"
            elif user:
                # Silent mentions don't trigger notifications, so they aren't
                # recorded in mentions_user_ids.
                if not silent:
                    self.markdown.zulip_message.mentions_user_ids.add(user['id'])
                name = user['full_name']
                user_id = str(user['id'])
            else:
                # Don't highlight @mentions that don't refer to a valid user
                return None

            el = markdown.util.etree.Element("span")
            el.set('data-user-id', user_id)
            if silent:
                el.set('class', 'user-mention silent')
                el.text = "%s" % (name,)
            else:
                el.set('class', 'user-mention')
                el.text = "@%s" % (name,)
            return el
        return None

class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
    """Renders @*group-name* mentions as user-group-mention spans."""
    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        match = m.group(2)

        db_data = self.markdown.zulip_db_data
        if self.markdown.zulip_message and db_data is not None:
            name = extract_user_group(match)
            user_group = db_data['mention_data'].get_user_group(name)
            if user_group:
                self.markdown.zulip_message.mentions_user_group_ids.add(user_group.id)
                name = user_group.name
                user_group_id = str(user_group.id)
            else:
                # Don't highlight @-mentions that don't refer to a valid user
                # group.
return None el = markdown.util.etree.Element("span") el.set('class', 'user-group-mention') el.set('data-user-group-id', user_group_id) el.text = "@%s" % (name,) return el return None class StreamPattern(CompiledPattern): def find_stream_by_name(self, name: Match[str]) -> Optional[Dict[str, Any]]: db_data = self.markdown.zulip_db_data if db_data is None: return None stream = db_data['stream_names'].get(name) return stream def handleMatch(self, m: Match[str]) -> Optional[Element]: name = m.group('stream_name') if self.markdown.zulip_message: stream = self.find_stream_by_name(name) if stream is None: return None el = markdown.util.etree.Element('a') el.set('class', 'stream') el.set('data-stream-id', str(stream['id'])) # TODO: We should quite possibly not be specifying the # href here and instead having the browser auto-add the # href when it processes a message with one of these, to # provide more clarity to API clients. stream_url = encode_stream(stream['id'], name) el.set('href', '/#narrow/stream/{stream_url}'.format(stream_url=stream_url)) el.text = '#{stream_name}'.format(stream_name=name) return el return None def possible_linked_stream_names(content: str) -> Set[str]: matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE) return set(matches) class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor): def run(self, lines: Iterable[str]) -> Iterable[str]: db_data = self.markdown.zulip_db_data if self.markdown.zulip_message and db_data is not None: # We check for alert words here, the set of which are # dependent on which users may see this message. # # Our caller passes in the list of possible_words. We # don't do any special rendering; we just append the alert words # we find to the set self.markdown.zulip_message.alert_words. 
realm_words = db_data['possible_words'] content = '\n'.join(lines).lower() allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]']) allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]']) for word in realm_words: escaped = re.escape(word.lower()) match_re = re.compile('(?:%s)%s(?:%s)' % (allowed_before_punctuation, escaped, allowed_after_punctuation)) if re.search(match_re, content): self.markdown.zulip_message.alert_words.add(word) return lines # This prevents realm_filters from running on the content of a # Markdown link, breaking up the link. This is a monkey-patch, but it # might be worth sending a version of this change upstream. class AtomicLinkPattern(CompiledPattern): def get_element(self, m: Match[str]) -> Optional[Element]: href = m.group(9) if not href: return None if href[0] == "<": href = href[1:-1] href = sanitize_url(self.unescape(href.strip())) if href is None: return None db_data = self.markdown.zulip_db_data href = rewrite_local_links_to_relative(db_data, href) el = markdown.util.etree.Element('a') el.text = m.group(2) el.set('href', href) fixup_link(el, target_blank=(href[:1] != '#')) return el def handleMatch(self, m: Match[str]) -> Optional[Element]: ret = self.get_element(m) if ret is None: return None if not isinstance(ret, str): ret.text = markdown.util.AtomicString(ret.text) return ret def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry: # Registry is a new class added by py-markdown to replace Ordered List. # Since Registry doesn't support .keys(), it is easier to make a new # object instead of removing keys from the existing object. 
new_r = markdown.util.Registry() for k in keys: new_r.register(r[k], k, r.get_index_for_name(k)) return new_r # These are used as keys ("realm_filters_keys") to md_engines and the respective # realm filter caches DEFAULT_BUGDOWN_KEY = -1 ZEPHYR_MIRROR_BUGDOWN_KEY = -2 class Bugdown(markdown.Markdown): def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None: # define default configs self.config = { "realm_filters": [kwargs['realm_filters'], "Realm-specific filters for realm_filters_key %s" % (kwargs['realm'],)], "realm": [kwargs['realm'], "Realm id"], "code_block_processor_disabled": [kwargs['code_block_processor_disabled'], "Disabled for email gateway"] } super().__init__(*args, **kwargs) self.set_output_format('html') def build_parser(self) -> markdown.Markdown: # Build the parser using selected default features from py-markdown. # The complete list of all available processors can be found in the # super().build_parser() function. # # Note: for any py-markdown updates, manually check if we want any # of the new features added upstream or not; they wouldn't get # included by default. self.preprocessors = self.build_preprocessors() self.parser = self.build_block_parser() self.inlinePatterns = self.build_inlinepatterns() self.treeprocessors = self.build_treeprocessors() self.postprocessors = self.build_postprocessors() self.handle_zephyr_mirror() return self def build_preprocessors(self) -> markdown.util.Registry: # We disable the following preprocessors from upstream: # # html_block - insecure # reference - references don't make sense in a chat context. 
preprocessors = markdown.util.Registry() preprocessors.register(AutoNumberOListPreprocessor(self), 'auto_number_olist', 40) preprocessors.register(BugdownUListPreprocessor(self), 'hanging_ulists', 35) preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30) preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25) preprocessors.register(AlertWordsNotificationProcessor(self), 'custom_text_notifications', 20) return preprocessors def build_block_parser(self) -> markdown.util.Registry: # We disable the following blockparsers from upstream: # # indent - replaced by ours # hashheader - disabled, since headers look bad and don't make sense in a chat context. # setextheader - disabled, since headers look bad and don't make sense in a chat context. # olist - replaced by ours # ulist - replaced by ours # quote - replaced by ours parser = markdown.blockprocessors.BlockParser(self) parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 85) if not self.getConfig('code_block_processor_disabled'): parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 80) # We get priority 75 from 'table' extension parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70) parser.blockprocessors.register(UListProcessor(parser), 'ulist', 65) parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 60) parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55) parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50) return parser def build_inlinepatterns(self) -> markdown.util.Registry: # We disable the following upstream inline patterns: # # backtick - replaced by ours # escape - probably will re-add at some point. 
# link - replaced by ours # image_link - replaced by ours # autolink - replaced by ours # automail - replaced by ours # linebreak - we use nl2br and consider that good enough # html - insecure # reference - references not useful # image_reference - references not useful # short_reference - references not useful # --------------------------------------------------- # strong_em - for these three patterns, # strong2 - we have our own versions where # emphasis2 - we disable _ for bold and emphasis # Declare regexes for clean single line calls to .register(). NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE # Custom strikethrough syntax: ~~foo~~ DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)' # Custom bold syntax: **foo** but not __foo__ # str inside ** must start and end with a word character # it need for things like "const char *x = (char *)y" EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*' ENTITY_RE = markdown.inlinepatterns.ENTITY_RE STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*' # Inline code block without whitespace stripping BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))' # Add Inline Patterns. We use a custom numbering of the # rules, that preserves the order from upstream but leaves # space for us to add our own. reg = markdown.util.Registry() reg.register(BacktickPattern(BACKTICK_RE), 'backtick', 105) reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100) reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95) reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90) reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85) reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80) reg.register(ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'), 'modal_link', 75) # Note that !gravatar syntax should be deprecated long term. 
reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70) reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65) reg.register(AtomicLinkPattern(get_link_re(), self), 'link', 60) reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55) # Reserve priority 45-54 for Realm Filters reg = self.register_realm_filters(reg) reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40) reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35) reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30) reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25) reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20) reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15) reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10) # We get priority 5 from 'nl2br' extension reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0) return reg def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry: for (pattern, format_string, id) in self.getConfig("realm_filters"): inlinePatterns.register(RealmFilterPattern(pattern, format_string, self), 'realm_filters/%s' % (pattern), 45) return inlinePatterns def build_treeprocessors(self) -> markdown.util.Registry: # Here we build all the processors from upstream, plus a few of our own. 
treeprocessors = markdown.util.Registry() # We get priority 30 from 'hilite' extension treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25) treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20) treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15) if settings.CAMO_URI: treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10) return treeprocessors def build_postprocessors(self) -> markdown.util.Registry: # These are the default python-markdown processors, unmodified. postprocessors = markdown.util.Registry() postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20) postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15) postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10) return postprocessors def getConfig(self, key: str, default: str='') -> Any: """ Return a setting for the given key or an empty string. """ if key in self.config: return self.config[key][0] else: return default def handle_zephyr_mirror(self) -> None: if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY: # Disable almost all inline patterns for zephyr mirror # users' traffic that is mirrored. Note that # inline_interesting_links is a treeprocessor and thus is # not removed self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink']) self.treeprocessors = get_sub_registry(self.treeprocessors, ['inline_interesting_links', 'rewrite_to_https']) # insert new 'inline' processor because we have changed self.inlinePatterns # but InlineProcessor copies md as self.md in __init__. 
            self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
            self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
            self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])

# Cache of constructed engines, keyed by (realm_filters_key, email_gateway).
md_engines = {}  # type: Dict[Tuple[int, bool], markdown.Markdown]
# Cache of the realm filter tuples each engine was built with.
realm_filter_data = {}  # type: Dict[int, List[Tuple[str, str, int]]]

def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
    """(Re)build and cache the Markdown engine for this key pair."""
    md_engine_key = (realm_filters_key, email_gateway)
    if md_engine_key in md_engines:
        del md_engines[md_engine_key]

    realm_filters = realm_filter_data[realm_filters_key]
    md_engines[md_engine_key] = build_engine(
        realm_filters=realm_filters,
        realm_filters_key=realm_filters_key,
        email_gateway=email_gateway,
    )

def build_engine(realm_filters: List[Tuple[str, str, int]],
                 realm_filters_key: int,
                 email_gateway: bool) -> markdown.Markdown:
    """Construct a Bugdown engine with Zulip's standard extension set."""
    engine = Bugdown(
        realm_filters=realm_filters,
        realm=realm_filters_key,
        code_block_processor_disabled=email_gateway,
        extensions = [
            nl2br.makeExtension(),
            tables.makeExtension(),
            codehilite.makeExtension(
                linenums=False,
                guess_lang=False
            ),
        ])
    return engine

def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
    """Return the URLs generated by applying the realm's filters to a topic name."""
    matches = []  # type: List[str]

    realm_filters = realm_filters_for_realm(realm_filters_key)

    for realm_filter in realm_filters:
        pattern = prepare_realm_pattern(realm_filter[0])
        for m in re.finditer(pattern, topic_name):
            matches += [realm_filter[1] % m.groupdict()]
    return matches

def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
    """Ensure the cached Markdown engine(s) reflect the current realm filters."""
    # If realm_filters_key is None, load all filters
    global realm_filter_data
    if realm_filters_key is None:
        all_filters = all_realm_filters()
        all_filters[DEFAULT_BUGDOWN_KEY] = []
        for realm_filters_key, filters in all_filters.items():
            realm_filter_data[realm_filters_key] = filters
            make_md_engine(realm_filters_key, email_gateway)
        # Hack to ensure that getConfig("realm") is right for
        # mirrored Zephyrs
        realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
        make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
    else:
        realm_filters = realm_filters_for_realm(realm_filters_key)
        if realm_filters_key not in realm_filter_data or \
                realm_filter_data[realm_filters_key] != realm_filters:
            # Realm filters data has changed, update `realm_filter_data` and any
            # of the existing markdown engines using this set of realm filters.
            realm_filter_data[realm_filters_key] = realm_filters
            for email_gateway_flag in [True, False]:
                if (realm_filters_key, email_gateway_flag) in md_engines:
                    # Update only existing engines(if any), don't create new one.
                    make_md_engine(realm_filters_key, email_gateway_flag)

        if (realm_filters_key, email_gateway) not in md_engines:
            # Markdown engine corresponding to this key doesn't exists so create one.
            make_md_engine(realm_filters_key, email_gateway)

# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons.  The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
# Map every "word" character to 'x' so logged input leaks no message content.
# Use a raw string for the regex: r'\w' is equivalent to '\\w' but is the
# conventional, lint-clean spelling for patterns.
_privacy_re = re.compile(r'\w', flags=re.UNICODE)

def privacy_clean_markdown(content: str) -> str:
    """Return a privacy-scrubbed repr() of `content` suitable for logging.

    All alphanumeric/underscore characters are replaced with 'x' so the log
    never contains the user's actual message text; repr() improves
    reproducibility and escapes terminal control codes.
    """
    return repr(_privacy_re.sub('x', content))

def log_bugdown_error(msg: str) -> None:
    """We use this unusual logging approach to log the bugdown error, in
    order to prevent AdminNotifyHandler from sending the sanitized original
    markdown formatting into another Zulip message, which could cause an
    infinite exception loop."""
    bugdown_logger.error(msg)

def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
    """Look up users in `realm_id` matching `emails` (case-insensitively).

    Returns a dict mapping each normalized (stripped, lowercased) email to a
    row with 'id' and 'email' keys; emails with no matching user are simply
    absent from the result.
    """
    if not emails:
        return {}

    # Build one Q(email__iexact=...) per email and OR them together so the
    # whole lookup is a single database query.
    q_list = {
        Q(email__iexact=email.strip().lower())
        for email in emails
    }

    rows = UserProfile.objects.filter(
        realm_id=realm_id
    ).filter(
        functools.reduce(lambda a, b: a | b, q_list),
    ).values(
        'id',
        'email',
    )

    return {
        row['email'].strip().lower(): row
        for row in rows
    }

def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
    if not mention_texts:
        return list()

    # Remove the trailing part of the `name|id` mention syntax,
    # thus storing only full names in full_names.
full_names = set() name_re = r'(?P<full_name>.+)\|\d+$' for mention_text in mention_texts: name_syntax_match = re.match(name_re, mention_text) if name_syntax_match: full_names.add(name_syntax_match.group("full_name")) else: full_names.add(mention_text) q_list = { Q(full_name__iexact=full_name) for full_name in full_names } rows = UserProfile.objects.filter( realm_id=realm_id, is_active=True, ).filter( functools.reduce(lambda a, b: a | b, q_list), ).values( 'id', 'full_name', 'email', ) return list(rows) class MentionData: def __init__(self, realm_id: int, content: str) -> None: mention_texts = possible_mentions(content) possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts) self.full_name_info = { row['full_name'].lower(): row for row in possible_mentions_info } self.user_id_info = { row['id']: row for row in possible_mentions_info } self.init_user_group_data(realm_id=realm_id, content=content) def init_user_group_data(self, realm_id: int, content: str) -> None: user_group_names = possible_user_group_mentions(content) self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names) self.user_group_members = defaultdict(list) # type: Dict[int, List[int]] group_ids = [group.id for group in self.user_group_name_info.values()] if not group_ids: # Early-return to avoid the cost of hitting the ORM, # which shows up in profiles. return membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids) for info in membership.values('user_group_id', 'user_profile_id'): group_id = info['user_group_id'] user_profile_id = info['user_profile_id'] self.user_group_members[group_id].append(user_profile_id) def get_user_by_name(self, name: str) -> Optional[FullNameInfo]: # warning: get_user_by_name is not dependable if two # users of the same full name are mentioned. Use # get_user_by_id where possible. 
return self.full_name_info.get(name.lower(), None) def get_user_by_id(self, id: str) -> Optional[FullNameInfo]: return self.user_id_info.get(int(id), None) def get_user_ids(self) -> Set[int]: """ Returns the user IDs that might have been mentioned by this content. Note that because this data structure has not parsed the message and does not know about escaping/code blocks, this will overestimate the list of user ids. """ return set(self.user_id_info.keys()) def get_user_group(self, name: str) -> Optional[UserGroup]: return self.user_group_name_info.get(name.lower(), None) def get_group_members(self, user_group_id: int) -> List[int]: return self.user_group_members.get(user_group_id, []) def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]: if not user_group_names: return dict() rows = UserGroup.objects.filter(realm_id=realm_id, name__in=user_group_names) dct = {row.name.lower(): row for row in rows} return dct def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]: if not stream_names: return dict() q_list = { Q(name=name) for name in stream_names } rows = get_active_streams( realm=realm, ).filter( functools.reduce(lambda a, b: a | b, q_list), ).values( 'id', 'name', ) dct = { row['name']: row for row in rows } return dct def do_convert(content: str, message: Optional[Message]=None, message_realm: Optional[Realm]=None, possible_words: Optional[Set[str]]=None, sent_by_bot: Optional[bool]=False, translate_emoticons: Optional[bool]=False, mention_data: Optional[MentionData]=None, email_gateway: Optional[bool]=False, no_previews: Optional[bool]=False) -> str: """Convert Markdown to HTML, with Zulip-specific settings and hacks.""" # This logic is a bit convoluted, but the overall goal is to support a range of use cases: # * Nothing is passed in other than content -> just run default options (e.g. 
for docs) # * message is passed, but no realm is -> look up realm from message # * message_realm is passed -> use that realm for bugdown purposes if message is not None: if message_realm is None: message_realm = message.get_realm() if message_realm is None: realm_filters_key = DEFAULT_BUGDOWN_KEY else: realm_filters_key = message_realm.id if message and hasattr(message, 'id') and message.id: logging_message_id = 'id# ' + str(message.id) else: logging_message_id = 'unknown' if message is not None and message_realm is not None: if message_realm.is_zephyr_mirror_realm: if message.sending_client.name == "zephyr_mirror": # Use slightly customized Markdown processor for content # delivered via zephyr_mirror realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY maybe_update_markdown_engines(realm_filters_key, email_gateway) md_engine_key = (realm_filters_key, email_gateway) if md_engine_key in md_engines: _md_engine = md_engines[md_engine_key] else: if DEFAULT_BUGDOWN_KEY not in md_engines: maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False) _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)] # Reset the parser; otherwise it will get slower over time. _md_engine.reset() # Filters such as UserMentionPattern need a message. 
_md_engine.zulip_message = message _md_engine.zulip_realm = message_realm _md_engine.zulip_db_data = None # for now _md_engine.image_preview_enabled = image_preview_enabled( message, message_realm, no_previews) _md_engine.url_embed_preview_enabled = url_embed_preview_enabled( message, message_realm, no_previews) # Pre-fetch data from the DB that is used in the bugdown thread if message is not None: assert message_realm is not None # ensured above if message is not None if possible_words is None: possible_words = set() # Set[str] # Here we fetch the data structures needed to render # mentions/avatars/stream mentions from the database, but only # if there is syntax in the message that might use them, since # the fetches are somewhat expensive and these types of syntax # are uncommon enough that it's a useful optimization. if mention_data is None: mention_data = MentionData(message_realm.id, content) emails = possible_avatar_emails(content) email_info = get_email_info(message_realm.id, emails) stream_names = possible_linked_stream_names(content) stream_name_info = get_stream_name_info(message_realm, stream_names) if content_has_emoji_syntax(content): active_realm_emoji = message_realm.get_active_emoji() else: active_realm_emoji = dict() _md_engine.zulip_db_data = { 'possible_words': possible_words, 'email_info': email_info, 'mention_data': mention_data, 'active_realm_emoji': active_realm_emoji, 'realm_uri': message_realm.uri, 'sent_by_bot': sent_by_bot, 'stream_names': stream_name_info, 'translate_emoticons': translate_emoticons, } try: # Spend at most 5 seconds rendering; this protects the backend # from being overloaded by bugs (e.g. markdown logic that is # extremely inefficient in corner cases) as well as user # errors (e.g. a realm filter that makes some syntax # infinite-loop). 
rendered_content = timeout(5, _md_engine.convert, content) # Throw an exception if the content is huge; this protects the # rest of the codebase from any bugs where we end up rendering # something huge. if len(rendered_content) > MAX_MESSAGE_LENGTH * 10: raise BugdownRenderingException('Rendered content exceeds %s characters (message %s)' % (MAX_MESSAGE_LENGTH * 10, logging_message_id)) return rendered_content except Exception: cleaned = privacy_clean_markdown(content) # NOTE: Don't change this message without also changing the # logic in logging_handlers.py or we can create recursive # exceptions. exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s\n (message %s)' % (traceback.format_exc(), cleaned, logging_message_id)) bugdown_logger.exception(exception_message) raise BugdownRenderingException() finally: # These next three lines are slightly paranoid, since # we always set these right before actually using the # engine, but better safe then sorry. _md_engine.zulip_message = None _md_engine.zulip_realm = None _md_engine.zulip_db_data = None bugdown_time_start = 0.0 bugdown_total_time = 0.0 bugdown_total_requests = 0 def get_bugdown_time() -> float: return bugdown_total_time def get_bugdown_requests() -> int: return bugdown_total_requests def bugdown_stats_start() -> None: global bugdown_time_start bugdown_time_start = time.time() def bugdown_stats_finish() -> None: global bugdown_total_time global bugdown_total_requests global bugdown_time_start bugdown_total_requests += 1 bugdown_total_time += (time.time() - bugdown_time_start) def convert(content: str, message: Optional[Message]=None, message_realm: Optional[Realm]=None, possible_words: Optional[Set[str]]=None, sent_by_bot: Optional[bool]=False, translate_emoticons: Optional[bool]=False, mention_data: Optional[MentionData]=None, email_gateway: Optional[bool]=False, no_previews: Optional[bool]=False) -> str: bugdown_stats_start() ret = do_convert(content, message, message_realm, 
possible_words, sent_by_bot, translate_emoticons, mention_data, email_gateway, no_previews=no_previews) bugdown_stats_finish() return ret
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1315_0
crossvul-python_data_bad_1915_10
# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import urllib.parse from typing import List, Optional from netaddr import AddrFormatError, IPAddress from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.internet.interfaces import ( IProtocolFactory, IReactorCore, IStreamClientEndpoint, ) from twisted.web.client import URI, Agent, HTTPConnectionPool from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.http.federation.well_known_resolver import WellKnownResolver from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.util import Clock logger = logging.getLogger(__name__) @implementer(IAgent) class MatrixFederationAgent: """An Agent-like thing which provides a `request` method which correctly handles resolving matrix server names when using matrix://. Handles standard https URIs as normal. Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.) Args: reactor: twisted reactor to use for underlying requests tls_client_options_factory: factory to use for fetching client tls options, or none to disable TLS. 
user_agent: The user agent header to use for federation requests. _srv_resolver: SrvResolver implementation to use for looking up SRV records. None to use a default implementation. _well_known_resolver: WellKnownResolver to use to perform well-known lookups. None to use a default implementation. """ def __init__( self, reactor: IReactorCore, tls_client_options_factory: Optional[FederationPolicyForHTTPS], user_agent: bytes, _srv_resolver: Optional[SrvResolver] = None, _well_known_resolver: Optional[WellKnownResolver] = None, ): self._reactor = reactor self._clock = Clock(reactor) self._pool = HTTPConnectionPool(reactor) self._pool.retryAutomatically = False self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 self._agent = Agent.usingEndpointFactory( self._reactor, MatrixHostnameEndpointFactory( reactor, tls_client_options_factory, _srv_resolver ), pool=self._pool, ) self.user_agent = user_agent if _well_known_resolver is None: _well_known_resolver = WellKnownResolver( self._reactor, agent=Agent( self._reactor, pool=self._pool, contextFactory=tls_client_options_factory, ), user_agent=self.user_agent, ) self._well_known_resolver = _well_known_resolver @defer.inlineCallbacks def request( self, method: bytes, uri: bytes, headers: Optional[Headers] = None, bodyProducer: Optional[IBodyProducer] = None, ) -> defer.Deferred: """ Args: method: HTTP method: GET/POST/etc uri: Absolute URI to be retrieved headers: HTTP headers to send with the request, or None to send no extra headers. bodyProducer: An object which can generate bytes to make up the body of this request (for example, the properly encoded contents of a file for a file upload). Or None if the request is to have no body. Returns: Deferred[twisted.web.iweb.IResponse]: fires when the header of the response has been received (regardless of the response status code). 
Fails if there is any problem which prevents that response from being received (including problems that prevent the request from being sent). """ # We use urlparse as that will set `port` to None if there is no # explicit port. parsed_uri = urllib.parse.urlparse(uri) # There must be a valid hostname. assert parsed_uri.hostname # If this is a matrix:// URI check if the server has delegated matrix # traffic using well-known delegation. # # We have to do this here and not in the endpoint as we need to rewrite # the host header with the delegated server name. delegated_server = None if ( parsed_uri.scheme == b"matrix" and not _is_ip_literal(parsed_uri.hostname) and not parsed_uri.port ): well_known_result = yield defer.ensureDeferred( self._well_known_resolver.get_well_known(parsed_uri.hostname) ) delegated_server = well_known_result.delegated_server if delegated_server: # Ok, the server has delegated matrix traffic to somewhere else, so # lets rewrite the URL to replace the server with the delegated # server name. uri = urllib.parse.urlunparse( ( parsed_uri.scheme, delegated_server, parsed_uri.path, parsed_uri.params, parsed_uri.query, parsed_uri.fragment, ) ) parsed_uri = urllib.parse.urlparse(uri) # We need to make sure the host header is set to the netloc of the # server and that a user-agent is provided. if headers is None: headers = Headers() else: headers = headers.copy() if not headers.hasHeader(b"host"): headers.addRawHeader(b"host", parsed_uri.netloc) if not headers.hasHeader(b"user-agent"): headers.addRawHeader(b"user-agent", self.user_agent) res = yield make_deferred_yieldable( self._agent.request(method, uri, headers, bodyProducer) ) return res @implementer(IAgentEndpointFactory) class MatrixHostnameEndpointFactory: """Factory for MatrixHostnameEndpoint for parsing to an Agent. 
""" def __init__( self, reactor: IReactorCore, tls_client_options_factory: Optional[FederationPolicyForHTTPS], srv_resolver: Optional[SrvResolver], ): self._reactor = reactor self._tls_client_options_factory = tls_client_options_factory if srv_resolver is None: srv_resolver = SrvResolver() self._srv_resolver = srv_resolver def endpointForURI(self, parsed_uri): return MatrixHostnameEndpoint( self._reactor, self._tls_client_options_factory, self._srv_resolver, parsed_uri, ) @implementer(IStreamClientEndpoint) class MatrixHostnameEndpoint: """An endpoint that resolves matrix:// URLs using Matrix server name resolution (i.e. via SRV). Does not check for well-known delegation. Args: reactor: twisted reactor to use for underlying requests tls_client_options_factory: factory to use for fetching client tls options, or none to disable TLS. srv_resolver: The SRV resolver to use parsed_uri: The parsed URI that we're wanting to connect to. """ def __init__( self, reactor: IReactorCore, tls_client_options_factory: Optional[FederationPolicyForHTTPS], srv_resolver: SrvResolver, parsed_uri: URI, ): self._reactor = reactor self._parsed_uri = parsed_uri # set up the TLS connection params # # XXX disabling TLS is really only supported here for the benefit of the # unit tests. We should make the UTs cope with TLS rather than having to make # the code support the unit tests. 
if tls_client_options_factory is None: self._tls_options = None else: self._tls_options = tls_client_options_factory.get_options( self._parsed_uri.host ) self._srv_resolver = srv_resolver def connect(self, protocol_factory: IProtocolFactory) -> defer.Deferred: """Implements IStreamClientEndpoint interface """ return run_in_background(self._do_connect, protocol_factory) async def _do_connect(self, protocol_factory: IProtocolFactory) -> None: first_exception = None server_list = await self._resolve_server() for server in server_list: host = server.host port = server.port try: logger.debug("Connecting to %s:%i", host.decode("ascii"), port) endpoint = HostnameEndpoint(self._reactor, host, port) if self._tls_options: endpoint = wrapClientTLS(self._tls_options, endpoint) result = await make_deferred_yieldable( endpoint.connect(protocol_factory) ) return result except Exception as e: logger.info( "Failed to connect to %s:%i: %s", host.decode("ascii"), port, e ) if not first_exception: first_exception = e # We return the first failure because that's probably the most interesting. if first_exception: raise first_exception # This shouldn't happen as we should always have at least one host/port # to try and if that doesn't work then we'll have an exception. raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,)) async def _resolve_server(self) -> List[Server]: """Resolves the server name to a list of hosts and ports to attempt to connect to. """ if self._parsed_uri.scheme != b"matrix": return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)] # Note: We don't do well-known lookup as that needs to have happened # before now, due to needing to rewrite the Host header of the HTTP # request. 
# We reparse the URI so that defaultPort is -1 rather than 80 parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes()) host = parsed_uri.hostname port = parsed_uri.port # If there is an explicit port or the host is an IP address we bypass # SRV lookups and just use the given host/port. if port or _is_ip_literal(host): return [Server(host, port or 8448)] server_list = await self._srv_resolver.resolve_service(b"_matrix._tcp." + host) if server_list: return server_list # No SRV records, so we fallback to host and 8448 return [Server(host, 8448)] def _is_ip_literal(host: bytes) -> bool: """Test if the given host name is either an IPv4 or IPv6 literal. Args: host: The host name to check Returns: True if the hostname is an IP address literal. """ host_str = host.decode("ascii") try: IPAddress(host_str) return True except AddrFormatError: return False
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_10
crossvul-python_data_good_1915_13
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import logging import os import shutil from typing import IO, Dict, List, Optional, Tuple import twisted.internet.error import twisted.web.http from twisted.web.http import Request from twisted.web.resource import Resource from synapse.api.errors import ( FederationDeniedError, HttpResponseException, NotFoundError, RequestSendFailed, SynapseError, ) from synapse.config._base import ConfigError from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import random_string from ._base import ( FileInfo, Responder, get_filename_from_headers, respond_404, respond_with_responder, ) from .config_resource import MediaConfigResource from .download_resource import DownloadResource from .filepath import MediaFilePaths from .media_storage import MediaStorage from .preview_url_resource import PreviewUrlResource from .storage_provider import StorageProviderWrapper from .thumbnail_resource import ThumbnailResource from .thumbnailer import Thumbnailer, ThumbnailError from .upload_resource import UploadResource logger = logging.getLogger(__name__) UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 class MediaRepository: def __init__(self, hs): 
self.hs = hs self.auth = hs.get_auth() self.client = hs.get_federation_http_client() self.clock = hs.get_clock() self.server_name = hs.hostname self.store = hs.get_datastore() self.max_upload_size = hs.config.max_upload_size self.max_image_pixels = hs.config.max_image_pixels self.primary_base_path = hs.config.media_store_path self.filepaths = MediaFilePaths(self.primary_base_path) self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.thumbnail_requirements = hs.config.thumbnail_requirements self.remote_media_linearizer = Linearizer(name="media_remote") self.recently_accessed_remotes = set() self.recently_accessed_locals = set() self.federation_domain_whitelist = hs.config.federation_domain_whitelist # List of StorageProviders where we should search for media and # potentially upload to. storage_providers = [] for clz, provider_config, wrapper_config in hs.config.media_storage_providers: backend = clz(hs, provider_config) provider = StorageProviderWrapper( backend, store_local=wrapper_config.store_local, store_remote=wrapper_config.store_remote, store_synchronous=wrapper_config.store_synchronous, ) storage_providers.append(provider) self.media_storage = MediaStorage( self.hs, self.primary_base_path, self.filepaths, storage_providers ) self.clock.looping_call( self._start_update_recently_accessed, UPDATE_RECENTLY_ACCESSED_TS ) def _start_update_recently_accessed(self): return run_as_background_process( "update_recently_accessed_media", self._update_recently_accessed ) async def _update_recently_accessed(self): remote_media = self.recently_accessed_remotes self.recently_accessed_remotes = set() local_media = self.recently_accessed_locals self.recently_accessed_locals = set() await self.store.update_cached_last_access_time( local_media, remote_media, self.clock.time_msec() ) def mark_recently_accessed(self, server_name, media_id): """Mark the given media as recently accessed. 
Args: server_name (str|None): Origin server of media, or None if local media_id (str): The media ID of the content """ if server_name: self.recently_accessed_remotes.add((server_name, media_id)) else: self.recently_accessed_locals.add(media_id) async def create_content( self, media_type: str, upload_name: Optional[str], content: IO, content_length: int, auth_user: str, ) -> str: """Store uploaded content for a local user and return the mxc URL Args: media_type: The content type of the file. upload_name: The name of the file, if provided. content: A file like object that is the content to store content_length: The length of the content auth_user: The user_id of the uploader Returns: The mxc url of the stored content """ media_id = random_string(24) file_info = FileInfo(server_name=None, file_id=media_id) fname = await self.media_storage.store_file(content, file_info) logger.info("Stored local media in file %r", fname) await self.store.store_local_media( media_id=media_id, media_type=media_type, time_now_ms=self.clock.time_msec(), upload_name=upload_name, media_length=content_length, user_id=auth_user, ) await self._generate_thumbnails(None, media_id, media_id, media_type) return "mxc://%s/%s" % (self.server_name, media_id) async def get_local_media( self, request: Request, media_id: str, name: Optional[str] ) -> None: """Responds to reqests for local media, if exists, or returns 404. Args: request: The incoming request. media_id: The media ID of the content. (This is the same as the file_id for local content.) name: Optional name that, if specified, will be used as the filename in the Content-Disposition header of the response. 
Returns: Resolves once a response has successfully been written to request """ media_info = await self.store.get_local_media(media_id) if not media_info or media_info["quarantined_by"]: respond_404(request) return self.mark_recently_accessed(None, media_id) media_type = media_info["media_type"] media_length = media_info["media_length"] upload_name = name if name else media_info["upload_name"] url_cache = media_info["url_cache"] file_info = FileInfo(None, media_id, url_cache=url_cache) responder = await self.media_storage.fetch_media(file_info) await respond_with_responder( request, responder, media_type, media_length, upload_name ) async def get_remote_media( self, request: Request, server_name: str, media_id: str, name: Optional[str] ) -> None: """Respond to requests for remote media. Args: request: The incoming request. server_name: Remote server_name where the media originated. media_id: The media ID of the content (as defined by the remote server). name: Optional name that, if specified, will be used as the filename in the Content-Disposition header of the response. 
Returns: Resolves once a response has successfully been written to request """ if ( self.federation_domain_whitelist is not None and server_name not in self.federation_domain_whitelist ): raise FederationDeniedError(server_name) self.mark_recently_accessed(server_name, media_id) # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) with (await self.remote_media_linearizer.queue(key)): responder, media_info = await self._get_remote_media_impl( server_name, media_id ) # We deliberately stream the file outside the lock if responder: media_type = media_info["media_type"] media_length = media_info["media_length"] upload_name = name if name else media_info["upload_name"] await respond_with_responder( request, responder, media_type, media_length, upload_name ) else: respond_404(request) async def get_remote_media_info(self, server_name: str, media_id: str) -> dict: """Gets the media info associated with the remote file, downloading if necessary. Args: server_name: Remote server_name where the media originated. media_id: The media ID of the content (as defined by the remote server). Returns: The media info of the file """ if ( self.federation_domain_whitelist is not None and server_name not in self.federation_domain_whitelist ): raise FederationDeniedError(server_name) # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) with (await self.remote_media_linearizer.queue(key)): responder, media_info = await self._get_remote_media_impl( server_name, media_id ) # Ensure we actually use the responder so that it releases resources if responder: with responder: pass return media_info async def _get_remote_media_impl( self, server_name: str, media_id: str ) -> Tuple[Optional[Responder], dict]: """Looks for media in local cache, if not there then attempt to download from remote server. 
Args: server_name (str): Remote server_name where the media originated. media_id (str): The media ID of the content (as defined by the remote server). Returns: A tuple of responder and the media info of the file. """ media_info = await self.store.get_cached_remote_media(server_name, media_id) # file_id is the ID we use to track the file locally. If we've already # seen the file then reuse the existing ID, otherwise genereate a new # one. # If we have an entry in the DB, try and look for it if media_info: file_id = media_info["filesystem_id"] file_info = FileInfo(server_name, file_id) if media_info["quarantined_by"]: logger.info("Media is quarantined") raise NotFoundError() responder = await self.media_storage.fetch_media(file_info) if responder: return responder, media_info # Failed to find the file anywhere, lets download it. try: media_info = await self._download_remote_file(server_name, media_id,) except SynapseError: raise except Exception as e: # An exception may be because we downloaded media in another # process, so let's check if we magically have the media. media_info = await self.store.get_cached_remote_media(server_name, media_id) if not media_info: raise e file_id = media_info["filesystem_id"] file_info = FileInfo(server_name, file_id) # We generate thumbnails even if another process downloaded the media # as a) it's conceivable that the other download request dies before it # generates thumbnails, but mainly b) we want to be sure the thumbnails # have finished being generated before responding to the client, # otherwise they'll request thumbnails and get a 404 if they're not # ready yet. await self._generate_thumbnails( server_name, media_id, file_id, media_info["media_type"] ) responder = await self.media_storage.fetch_media(file_info) return responder, media_info async def _download_remote_file(self, server_name: str, media_id: str,) -> dict: """Attempt to download the remote file from the given server name, using the given file_id as the local id. 
Args: server_name: Originating server media_id: The media ID of the content (as defined by the remote server). This is different than the file_id, which is locally generated. file_id: Local file ID Returns: The media info of the file. """ file_id = random_string(24) file_info = FileInfo(server_name=server_name, file_id=file_id) with self.media_storage.store_into_file(file_info) as (f, fname, finish): request_path = "/".join( ("/_matrix/media/r0/download", server_name, media_id) ) try: length, headers = await self.client.get_file( server_name, request_path, output_stream=f, max_size=self.max_upload_size, args={ # tell the remote server to 404 if it doesn't # recognise the server_name, to make sure we don't # end up with a routing loop. "allow_remote": "false" }, ) except RequestSendFailed as e: logger.warning( "Request failed fetching remote media %s/%s: %r", server_name, media_id, e, ) raise SynapseError(502, "Failed to fetch remote media") except HttpResponseException as e: logger.warning( "HTTP error fetching remote media %s/%s: %s", server_name, media_id, e.response, ) if e.code == twisted.web.http.NOT_FOUND: raise e.to_synapse_error() raise SynapseError(502, "Failed to fetch remote media") except SynapseError: logger.warning( "Failed to fetch remote media %s/%s", server_name, media_id ) raise except NotRetryingDestination: logger.warning("Not retrying destination %r", server_name) raise SynapseError(502, "Failed to fetch remote media") except Exception: logger.exception( "Failed to fetch remote media %s/%s", server_name, media_id ) raise SynapseError(502, "Failed to fetch remote media") await finish() media_type = headers[b"Content-Type"][0].decode("ascii") upload_name = get_filename_from_headers(headers) time_now_ms = self.clock.time_msec() # Multiple remote media download requests can race (when using # multiple media repos), so this may throw a violation constraint # exception. 
If it does we'll delete the newly downloaded file from # disk (as we're in the ctx manager). # # However: we've already called `finish()` so we may have also # written to the storage providers. This is preferable to the # alternative where we call `finish()` *after* this, where we could # end up having an entry in the DB but fail to write the files to # the storage providers. await self.store.store_cached_remote_media( origin=server_name, media_id=media_id, media_type=media_type, time_now_ms=self.clock.time_msec(), upload_name=upload_name, media_length=length, filesystem_id=file_id, ) logger.info("Stored remote media in file %r", fname) media_info = { "media_type": media_type, "media_length": length, "upload_name": upload_name, "created_ts": time_now_ms, "filesystem_id": file_id, } return media_info def _get_thumbnail_requirements(self, media_type): return self.thumbnail_requirements.get(media_type, ()) def _generate_thumbnail(self, thumbnailer, t_width, t_height, t_method, t_type): m_width = thumbnailer.width m_height = thumbnailer.height if m_width * m_height >= self.max_image_pixels: logger.info( "Image too large to thumbnail %r x %r > %r", m_width, m_height, self.max_image_pixels, ) return if thumbnailer.transpose_method is not None: m_width, m_height = thumbnailer.transpose() if t_method == "crop": t_byte_source = thumbnailer.crop(t_width, t_height, t_type) elif t_method == "scale": t_width, t_height = thumbnailer.aspect(t_width, t_height) t_width = min(m_width, t_width) t_height = min(m_height, t_height) t_byte_source = thumbnailer.scale(t_width, t_height, t_type) else: t_byte_source = None return t_byte_source async def generate_local_exact_thumbnail( self, media_id: str, t_width: int, t_height: int, t_method: str, t_type: str, url_cache: str, ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) try: thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: 
logger.warning( "Unable to generate a thumbnail for local media %s using a method of %s and type of %s: %s", media_id, t_method, t_type, e, ) return None t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type, ) if t_byte_source: try: file_info = FileInfo( server_name=None, file_id=media_id, url_cache=url_cache, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, thumbnail_method=t_method, thumbnail_type=t_type, ) output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: t_byte_source.close() logger.info("Stored thumbnail in file %r", output_path) t_len = os.path.getsize(output_path) await self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) return output_path # Could not generate thumbnail. return None async def generate_remote_exact_thumbnail( self, server_name: str, file_id: str, media_id: str, t_width: int, t_height: int, t_method: str, t_type: str, ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=False) ) try: thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: logger.warning( "Unable to generate a thumbnail for remote media %s from %s using a method of %s and type of %s: %s", media_id, server_name, t_method, t_type, e, ) return None t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type, ) if t_byte_source: try: file_info = FileInfo( server_name=server_name, file_id=file_id, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, thumbnail_method=t_method, thumbnail_type=t_type, ) output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: t_byte_source.close() logger.info("Stored thumbnail in file %r", output_path) t_len = os.path.getsize(output_path) await 
self.store.store_remote_media_thumbnail( server_name, media_id, file_id, t_width, t_height, t_type, t_method, t_len, ) return output_path # Could not generate thumbnail. return None async def _generate_thumbnails( self, server_name: Optional[str], media_id: str, file_id: str, media_type: str, url_cache: bool = False, ) -> Optional[dict]: """Generate and store thumbnails for an image. Args: server_name: The server name if remote media, else None if local media_id: The media ID of the content. (This is the same as the file_id for local content) file_id: Local file ID media_type: The content type of the file url_cache: If we are thumbnailing images downloaded for the URL cache, used exclusively by the url previewer Returns: Dict with "width" and "height" keys of original image or None if the media cannot be thumbnailed. """ requirements = self._get_thumbnail_requirements(media_type) if not requirements: return None input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=url_cache) ) try: thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: logger.warning( "Unable to generate thumbnails for remote media %s from %s of type %s: %s", media_id, server_name, media_type, e, ) return None m_width = thumbnailer.width m_height = thumbnailer.height if m_width * m_height >= self.max_image_pixels: logger.info( "Image too large to thumbnail %r x %r > %r", m_width, m_height, self.max_image_pixels, ) return None if thumbnailer.transpose_method is not None: m_width, m_height = await defer_to_thread( self.hs.get_reactor(), thumbnailer.transpose ) # We deduplicate the thumbnail sizes by ignoring the cropped versions if # they have the same dimensions of a scaled one. 
thumbnails = {} # type: Dict[Tuple[int, int, str], str] for r_width, r_height, r_method, r_type in requirements: if r_method == "crop": thumbnails.setdefault((r_width, r_height, r_type), r_method) elif r_method == "scale": t_width, t_height = thumbnailer.aspect(r_width, r_height) t_width = min(m_width, t_width) t_height = min(m_height, t_height) thumbnails[(t_width, t_height, r_type)] = r_method # Now we generate the thumbnails for each dimension, store it for (t_width, t_height, t_type), t_method in thumbnails.items(): # Generate the thumbnail if t_method == "crop": t_byte_source = await defer_to_thread( self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type ) elif t_method == "scale": t_byte_source = await defer_to_thread( self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type ) else: logger.error("Unrecognized method: %r", t_method) continue if not t_byte_source: continue file_info = FileInfo( server_name=server_name, file_id=file_id, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, thumbnail_method=t_method, thumbnail_type=t_type, url_cache=url_cache, ) with self.media_storage.store_into_file(file_info) as (f, fname, finish): try: await self.media_storage.write_to_file(t_byte_source, f) await finish() finally: t_byte_source.close() t_len = os.path.getsize(fname) # Write to database if server_name: # Multiple remote media download requests can race (when # using multiple media repos), so this may throw a violation # constraint exception. If it does we'll delete the newly # generated thumbnail from disk (as we're in the ctx # manager). # # However: we've already called `finish()` so we may have # also written to the storage providers. This is preferable # to the alternative where we call `finish()` *after* this, # where we could end up having an entry in the DB but fail # to write the files to the storage providers. 
try: await self.store.store_remote_media_thumbnail( server_name, media_id, file_id, t_width, t_height, t_type, t_method, t_len, ) except Exception as e: thumbnail_exists = await self.store.get_remote_media_thumbnail( server_name, media_id, t_width, t_height, t_type, ) if not thumbnail_exists: raise e else: await self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) return {"width": m_width, "height": m_height} async def delete_old_remote_media(self, before_ts): old_media = await self.store.get_remote_media_before(before_ts) deleted = 0 for media in old_media: origin = media["media_origin"] media_id = media["media_id"] file_id = media["filesystem_id"] key = (origin, media_id) logger.info("Deleting: %r", key) # TODO: Should we delete from the backup store with (await self.remote_media_linearizer.queue(key)): full_path = self.filepaths.remote_media_filepath(origin, file_id) try: os.remove(full_path) except OSError as e: logger.warning("Failed to remove file: %r", full_path) if e.errno == errno.ENOENT: pass else: continue thumbnail_dir = self.filepaths.remote_media_thumbnail_dir( origin, file_id ) shutil.rmtree(thumbnail_dir, ignore_errors=True) await self.store.delete_remote_media(origin, media_id) deleted += 1 return {"deleted": deleted} async def delete_local_media(self, media_id: str) -> Tuple[List[str], int]: """ Delete the given local or remote media ID from this server Args: media_id: The media ID to delete. Returns: A tuple of (list of deleted media IDs, total deleted media IDs). """ return await self._remove_local_media_from_disk([media_id]) async def delete_old_local_media( self, before_ts: int, size_gt: int = 0, keep_profiles: bool = True, ) -> Tuple[List[str], int]: """ Delete local or remote media from this server by size and timestamp. Removes media files, any thumbnails and cached URLs. Args: before_ts: Unix timestamp in ms. 
Files that were last used before this timestamp will be deleted size_gt: Size of the media in bytes. Files that are larger will be deleted keep_profiles: Switch to delete also files that are still used in image data (e.g user profile, room avatar) If false these files will be deleted Returns: A tuple of (list of deleted media IDs, total deleted media IDs). """ old_media = await self.store.get_local_media_before( before_ts, size_gt, keep_profiles, ) return await self._remove_local_media_from_disk(old_media) async def _remove_local_media_from_disk( self, media_ids: List[str] ) -> Tuple[List[str], int]: """ Delete local or remote media from this server. Removes media files, any thumbnails and cached URLs. Args: media_ids: List of media_id to delete Returns: A tuple of (list of deleted media IDs, total deleted media IDs). """ removed_media = [] for media_id in media_ids: logger.info("Deleting media with ID '%s'", media_id) full_path = self.filepaths.local_media_filepath(media_id) try: os.remove(full_path) except OSError as e: logger.warning("Failed to remove file: %r: %s", full_path, e) if e.errno == errno.ENOENT: pass else: continue thumbnail_dir = self.filepaths.local_media_thumbnail_dir(media_id) shutil.rmtree(thumbnail_dir, ignore_errors=True) await self.store.delete_remote_media(self.server_name, media_id) await self.store.delete_url_cache((media_id,)) await self.store.delete_url_cache_media((media_id,)) removed_media.append(media_id) return removed_media, len(removed_media) class MediaRepositoryResource(Resource): """File uploading and downloading. 
    Uploads are POSTed to a resource which returns a token which is used to GET
    the download::

        => POST /_matrix/media/r0/upload HTTP/1.1
           Content-Type: <media-type>
           Content-Length: <content-length>

           <media>

        <= HTTP/1.1 200 OK
           Content-Type: application/json

           { "content_uri": "mxc://<server-name>/<media-id>" }

        => GET /_matrix/media/r0/download/<server-name>/<media-id> HTTP/1.1

        <= HTTP/1.1 200 OK
           Content-Type: <media-type>
           Content-Disposition: attachment;filename=<upload-filename>

           <media>

    Clients can get thumbnails by supplying a desired width and height and
    thumbnailing method::

        => GET /_matrix/media/r0/thumbnail/<server_name>
                /<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1

        <= HTTP/1.1 200 OK
           Content-Type: image/jpeg or image/png

           <thumbnail>

    The thumbnail methods are "crop" and "scale". "scale" tries to return an
    image where either the width or the height is smaller than the requested
    size. The client should then scale and letterbox the image if it needs to
    fit within a given rectangle. "crop" tries to return an image where the
    width and height are close to the requested size and the aspect matches
    the requested size. The client should scale the image if it needs to fit
    within a given rectangle.
    """

    def __init__(self, hs):
        # If we're not configured to use it, raise if we somehow got here.
        if not hs.config.can_load_media_repo:
            raise ConfigError("Synapse is not configured to use a media repo.")

        super().__init__()
        media_repo = hs.get_media_repository()

        # Register the child HTTP resources that make up the media API.
        self.putChild(b"upload", UploadResource(hs, media_repo))
        self.putChild(b"download", DownloadResource(hs, media_repo))
        self.putChild(
            b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage)
        )
        # URL previews are optional and gated on config.
        if hs.config.url_preview_enabled:
            self.putChild(
                b"preview_url",
                PreviewUrlResource(hs, media_repo, media_repo.media_storage),
            )
        self.putChild(b"config", MediaConfigResource(hs))
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_13
crossvul-python_data_bad_1915_13
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import logging import os import shutil from typing import IO, Dict, List, Optional, Tuple import twisted.internet.error import twisted.web.http from twisted.web.http import Request from twisted.web.resource import Resource from synapse.api.errors import ( FederationDeniedError, HttpResponseException, NotFoundError, RequestSendFailed, SynapseError, ) from synapse.config._base import ConfigError from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import random_string from ._base import ( FileInfo, Responder, get_filename_from_headers, respond_404, respond_with_responder, ) from .config_resource import MediaConfigResource from .download_resource import DownloadResource from .filepath import MediaFilePaths from .media_storage import MediaStorage from .preview_url_resource import PreviewUrlResource from .storage_provider import StorageProviderWrapper from .thumbnail_resource import ThumbnailResource from .thumbnailer import Thumbnailer, ThumbnailError from .upload_resource import UploadResource logger = logging.getLogger(__name__) UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 class MediaRepository: def __init__(self, hs): 
        self.hs = hs
        self.auth = hs.get_auth()
        self.client = hs.get_http_client()
        self.clock = hs.get_clock()
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.max_upload_size = hs.config.max_upload_size
        self.max_image_pixels = hs.config.max_image_pixels

        # All media is written under this path; storage providers are
        # searched/uploaded to in addition to it.
        self.primary_base_path = hs.config.media_store_path
        self.filepaths = MediaFilePaths(self.primary_base_path)

        self.dynamic_thumbnails = hs.config.dynamic_thumbnails
        self.thumbnail_requirements = hs.config.thumbnail_requirements

        # Serialises concurrent downloads of the same remote media item.
        self.remote_media_linearizer = Linearizer(name="media_remote")

        # Media IDs accessed since the last flush; drained periodically by
        # _update_recently_accessed below.
        self.recently_accessed_remotes = set()
        self.recently_accessed_locals = set()

        self.federation_domain_whitelist = hs.config.federation_domain_whitelist

        # List of StorageProviders where we should search for media and
        # potentially upload to.
        storage_providers = []

        for clz, provider_config, wrapper_config in hs.config.media_storage_providers:
            backend = clz(hs, provider_config)
            provider = StorageProviderWrapper(
                backend,
                store_local=wrapper_config.store_local,
                store_remote=wrapper_config.store_remote,
                store_synchronous=wrapper_config.store_synchronous,
            )
            storage_providers.append(provider)

        self.media_storage = MediaStorage(
            self.hs, self.primary_base_path, self.filepaths, storage_providers
        )

        # Periodically flush the recently-accessed sets to the database.
        self.clock.looping_call(
            self._start_update_recently_accessed, UPDATE_RECENTLY_ACCESSED_TS
        )

    def _start_update_recently_accessed(self):
        # Wrapped so the looping_call runs as a tracked background process.
        return run_as_background_process(
            "update_recently_accessed_media", self._update_recently_accessed
        )

    async def _update_recently_accessed(self):
        """Flush the accumulated recently-accessed media IDs to the DB.

        Swaps in fresh sets first so accesses recorded while the DB write is
        in flight are not lost.
        """
        remote_media = self.recently_accessed_remotes
        self.recently_accessed_remotes = set()

        local_media = self.recently_accessed_locals
        self.recently_accessed_locals = set()

        await self.store.update_cached_last_access_time(
            local_media, remote_media, self.clock.time_msec()
        )

    def mark_recently_accessed(self, server_name, media_id):
        """Mark the given media as recently accessed.
Args: server_name (str|None): Origin server of media, or None if local media_id (str): The media ID of the content """ if server_name: self.recently_accessed_remotes.add((server_name, media_id)) else: self.recently_accessed_locals.add(media_id) async def create_content( self, media_type: str, upload_name: Optional[str], content: IO, content_length: int, auth_user: str, ) -> str: """Store uploaded content for a local user and return the mxc URL Args: media_type: The content type of the file. upload_name: The name of the file, if provided. content: A file like object that is the content to store content_length: The length of the content auth_user: The user_id of the uploader Returns: The mxc url of the stored content """ media_id = random_string(24) file_info = FileInfo(server_name=None, file_id=media_id) fname = await self.media_storage.store_file(content, file_info) logger.info("Stored local media in file %r", fname) await self.store.store_local_media( media_id=media_id, media_type=media_type, time_now_ms=self.clock.time_msec(), upload_name=upload_name, media_length=content_length, user_id=auth_user, ) await self._generate_thumbnails(None, media_id, media_id, media_type) return "mxc://%s/%s" % (self.server_name, media_id) async def get_local_media( self, request: Request, media_id: str, name: Optional[str] ) -> None: """Responds to reqests for local media, if exists, or returns 404. Args: request: The incoming request. media_id: The media ID of the content. (This is the same as the file_id for local content.) name: Optional name that, if specified, will be used as the filename in the Content-Disposition header of the response. 
Returns: Resolves once a response has successfully been written to request """ media_info = await self.store.get_local_media(media_id) if not media_info or media_info["quarantined_by"]: respond_404(request) return self.mark_recently_accessed(None, media_id) media_type = media_info["media_type"] media_length = media_info["media_length"] upload_name = name if name else media_info["upload_name"] url_cache = media_info["url_cache"] file_info = FileInfo(None, media_id, url_cache=url_cache) responder = await self.media_storage.fetch_media(file_info) await respond_with_responder( request, responder, media_type, media_length, upload_name ) async def get_remote_media( self, request: Request, server_name: str, media_id: str, name: Optional[str] ) -> None: """Respond to requests for remote media. Args: request: The incoming request. server_name: Remote server_name where the media originated. media_id: The media ID of the content (as defined by the remote server). name: Optional name that, if specified, will be used as the filename in the Content-Disposition header of the response. 
Returns: Resolves once a response has successfully been written to request """ if ( self.federation_domain_whitelist is not None and server_name not in self.federation_domain_whitelist ): raise FederationDeniedError(server_name) self.mark_recently_accessed(server_name, media_id) # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) with (await self.remote_media_linearizer.queue(key)): responder, media_info = await self._get_remote_media_impl( server_name, media_id ) # We deliberately stream the file outside the lock if responder: media_type = media_info["media_type"] media_length = media_info["media_length"] upload_name = name if name else media_info["upload_name"] await respond_with_responder( request, responder, media_type, media_length, upload_name ) else: respond_404(request) async def get_remote_media_info(self, server_name: str, media_id: str) -> dict: """Gets the media info associated with the remote file, downloading if necessary. Args: server_name: Remote server_name where the media originated. media_id: The media ID of the content (as defined by the remote server). Returns: The media info of the file """ if ( self.federation_domain_whitelist is not None and server_name not in self.federation_domain_whitelist ): raise FederationDeniedError(server_name) # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) with (await self.remote_media_linearizer.queue(key)): responder, media_info = await self._get_remote_media_impl( server_name, media_id ) # Ensure we actually use the responder so that it releases resources if responder: with responder: pass return media_info async def _get_remote_media_impl( self, server_name: str, media_id: str ) -> Tuple[Optional[Responder], dict]: """Looks for media in local cache, if not there then attempt to download from remote server. 
        Args:
            server_name (str): Remote server_name where the media originated.
            media_id (str): The media ID of the content (as defined by the
                remote server).

        Returns:
            A tuple of responder and the media info of the file.
        """
        media_info = await self.store.get_cached_remote_media(server_name, media_id)

        # file_id is the ID we use to track the file locally. If we've already
        # seen the file then reuse the existing ID, otherwise generate a new
        # one.

        # If we have an entry in the DB, try and look for it
        if media_info:
            file_id = media_info["filesystem_id"]
            file_info = FileInfo(server_name, file_id)

            # Quarantined media is treated as if it does not exist.
            if media_info["quarantined_by"]:
                logger.info("Media is quarantined")
                raise NotFoundError()

            responder = await self.media_storage.fetch_media(file_info)
            if responder:
                return responder, media_info

        # Failed to find the file anywhere, lets download it.

        try:
            media_info = await self._download_remote_file(server_name, media_id,)
        except SynapseError:
            raise
        except Exception as e:
            # An exception may be because we downloaded media in another
            # process (e.g. a UNIQUE-constraint violation on the cache row),
            # so let's check if we magically have the media.
            media_info = await self.store.get_cached_remote_media(server_name, media_id)
            if not media_info:
                raise e

        file_id = media_info["filesystem_id"]
        file_info = FileInfo(server_name, file_id)

        # We generate thumbnails even if another process downloaded the media
        # as a) it's conceivable that the other download request dies before it
        # generates thumbnails, but mainly b) we want to be sure the thumbnails
        # have finished being generated before responding to the client,
        # otherwise they'll request thumbnails and get a 404 if they're not
        # ready yet.
        await self._generate_thumbnails(
            server_name, media_id, file_id, media_info["media_type"]
        )

        responder = await self.media_storage.fetch_media(file_info)
        return responder, media_info

    async def _download_remote_file(self, server_name: str, media_id: str,) -> dict:
        """Attempt to download the remote file from the given server name,
        storing it under a freshly generated local file_id.
        Args:
            server_name: Originating server
            media_id: The media ID of the content (as defined by the
                remote server). This is different than the locally generated
                file_id the content is stored under.

        Returns:
            The media info of the file.

        Raises:
            SynapseError: (502) if the remote fetch fails, or the remote
                server's own error for a 404 response.
        """
        # The local ID under which the downloaded content will be stored.
        file_id = random_string(24)

        file_info = FileInfo(server_name=server_name, file_id=file_id)

        with self.media_storage.store_into_file(file_info) as (f, fname, finish):
            # NOTE(review): server_name/media_id are interpolated into the
            # request path unescaped — confirm they are validated upstream
            # (e.g. by the servlet URL pattern) before reaching here.
            request_path = "/".join(
                ("/_matrix/media/r0/download", server_name, media_id)
            )
            try:
                length, headers = await self.client.get_file(
                    server_name,
                    request_path,
                    output_stream=f,
                    max_size=self.max_upload_size,
                    args={
                        # tell the remote server to 404 if it doesn't
                        # recognise the server_name, to make sure we don't
                        # end up with a routing loop.
                        "allow_remote": "false"
                    },
                )
            except RequestSendFailed as e:
                logger.warning(
                    "Request failed fetching remote media %s/%s: %r",
                    server_name,
                    media_id,
                    e,
                )
                raise SynapseError(502, "Failed to fetch remote media")

            except HttpResponseException as e:
                logger.warning(
                    "HTTP error fetching remote media %s/%s: %s",
                    server_name,
                    media_id,
                    e.response,
                )
                # A remote 404 is propagated as-is; anything else becomes 502.
                if e.code == twisted.web.http.NOT_FOUND:
                    raise e.to_synapse_error()
                raise SynapseError(502, "Failed to fetch remote media")

            except SynapseError:
                logger.warning(
                    "Failed to fetch remote media %s/%s", server_name, media_id
                )
                raise

            except NotRetryingDestination:
                logger.warning("Not retrying destination %r", server_name)
                raise SynapseError(502, "Failed to fetch remote media")

            except Exception:
                logger.exception(
                    "Failed to fetch remote media %s/%s", server_name, media_id
                )
                raise SynapseError(502, "Failed to fetch remote media")

            # Flush to the storage providers before recording the DB entry.
            await finish()

            media_type = headers[b"Content-Type"][0].decode("ascii")
            upload_name = get_filename_from_headers(headers)
            time_now_ms = self.clock.time_msec()

            # Multiple remote media download requests can race (when using
            # multiple media repos), so this may throw a violation constraint
            # exception.
If it does we'll delete the newly downloaded file from # disk (as we're in the ctx manager). # # However: we've already called `finish()` so we may have also # written to the storage providers. This is preferable to the # alternative where we call `finish()` *after* this, where we could # end up having an entry in the DB but fail to write the files to # the storage providers. await self.store.store_cached_remote_media( origin=server_name, media_id=media_id, media_type=media_type, time_now_ms=self.clock.time_msec(), upload_name=upload_name, media_length=length, filesystem_id=file_id, ) logger.info("Stored remote media in file %r", fname) media_info = { "media_type": media_type, "media_length": length, "upload_name": upload_name, "created_ts": time_now_ms, "filesystem_id": file_id, } return media_info def _get_thumbnail_requirements(self, media_type): return self.thumbnail_requirements.get(media_type, ()) def _generate_thumbnail(self, thumbnailer, t_width, t_height, t_method, t_type): m_width = thumbnailer.width m_height = thumbnailer.height if m_width * m_height >= self.max_image_pixels: logger.info( "Image too large to thumbnail %r x %r > %r", m_width, m_height, self.max_image_pixels, ) return if thumbnailer.transpose_method is not None: m_width, m_height = thumbnailer.transpose() if t_method == "crop": t_byte_source = thumbnailer.crop(t_width, t_height, t_type) elif t_method == "scale": t_width, t_height = thumbnailer.aspect(t_width, t_height) t_width = min(m_width, t_width) t_height = min(m_height, t_height) t_byte_source = thumbnailer.scale(t_width, t_height, t_type) else: t_byte_source = None return t_byte_source async def generate_local_exact_thumbnail( self, media_id: str, t_width: int, t_height: int, t_method: str, t_type: str, url_cache: str, ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) try: thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: 
logger.warning( "Unable to generate a thumbnail for local media %s using a method of %s and type of %s: %s", media_id, t_method, t_type, e, ) return None t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type, ) if t_byte_source: try: file_info = FileInfo( server_name=None, file_id=media_id, url_cache=url_cache, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, thumbnail_method=t_method, thumbnail_type=t_type, ) output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: t_byte_source.close() logger.info("Stored thumbnail in file %r", output_path) t_len = os.path.getsize(output_path) await self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) return output_path # Could not generate thumbnail. return None async def generate_remote_exact_thumbnail( self, server_name: str, file_id: str, media_id: str, t_width: int, t_height: int, t_method: str, t_type: str, ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=False) ) try: thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: logger.warning( "Unable to generate a thumbnail for remote media %s from %s using a method of %s and type of %s: %s", media_id, server_name, t_method, t_type, e, ) return None t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type, ) if t_byte_source: try: file_info = FileInfo( server_name=server_name, file_id=file_id, thumbnail=True, thumbnail_width=t_width, thumbnail_height=t_height, thumbnail_method=t_method, thumbnail_type=t_type, ) output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: t_byte_source.close() logger.info("Stored thumbnail in file %r", output_path) t_len = os.path.getsize(output_path) await 
    async def _generate_thumbnails(
        self,
        server_name: Optional[str],
        media_id: str,
        file_id: str,
        media_type: str,
        url_cache: bool = False,
    ) -> Optional[dict]:
        """Generate and store thumbnails for an image.

        Args:
            server_name: The server name if remote media, else None if local
            media_id: The media ID of the content. (This is the same as
                the file_id for local content)
            file_id: Local file ID
            media_type: The content type of the file
            url_cache: If we are thumbnailing images downloaded for the URL cache,
                used exclusively by the url previewer

        Returns:
            Dict with "width" and "height" keys of original image or None if the
            media cannot be thumbnailed.
        """
        requirements = self._get_thumbnail_requirements(media_type)
        if not requirements:
            # No thumbnail sizes are configured for this content type.
            return None

        input_path = await self.media_storage.ensure_media_is_in_local_cache(
            FileInfo(server_name, file_id, url_cache=url_cache)
        )

        try:
            thumbnailer = Thumbnailer(input_path)
        except ThumbnailError as e:
            # The file could not be opened as an image: treat it as
            # "no thumbnails" rather than an error for the caller.
            logger.warning(
                "Unable to generate thumbnails for remote media %s from %s of type %s: %s",
                media_id,
                server_name,
                media_type,
                e,
            )
            return None

        m_width = thumbnailer.width
        m_height = thumbnailer.height

        # Refuse to thumbnail over-large images to bound resource usage.
        if m_width * m_height >= self.max_image_pixels:
            logger.info(
                "Image too large to thumbnail %r x %r > %r",
                m_width,
                m_height,
                self.max_image_pixels,
            )
            return None

        if thumbnailer.transpose_method is not None:
            # Transpose off the reactor thread; the returned dimensions may be
            # swapped relative to the originals.
            m_width, m_height = await defer_to_thread(
                self.hs.get_reactor(), thumbnailer.transpose
            )

        # We deduplicate the thumbnail sizes by ignoring the cropped versions if
        # they have the same dimensions of a scaled one.
        thumbnails = {}  # type: Dict[Tuple[int, int, str], str]
        for r_width, r_height, r_method, r_type in requirements:
            if r_method == "crop":
                # setdefault: an existing "scale" entry for this size wins.
                thumbnails.setdefault((r_width, r_height, r_type), r_method)
            elif r_method == "scale":
                t_width, t_height = thumbnailer.aspect(r_width, r_height)
                t_width = min(m_width, t_width)
                t_height = min(m_height, t_height)
                thumbnails[(t_width, t_height, r_type)] = r_method

        # Now we generate the thumbnails for each dimension, store it
        for (t_width, t_height, t_type), t_method in thumbnails.items():
            # Generate the thumbnail
            if t_method == "crop":
                t_byte_source = await defer_to_thread(
                    self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type
                )
            elif t_method == "scale":
                t_byte_source = await defer_to_thread(
                    self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type
                )
            else:
                logger.error("Unrecognized method: %r", t_method)
                continue

            if not t_byte_source:
                continue

            file_info = FileInfo(
                server_name=server_name,
                file_id=file_id,
                thumbnail=True,
                thumbnail_width=t_width,
                thumbnail_height=t_height,
                thumbnail_method=t_method,
                thumbnail_type=t_type,
                url_cache=url_cache,
            )

            with self.media_storage.store_into_file(file_info) as (f, fname, finish):
                try:
                    await self.media_storage.write_to_file(t_byte_source, f)
                    await finish()
                finally:
                    # Always release the image buffer, even on a write failure.
                    t_byte_source.close()

                t_len = os.path.getsize(fname)

                # Write to database
                if server_name:
                    # Multiple remote media download requests can race (when
                    # using multiple media repos), so this may throw a violation
                    # constraint exception. If it does we'll delete the newly
                    # generated thumbnail from disk (as we're in the ctx
                    # manager).
                    #
                    # However: we've already called `finish()` so we may have
                    # also written to the storage providers. This is preferable
                    # to the alternative where we call `finish()` *after* this,
                    # where we could end up having an entry in the DB but fail
                    # to write the files to the storage providers.
                    try:
                        await self.store.store_remote_media_thumbnail(
                            server_name,
                            media_id,
                            file_id,
                            t_width,
                            t_height,
                            t_type,
                            t_method,
                            t_len,
                        )
                    except Exception as e:
                        # Only swallow the error if the row was indeed written
                        # by a racing request; otherwise re-raise.
                        thumbnail_exists = await self.store.get_remote_media_thumbnail(
                            server_name,
                            media_id,
                            t_width,
                            t_height,
                            t_type,
                        )
                        if not thumbnail_exists:
                            raise e
                else:
                    await self.store.store_local_thumbnail(
                        media_id, t_width, t_height, t_type, t_method, t_len
                    )

        return {"width": m_width, "height": m_height}
    async def _remove_local_media_from_disk(
        self, media_ids: List[str]
    ) -> Tuple[List[str], int]:
        """
        Delete local or remote media from this server. Removes media files,
        any thumbnails and cached URLs.

        Args:
            media_ids: List of media_id to delete
        Returns:
            A tuple of (list of deleted media IDs, total deleted media IDs).
        """
        removed_media = []
        for media_id in media_ids:
            logger.info("Deleting media with ID '%s'", media_id)
            full_path = self.filepaths.local_media_filepath(media_id)
            try:
                os.remove(full_path)
            except OSError as e:
                logger.warning("Failed to remove file: %r: %s", full_path, e)
                if e.errno == errno.ENOENT:
                    # File already gone: still clean up thumbnails and DB rows.
                    pass
                else:
                    # Any other filesystem error: skip this media entirely.
                    continue

            thumbnail_dir = self.filepaths.local_media_thumbnail_dir(media_id)
            shutil.rmtree(thumbnail_dir, ignore_errors=True)

            # NOTE(review): this calls `delete_remote_media` with our own
            # server name even though the media is local — presumably that
            # store method also covers local rows; confirm against the store.
            await self.store.delete_remote_media(self.server_name, media_id)

            await self.store.delete_url_cache((media_id,))
            await self.store.delete_url_cache_media((media_id,))

            removed_media.append(media_id)

        return removed_media, len(removed_media)
Uploads are POSTed to a resource which returns a token which is used to GET the download:: => POST /_matrix/media/r0/upload HTTP/1.1 Content-Type: <media-type> Content-Length: <content-length> <media> <= HTTP/1.1 200 OK Content-Type: application/json { "content_uri": "mxc://<server-name>/<media-id>" } => GET /_matrix/media/r0/download/<server-name>/<media-id> HTTP/1.1 <= HTTP/1.1 200 OK Content-Type: <media-type> Content-Disposition: attachment;filename=<upload-filename> <media> Clients can get thumbnails by supplying a desired width and height and thumbnailing method:: => GET /_matrix/media/r0/thumbnail/<server_name> /<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1 <= HTTP/1.1 200 OK Content-Type: image/jpeg or image/png <thumbnail> The thumbnail methods are "crop" and "scale". "scale" trys to return an image where either the width or the height is smaller than the requested size. The client should then scale and letterbox the image if it needs to fit within a given rectangle. "crop" trys to return an image where the width and height are close to the requested size and the aspect matches the requested size. The client should scale the image if it needs to fit within a given rectangle. """ def __init__(self, hs): # If we're not configured to use it, raise if we somehow got here. if not hs.config.can_load_media_repo: raise ConfigError("Synapse is not configured to use a media repo.") super().__init__() media_repo = hs.get_media_repository() self.putChild(b"upload", UploadResource(hs, media_repo)) self.putChild(b"download", DownloadResource(hs, media_repo)) self.putChild( b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) ) if hs.config.url_preview_enabled: self.putChild( b"preview_url", PreviewUrlResource(hs, media_repo, media_repo.media_storage), ) self.putChild(b"config", MediaConfigResource(hs))
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_13
crossvul-python_data_good_1915_9
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import urllib.parse from io import BytesIO from typing import ( TYPE_CHECKING, Any, BinaryIO, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union, ) import treq from canonicaljson import encode_canonical_json from netaddr import IPAddress, IPSet from prometheus_client import Counter from zope.interface import implementer, provider from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE from twisted.internet import defer, error as twisted_error, protocol, ssl from twisted.internet.interfaces import ( IAddress, IHostResolution, IReactorPluggableNameResolver, IResolutionReceiver, ) from twisted.internet.task import Cooperator from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone from twisted.web.client import ( Agent, HTTPConnectionPool, ResponseNeverReceived, readBody, ) from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent, IBodyProducer, IResponse from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri from synapse.http.proxyagent import ProxyAgent from synapse.logging.context import make_deferred_yieldable from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.util import 
def check_against_blacklist(
    ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet
) -> bool:
    """
    Compares an IP address to allowed and disallowed IP sets.

    Args:
        ip_address: The IP address to check
        ip_whitelist: Allowed IP addresses.
        ip_blacklist: Disallowed IP addresses.

    Returns:
        True if the IP address is in the blacklist and not in the whitelist.
    """
    # An address is blocked when it is blacklisted, unless the whitelist
    # explicitly allows it.
    blacklisted = ip_address in ip_blacklist
    whitelisted = ip_whitelist is not None and ip_address in ip_whitelist
    return blacklisted and not whitelisted
    def resolveHostName(
        self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0
    ) -> IResolutionReceiver:
        """Resolve `hostname`, reporting results to `recv` only if no resolved
        address is blacklisted.

        If any resolved IP is blacklisted (and not whitelisted) the resolution
        completes with *no* addresses at all, so the caller cannot connect to
        any of them.
        """
        r = recv()
        addresses = []  # type: List[IAddress]

        def _callback() -> None:
            # Runs once the real resolver has finished: filter the buffered
            # addresses and replay them (or nothing) to the real receiver.
            r.resolutionBegan(None)

            has_bad_ip = False
            for i in addresses:
                ip_address = IPAddress(i.host)

                if check_against_blacklist(
                    ip_address, self._ip_whitelist, self._ip_blacklist
                ):
                    logger.info(
                        "Dropped %s from DNS resolution to %s due to blacklist"
                        % (ip_address, hostname)
                    )
                    has_bad_ip = True

            # if we have a blacklisted IP, we'd like to raise an error to block the
            # request, but all we can really do from here is claim that there were no
            # valid results.
            if not has_bad_ip:
                for i in addresses:
                    r.addressResolved(i)
            r.resolutionComplete()

        @provider(IResolutionReceiver)
        class EndpointReceiver:
            # Receiver handed to the real resolver: buffers every address,
            # then defers to _callback when resolution completes.
            @staticmethod
            def resolutionBegan(resolutionInProgress: IHostResolution) -> None:
                pass

            @staticmethod
            def addressResolved(address: IAddress) -> None:
                addresses.append(address)

            @staticmethod
            def resolutionComplete() -> None:
                _callback()

        self._reactor.nameResolver.resolveHostName(
            EndpointReceiver, hostname, portNumber=portNumber
        )

        return r
class BlacklistingAgentWrapper(Agent):
    """
    An Agent wrapper which will prevent access to IP addresses being accessed
    directly (without an IP address lookup).

    Hostnames are passed through untouched here; blacklisting for those is
    handled at DNS-resolution time (see _IPBlacklistingResolver).
    """

    def __init__(
        self,
        agent: IAgent,
        ip_whitelist: Optional[IPSet] = None,
        ip_blacklist: Optional[IPSet] = None,
    ):
        """
        Args:
            agent: The Agent to wrap.
            ip_whitelist: IP addresses to allow.
            ip_blacklist: IP addresses to disallow.
        """
        self._agent = agent
        self._ip_whitelist = ip_whitelist
        self._ip_blacklist = ip_blacklist

    def request(
        self,
        method: bytes,
        uri: bytes,
        headers: Optional[Headers] = None,
        bodyProducer: Optional[IBodyProducer] = None,
    ) -> defer.Deferred:
        # Fail the request up front if the URI host is an IP literal on the
        # blacklist; otherwise delegate to the wrapped agent.
        h = urllib.parse.urlparse(uri.decode("ascii"))

        try:
            ip_address = IPAddress(h.hostname)

            if check_against_blacklist(
                ip_address, self._ip_whitelist, self._ip_blacklist
            ):
                logger.info("Blocking access to %s due to blacklist" % (ip_address,))
                e = SynapseError(403, "IP address blocked by IP blacklist entry")
                return defer.fail(Failure(e))
        except Exception:
            # Not an IP literal (or any other parse failure): deliberately
            # fall through and make the request as normal.
            pass

        return self._agent.request(
            method, uri, headers=headers, bodyProducer=bodyProducer
        )
http_proxy: proxy server to use for http connections. host[:port] https_proxy: proxy server to use for https connections. host[:port] """ self.hs = hs self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist self._extra_treq_args = treq_args self.user_agent = hs.version_string self.clock = hs.get_clock() if hs.config.user_agent_suffix: self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix) # We use this for our body producers to ensure that they use the correct # reactor. self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor())) self.user_agent = self.user_agent.encode("ascii") if self._ip_blacklist: # If we have an IP blacklist, we need to use a DNS resolver which # filters out blacklisted IP addresses, to prevent DNS rebinding. self.reactor = BlacklistingReactorWrapper( hs.get_reactor(), self._ip_whitelist, self._ip_blacklist ) else: self.reactor = hs.get_reactor() # the pusher makes lots of concurrent SSL connections to sygnal, and # tends to do so in batches, so we need to allow the pool to keep # lots of idle connections around. pool = HTTPConnectionPool(self.reactor) # XXX: The justification for using the cache factor here is that larger instances # will need both more cache and more connections. # Still, this should probably be a separate dial pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5)) pool.cachedConnectionTimeout = 2 * 60 self.agent = ProxyAgent( self.reactor, connectTimeout=15, contextFactory=self.hs.get_http_client_context_factory(), pool=pool, http_proxy=http_proxy, https_proxy=https_proxy, ) if self._ip_blacklist: # If we have an IP blacklist, we then install the blacklisting Agent # which prevents direct access to IP addresses, that are not caught # by the DNS resolution. 
    async def request(
        self,
        method: str,
        uri: str,
        data: Optional[bytes] = None,
        headers: Optional[Headers] = None,
    ) -> IResponse:
        """Send an HTTP request via the configured agent, with metrics,
        tracing and a 60s header timeout.

        Args:
            method: HTTP method to use.
            uri: URI to query.
            data: Data to send in the request body, if applicable.
            headers: Request headers.

        Returns:
            Response object, once the headers have been read.

        Raises:
            RequestTimedOutError if the request times out before the headers are read

        """
        outgoing_requests_counter.labels(method).inc()

        # log request but strip `access_token` (AS requests for example include this)
        logger.debug("Sending request %s %s", method, redact_uri(uri))

        with start_active_span(
            "outgoing-client-request",
            tags={
                tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
                tags.HTTP_METHOD: method,
                tags.HTTP_URL: uri,
            },
            finish_on_close=True,
        ):
            try:
                body_producer = None
                if data is not None:
                    body_producer = QuieterFileBodyProducer(
                        BytesIO(data),
                        cooperator=self._cooperator,
                    )

                request_deferred = treq.request(
                    method,
                    uri,
                    agent=self.agent,
                    data=body_producer,
                    headers=headers,
                    **self._extra_treq_args,
                )  # type: defer.Deferred

                # we use our own timeout mechanism rather than treq's as a workaround
                # for https://twistedmatrix.com/trac/ticket/9534.
                request_deferred = timeout_deferred(
                    request_deferred,
                    60,
                    self.hs.get_reactor(),
                )

                # turn timeouts into RequestTimedOutErrors
                request_deferred.addErrback(_timeout_to_request_timed_out_error)

                response = await make_deferred_yieldable(request_deferred)

                incoming_responses_counter.labels(method, response.code).inc()
                logger.info(
                    "Received response to %s %s: %s",
                    method,
                    redact_uri(uri),
                    response.code,
                )
                return response
            except Exception as e:
                incoming_responses_counter.labels(method, "ERR").inc()
                # NOTE(review): `e.args[0]` raises IndexError for exceptions
                # constructed with no arguments — presumably everything raised
                # on this path carries a message; confirm.
                logger.info(
                    "Error sending request to %s %s: %s %s",
                    method,
                    redact_uri(uri),
                    type(e).__name__,
                    e.args[0],
                )
                set_tag(tags.ERROR, True)
                set_tag("error_reason", e.args[0])
                raise
logger.debug("post_urlencoded_get_json args: %s", args) query_bytes = encode_query_args(args) actual_headers = { b"Content-Type": [b"application/x-www-form-urlencoded"], b"User-Agent": [self.user_agent], b"Accept": [b"application/json"], } if headers: actual_headers.update(headers) # type: ignore response = await self.request( "POST", uri, headers=Headers(actual_headers), data=query_bytes ) body = await make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: return json_decoder.decode(body.decode("utf-8")) else: raise HttpResponseException( response.code, response.phrase.decode("ascii", errors="replace"), body ) async def post_json_get_json( self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None ) -> Any: """ Args: uri: URI to query. post_json: request body, to be encoded as json headers: a map from header name to a list of values for that header Returns: parsed json Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException: On a non-2xx HTTP response. ValueError: if the response was not JSON """ json_str = encode_canonical_json(post_json) logger.debug("HTTP POST %s -> %s", json_str, uri) actual_headers = { b"Content-Type": [b"application/json"], b"User-Agent": [self.user_agent], b"Accept": [b"application/json"], } if headers: actual_headers.update(headers) # type: ignore response = await self.request( "POST", uri, headers=Headers(actual_headers), data=json_str ) body = await make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: return json_decoder.decode(body.decode("utf-8")) else: raise HttpResponseException( response.code, response.phrase.decode("ascii", errors="replace"), body ) async def get_json( self, uri: str, args: Optional[QueryParams] = None, headers: Optional[RawHeaders] = None, ) -> Any: """Gets some json from the given URI. 
Args: uri: The URI to request, not including query parameters args: A dictionary used to create query string headers: a map from header name to a list of values for that header Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException On a non-2xx HTTP response. ValueError: if the response was not JSON """ actual_headers = {b"Accept": [b"application/json"]} if headers: actual_headers.update(headers) # type: ignore body = await self.get_raw(uri, args, headers=headers) return json_decoder.decode(body.decode("utf-8")) async def put_json( self, uri: str, json_body: Any, args: Optional[QueryParams] = None, headers: RawHeaders = None, ) -> Any: """Puts some json to the given URI. Args: uri: The URI to request, not including query parameters json_body: The JSON to put in the HTTP body, args: A dictionary used to create query strings headers: a map from header name to a list of values for that header Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException On a non-2xx HTTP response. 
    async def get_raw(
        self,
        uri: str,
        args: Optional[QueryParams] = None,
        headers: Optional[RawHeaders] = None,
    ) -> bytes:
        """Gets raw text from the given URI.

        Args:
            uri: The URI to request, not including query parameters
            args: A dictionary used to create query strings
            headers: a map from header name to a list of values for that header
        Returns:
            Succeeds when we get a 2xx HTTP response, with the
            HTTP body as bytes.
        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
               are received. Note there is currently no timeout on reading the response
               body.
            HttpResponseException on a non-2xx HTTP response.
        """
        if args:
            # doseq=True: list values become repeated query parameters.
            query_str = urllib.parse.urlencode(args, True)
            uri = "%s?%s" % (uri, query_str)

        # Default User-Agent; caller-supplied headers may override it.
        actual_headers = {b"User-Agent": [self.user_agent]}
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request("GET", uri, headers=Headers(actual_headers))

        body = await make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            return body
        else:
            raise HttpResponseException(
                response.code, response.phrase.decode("ascii", errors="replace"), body
            )
    async def get_file(
        self,
        url: str,
        output_stream: BinaryIO,
        max_size: Optional[int] = None,
        headers: Optional[RawHeaders] = None,
    ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
        """GETs a file from a given URL

        Args:
            url: The URL to GET
            output_stream: File to write the response body to.
            max_size: Maximum number of body bytes to accept, or None for no limit.
            headers: A map from header name to a list of values for that header
        Returns:
            A tuple of the file length, dict of the response headers, absolute URI
            of the response and HTTP response code.

        Raises:
            RequestTimedOutError: if there is a timeout before the response headers
                are received. Note there is currently no timeout on reading the response
                body.
            SynapseError: if the response is not a 2xx, the remote file is too large, or
                another exception happens during the download.
        """
        actual_headers = {b"User-Agent": [self.user_agent]}
        if headers:
            actual_headers.update(headers)  # type: ignore

        response = await self.request("GET", url, headers=Headers(actual_headers))

        resp_headers = dict(response.headers.getAllRawHeaders())

        # Reject on the declared Content-Length before downloading anything;
        # the limit is enforced again while streaming (readBodyToFile), since
        # Content-Length may be absent or wrong.
        if (
            b"Content-Length" in resp_headers
            and max_size
            and int(resp_headers[b"Content-Length"][0]) > max_size
        ):
            logger.warning("Requested URL is too large > %r bytes" % (max_size,))
            raise SynapseError(
                502,
                "Requested file is too large > %r bytes" % (max_size,),
                Codes.TOO_LARGE,
            )

        if response.code > 299:
            logger.warning("Got %d when downloading %s" % (response.code, url))
            raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

        # TODO: if our Content-Type is HTML or something, just read the first
        # N bytes into RAM rather than saving it all to disk only to read it
        # straight back in again

        try:
            length = await make_deferred_yieldable(
                readBodyToFile(response, output_stream, max_size)
            )
        except SynapseError:
            # This can happen e.g. because the body is too large.
            raise
        except Exception as e:
            raise SynapseError(502, ("Failed to download remote body: %s" % e)) from e

        return (
            length,
            resp_headers,
            response.request.absoluteURI.decode("ascii"),
            response.code,
        )
def encode_query_args(args: Optional[Mapping[str, Union[str, List[str]]]]) -> bytes:
    """
    Encodes a map of query arguments to bytes which can be appended to a URL.

    Args:
        args: The query arguments, a mapping of string to string or list of strings.

    Returns:
        The query arguments encoded as bytes.
    """
    if args is None:
        return b""

    # Normalise each value to a list of utf-8 encoded byte strings, then let
    # urlencode (doseq=True) emit one key=value pair per list element.
    encoded = {
        key: [value.encode("utf8") for value in ([vs] if isinstance(vs, str) else vs)]
        for key, vs in args.items()
    }
    return urllib.parse.urlencode(encoded, True).encode("utf8")
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_9
crossvul-python_data_bad_1731_1
#!/bin/python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Auth, Mail, PluginManager and various utilities ------------------------------------------------ """ import base64 try: import cPickle as pickle except: import pickle import datetime import thread import logging import sys import glob import os import re import time import traceback import smtplib import urllib import urllib2 import Cookie import cStringIO import ConfigParser import email.utils import random from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string, Charset from gluon.contenttype import contenttype from gluon.storage import Storage, StorageList, Settings, Messages from gluon.utils import web2py_uuid from gluon.fileutils import read_file, check_credentials from gluon import * from gluon.contrib.autolinks import expand_one from gluon.contrib.markmin.markmin2html import \ replace_at_urls, replace_autolinks, replace_components from pydal.objects import Row, Set, Query import gluon.serializers as serializers Table = DAL.Table Field = DAL.Field try: # try stdlib (Python 2.6) import json as json_parser except ImportError: try: # try external module import simplejson as json_parser except: # fallback to pure-Python module import gluon.contrib.simplejson as json_parser __all__ = ['Mail', 'Auth', 'Recaptcha', 'Recaptcha2', 'Crud', 'Service', 'Wiki', 'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate'] ### mind there are two loggers here (logger and crud.settings.logger)! 
def validators(*a):
    """Flatten the given arguments one level: list/tuple arguments are
    spliced into the result, everything else is appended as-is."""
    flat = []
    for item in a:
        flat.extend(item if isinstance(item, (list, tuple)) else [item])
    return flat
They are available under following fields:: mail.settings.server mail.settings.sender mail.settings.login mail.settings.timeout = 60 # seconds (default) When server is 'logging', email is logged but not sent (debug mode) Optionally you can use PGP encryption or X509:: mail.settings.cipher_type = None mail.settings.gpg_home = None mail.settings.sign = True mail.settings.sign_passphrase = None mail.settings.encrypt = True mail.settings.x509_sign_keyfile = None mail.settings.x509_sign_certfile = None mail.settings.x509_sign_chainfile = None mail.settings.x509_nocerts = False mail.settings.x509_crypt_certfiles = None cipher_type : None gpg - need a python-pyme package and gpgme lib x509 - smime gpg_home : you can set a GNUPGHOME environment variable to specify home of gnupg sign : sign the message (True or False) sign_passphrase : passphrase for key signing encrypt : encrypt the message (True or False). It defaults to True ... x509 only ... x509_sign_keyfile : the signers private key filename or string containing the key. (PEM format) x509_sign_certfile: the signers certificate filename or string containing the cert. (PEM format) x509_sign_chainfile: sets the optional all-in-one file where you can assemble the certificates of Certification Authorities (CA) which form the certificate chain of email certificate. It can be a string containing the certs to. (PEM format) x509_nocerts : if True then no attached certificate in mail x509_crypt_certfiles: the certificates file or strings to encrypt the messages with can be a file name / string or a list of file names / strings (PEM format) Examples: Create Mail object with authentication data for remote server:: mail = Mail('example.com:25', 'me@example.com', 'me:password') Notice for GAE users: attachments have an automatic content_id='attachment-i' where i is progressive number in this way the can be referenced from the HTML as <img src="cid:attachment-0" /> etc. 
""" class Attachment(MIMEBase.MIMEBase): """ Email attachment Args: payload: path to file or file-like object with read() method filename: name of the attachment stored in message; if set to None, it will be fetched from payload path; file-like object payload must have explicit filename specified content_id: id of the attachment; automatically contained within `<` and `>` content_type: content type of the attachment; if set to None, it will be fetched from filename using gluon.contenttype module encoding: encoding of all strings passed to this function (except attachment body) Content ID is used to identify attachments within the html body; in example, attached image with content ID 'photo' may be used in html message as a source of img tag `<img src="cid:photo" />`. Example:: Create attachment from text file:: attachment = Mail.Attachment('/path/to/file.txt') Content-Type: text/plain MIME-Version: 1.0 Content-Disposition: attachment; filename="file.txt" Content-Transfer-Encoding: base64 SOMEBASE64CONTENT= Create attachment from image file with custom filename and cid:: attachment = Mail.Attachment('/path/to/file.png', filename='photo.png', content_id='photo') Content-Type: image/png MIME-Version: 1.0 Content-Disposition: attachment; filename="photo.png" Content-Id: <photo> Content-Transfer-Encoding: base64 SOMEOTHERBASE64CONTENT= """ def __init__( self, payload, filename=None, content_id=None, content_type=None, encoding='utf-8'): if isinstance(payload, str): if filename is None: filename = os.path.basename(payload) payload = read_file(payload, 'rb') else: if filename is None: raise Exception('Missing attachment name') payload = payload.read() filename = filename.encode(encoding) if content_type is None: content_type = contenttype(filename) self.my_filename = filename self.my_payload = payload MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1)) self.set_payload(payload) self['Content-Disposition'] = 'attachment; filename="%s"' % filename if not 
content_id is None: self['Content-Id'] = '<%s>' % content_id.encode(encoding) Encoders.encode_base64(self) def __init__(self, server=None, sender=None, login=None, tls=True): settings = self.settings = Settings() settings.server = server settings.sender = sender settings.login = login settings.tls = tls settings.timeout = 60 # seconds settings.hostname = None settings.ssl = False settings.cipher_type = None settings.gpg_home = None settings.sign = True settings.sign_passphrase = None settings.encrypt = True settings.x509_sign_keyfile = None settings.x509_sign_certfile = None settings.x509_sign_chainfile = None settings.x509_nocerts = False settings.x509_crypt_certfiles = None settings.debug = False settings.lock_keys = True self.result = {} self.error = None def send(self, to, subject='[no subject]', message='[no message]', attachments=None, cc=None, bcc=None, reply_to=None, sender=None, encoding='utf-8', raw=False, headers={}, from_address=None, cipher_type=None, sign=None, sign_passphrase=None, encrypt=None, x509_sign_keyfile=None, x509_sign_chainfile=None, x509_sign_certfile=None, x509_crypt_certfiles=None, x509_nocerts=None ): """ Sends an email using data specified in constructor Args: to: list or tuple of receiver addresses; will also accept single object subject: subject of the email message: email body text; depends on type of passed object: - if 2-list or 2-tuple is passed: first element will be source of plain text while second of html text; - otherwise: object will be the only source of plain text and html source will be set to None If text or html source is: - None: content part will be ignored, - string: content part will be set to it, - file-like object: content part will be fetched from it using it's read() method attachments: list or tuple of Mail.Attachment objects; will also accept single object cc: list or tuple of carbon copy receiver addresses; will also accept single object bcc: list or tuple of blind carbon copy receiver addresses; will also 
accept single object reply_to: address to which reply should be composed encoding: encoding of all strings passed to this method (including message bodies) headers: dictionary of headers to refine the headers just before sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}` from_address: address to appear in the 'From:' header, this is not the envelope sender. If not specified the sender will be used cipher_type : gpg - need a python-pyme package and gpgme lib x509 - smime gpg_home : you can set a GNUPGHOME environment variable to specify home of gnupg sign : sign the message (True or False) sign_passphrase : passphrase for key signing encrypt : encrypt the message (True or False). It defaults to True. ... x509 only ... x509_sign_keyfile : the signers private key filename or string containing the key. (PEM format) x509_sign_certfile: the signers certificate filename or string containing the cert. (PEM format) x509_sign_chainfile: sets the optional all-in-one file where you can assemble the certificates of Certification Authorities (CA) which form the certificate chain of email certificate. It can be a string containing the certs to. 
(PEM format) x509_nocerts : if True then no attached certificate in mail x509_crypt_certfiles: the certificates file or strings to encrypt the messages with can be a file name / string or a list of file names / strings (PEM format) Examples: Send plain text message to single address:: mail.send('you@example.com', 'Message subject', 'Plain text body of the message') Send html message to single address:: mail.send('you@example.com', 'Message subject', '<html>Plain text body of the message</html>') Send text and html message to three addresses (two in cc):: mail.send('you@example.com', 'Message subject', ('Plain text body', '<html>html body</html>'), cc=['other1@example.com', 'other2@example.com']) Send html only message with image attachment available from the message by 'photo' content id:: mail.send('you@example.com', 'Message subject', (None, '<html><img src="cid:photo" /></html>'), Mail.Attachment('/path/to/photo.jpg' content_id='photo')) Send email with two attachments and no body text:: mail.send('you@example.com, 'Message subject', None, [Mail.Attachment('/path/to/fist.file'), Mail.Attachment('/path/to/second.file')]) Returns: True on success, False on failure. 
Before return, method updates two object's fields: - self.result: return value of smtplib.SMTP.sendmail() or GAE's mail.send_mail() method - self.error: Exception message or None if above was successful """ # We don't want to use base64 encoding for unicode mail Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8') def encode_header(key): if [c for c in key if 32 > ord(c) or ord(c) > 127]: return Header.Header(key.encode('utf-8'), 'utf-8') else: return key # encoded or raw text def encoded_or_raw(text): if raw: text = encode_header(text) return text sender = sender or self.settings.sender if not isinstance(self.settings.server, str): raise Exception('Server address not specified') if not isinstance(sender, str): raise Exception('Sender address not specified') if not raw and attachments: # Use multipart/mixed if there is attachments payload_in = MIMEMultipart.MIMEMultipart('mixed') elif raw: # no encoding configuration for raw messages if not isinstance(message, basestring): message = message.read() if isinstance(message, unicode): text = message.encode('utf-8') elif not encoding == 'utf-8': text = message.decode(encoding).encode('utf-8') else: text = message # No charset passed to avoid transport encoding # NOTE: some unicode encoded strings will produce # unreadable mail contents. 
payload_in = MIMEText.MIMEText(text) if to: if not isinstance(to, (list, tuple)): to = [to] else: raise Exception('Target receiver address not specified') if cc: if not isinstance(cc, (list, tuple)): cc = [cc] if bcc: if not isinstance(bcc, (list, tuple)): bcc = [bcc] if message is None: text = html = None elif isinstance(message, (list, tuple)): text, html = message elif message.strip().startswith('<html') and \ message.strip().endswith('</html>'): text = self.settings.server == 'gae' and message or None html = message else: text = message html = None if (not text is None or not html is None) and (not raw): if not text is None: if not isinstance(text, basestring): text = text.read() if isinstance(text, unicode): text = text.encode('utf-8') elif not encoding == 'utf-8': text = text.decode(encoding).encode('utf-8') if not html is None: if not isinstance(html, basestring): html = html.read() if isinstance(html, unicode): html = html.encode('utf-8') elif not encoding == 'utf-8': html = html.decode(encoding).encode('utf-8') # Construct mime part only if needed if text is not None and html: # We have text and html we need multipart/alternative attachment = MIMEMultipart.MIMEMultipart('alternative') attachment.attach(MIMEText.MIMEText(text, _charset='utf-8')) attachment.attach( MIMEText.MIMEText(html, 'html', _charset='utf-8')) elif text is not None: attachment = MIMEText.MIMEText(text, _charset='utf-8') elif html: attachment = \ MIMEText.MIMEText(html, 'html', _charset='utf-8') if attachments: # If there is attachments put text and html into # multipart/mixed payload_in.attach(attachment) else: # No attachments no multipart/mixed payload_in = attachment if (attachments is None) or raw: pass elif isinstance(attachments, (list, tuple)): for attachment in attachments: payload_in.attach(attachment) else: payload_in.attach(attachments) ####################################################### # CIPHER # ####################################################### cipher_type = 
cipher_type or self.settings.cipher_type sign = sign if sign != None else self.settings.sign sign_passphrase = sign_passphrase or self.settings.sign_passphrase encrypt = encrypt if encrypt != None else self.settings.encrypt ####################################################### # GPGME # ####################################################### if cipher_type == 'gpg': if self.settings.gpg_home: # Set GNUPGHOME environment variable to set home of gnupg import os os.environ['GNUPGHOME'] = self.settings.gpg_home if not sign and not encrypt: self.error = "No sign and no encrypt is set but cipher type to gpg" return False # need a python-pyme package and gpgme lib from pyme import core, errors from pyme.constants.sig import mode ############################################ # sign # ############################################ if sign: import string core.check_version(None) pin = string.replace(payload_in.as_string(), '\n', '\r\n') plain = core.Data(pin) sig = core.Data() c = core.Context() c.set_armor(1) c.signers_clear() # search for signing key for From: for sigkey in c.op_keylist_all(sender, 1): if sigkey.can_sign: c.signers_add(sigkey) if not c.signers_enum(0): self.error = 'No key for signing [%s]' % sender return False c.set_passphrase_cb(lambda x, y, z: sign_passphrase) try: # make a signature c.op_sign(plain, sig, mode.DETACH) sig.seek(0, 0) # make it part of the email payload = MIMEMultipart.MIMEMultipart('signed', boundary=None, _subparts=None, **dict( micalg="pgp-sha1", protocol="application/pgp-signature")) # insert the origin payload payload.attach(payload_in) # insert the detached signature p = MIMEBase.MIMEBase("application", 'pgp-signature') p.set_payload(sig.read()) payload.attach(p) # it's just a trick to handle the no encryption case payload_in = payload except errors.GPGMEError, ex: self.error = "GPG error: %s" % ex.getstring() return False ############################################ # encrypt # ############################################ if 
encrypt: core.check_version(None) plain = core.Data(payload_in.as_string()) cipher = core.Data() c = core.Context() c.set_armor(1) # collect the public keys for encryption recipients = [] rec = to[:] if cc: rec.extend(cc) if bcc: rec.extend(bcc) for addr in rec: c.op_keylist_start(addr, 0) r = c.op_keylist_next() if r is None: self.error = 'No key for [%s]' % addr return False recipients.append(r) try: # make the encryption c.op_encrypt(recipients, 1, plain, cipher) cipher.seek(0, 0) # make it a part of the email payload = MIMEMultipart.MIMEMultipart('encrypted', boundary=None, _subparts=None, **dict(protocol="application/pgp-encrypted")) p = MIMEBase.MIMEBase("application", 'pgp-encrypted') p.set_payload("Version: 1\r\n") payload.attach(p) p = MIMEBase.MIMEBase("application", 'octet-stream') p.set_payload(cipher.read()) payload.attach(p) except errors.GPGMEError, ex: self.error = "GPG error: %s" % ex.getstring() return False ####################################################### # X.509 # ####################################################### elif cipher_type == 'x509': if not sign and not encrypt: self.error = "No sign and no encrypt is set but cipher type to x509" return False import os x509_sign_keyfile = x509_sign_keyfile or\ self.settings.x509_sign_keyfile x509_sign_chainfile = x509_sign_chainfile or\ self.settings.x509_sign_chainfile x509_sign_certfile = x509_sign_certfile or\ self.settings.x509_sign_certfile or\ x509_sign_keyfile or\ self.settings.x509_sign_certfile # crypt certfiles could be a string or a list x509_crypt_certfiles = x509_crypt_certfiles or\ self.settings.x509_crypt_certfiles x509_nocerts = x509_nocerts or\ self.settings.x509_nocerts # need m2crypto try: from M2Crypto import BIO, SMIME, X509 except Exception, e: self.error = "Can't load M2Crypto module" return False msg_bio = BIO.MemoryBuffer(payload_in.as_string()) s = SMIME.SMIME() # SIGN if sign: # key for signing try: keyfile_bio = BIO.openfile(x509_sign_keyfile)\ if 
os.path.isfile(x509_sign_keyfile)\ else BIO.MemoryBuffer(x509_sign_keyfile) sign_certfile_bio = BIO.openfile(x509_sign_certfile)\ if os.path.isfile(x509_sign_certfile)\ else BIO.MemoryBuffer(x509_sign_certfile) s.load_key_bio(keyfile_bio, sign_certfile_bio, callback=lambda x: sign_passphrase) if x509_sign_chainfile: sk = X509.X509_Stack() chain = X509.load_cert(x509_sign_chainfile)\ if os.path.isfile(x509_sign_chainfile)\ else X509.load_cert_string(x509_sign_chainfile) sk.push(chain) s.set_x509_stack(sk) except Exception, e: self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e) return False try: if x509_nocerts: flags = SMIME.PKCS7_NOCERTS else: flags = 0 if not encrypt: flags += SMIME.PKCS7_DETACHED p7 = s.sign(msg_bio, flags=flags) msg_bio = BIO.MemoryBuffer(payload_in.as_string( )) # Recreate coz sign() has consumed it. except Exception, e: self.error = "Something went wrong on signing: <%s> %s" % ( str(e), str(flags)) return False # ENCRYPT if encrypt: try: sk = X509.X509_Stack() if not isinstance(x509_crypt_certfiles, (list, tuple)): x509_crypt_certfiles = [x509_crypt_certfiles] # make an encryption cert's stack for crypt_certfile in x509_crypt_certfiles: certfile = X509.load_cert(crypt_certfile)\ if os.path.isfile(crypt_certfile)\ else X509.load_cert_string(crypt_certfile) sk.push(certfile) s.set_x509_stack(sk) s.set_cipher(SMIME.Cipher('des_ede3_cbc')) tmp_bio = BIO.MemoryBuffer() if sign: s.write(tmp_bio, p7) else: tmp_bio.write(payload_in.as_string()) p7 = s.encrypt(tmp_bio) except Exception, e: self.error = "Something went wrong on encrypting: <%s>" % str(e) return False # Final stage in sign and encryption out = BIO.MemoryBuffer() if encrypt: s.write(out, p7) else: if sign: s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED) else: out.write('\r\n') out.write(payload_in.as_string()) out.close() st = str(out.read()) payload = message_from_string(st) else: # no cryptography process as usual payload = payload_in if from_address: 
payload['From'] = encoded_or_raw(from_address.decode(encoding)) else: payload['From'] = encoded_or_raw(sender.decode(encoding)) origTo = to[:] if to: payload['To'] = encoded_or_raw(', '.join(to).decode(encoding)) if reply_to: payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding)) if cc: payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding)) to.extend(cc) if bcc: to.extend(bcc) payload['Subject'] = encoded_or_raw(subject.decode(encoding)) payload['Date'] = email.utils.formatdate() for k, v in headers.iteritems(): payload[k] = encoded_or_raw(v.decode(encoding)) result = {} try: if self.settings.server == 'logging': logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % ('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)) elif self.settings.server == 'gae': xcc = dict() if cc: xcc['cc'] = cc if bcc: xcc['bcc'] = bcc if reply_to: xcc['reply_to'] = reply_to from google.appengine.api import mail attachments = attachments and [mail.Attachment( a.my_filename, a.my_payload, contebt_id='<attachment-%s>' % k ) for k,a in enumerate(attachments) if not raw] if attachments: result = mail.send_mail( sender=sender, to=origTo, subject=unicode(subject), body=unicode(text), html=html, attachments=attachments, **xcc) elif html and (not raw): result = mail.send_mail( sender=sender, to=origTo, subject=unicode(subject), body=unicode(text), html=html, **xcc) else: result = mail.send_mail( sender=sender, to=origTo, subject=unicode(subject), body=unicode(text), **xcc) else: smtp_args = self.settings.server.split(':') kwargs = dict(timeout=self.settings.timeout) if self.settings.ssl: server = smtplib.SMTP_SSL(*smtp_args, **kwargs) else: server = smtplib.SMTP(*smtp_args, **kwargs) if self.settings.tls and not self.settings.ssl: server.ehlo(self.settings.hostname) server.starttls() server.ehlo(self.settings.hostname) if self.settings.login: server.login(*self.settings.login.split(':', 1)) result = server.sendmail( sender, to, 
payload.as_string()) server.quit() except Exception, e: logger.warn('Mail.send failure:%s' % e) self.result = result self.error = e return False self.result = result self.error = None return True class Recaptcha(DIV): """ Examples: Use as:: form = FORM(Recaptcha(public_key='...',private_key='...')) or:: form = SQLFORM(...) form.append(Recaptcha(public_key='...',private_key='...')) """ API_SSL_SERVER = 'https://www.google.com/recaptcha/api' API_SERVER = 'http://www.google.com/recaptcha/api' VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify' def __init__(self, request=None, public_key='', private_key='', use_ssl=False, error=None, error_message='invalid', label='Verify:', options='', comment='', ajax=False ): request = request or current.request self.request_vars = request and request.vars or current.request.vars self.remote_addr = request.env.remote_addr self.public_key = public_key self.private_key = private_key self.use_ssl = use_ssl self.error = error self.errors = Storage() self.error_message = error_message self.components = [] self.attributes = {} self.label = label self.options = options self.comment = comment self.ajax = ajax def _validate(self): # for local testing: recaptcha_challenge_field = \ self.request_vars.recaptcha_challenge_field recaptcha_response_field = \ self.request_vars.recaptcha_response_field private_key = self.private_key remoteip = self.remote_addr if not (recaptcha_response_field and recaptcha_challenge_field and len(recaptcha_response_field) and len(recaptcha_challenge_field)): self.errors['captcha'] = self.error_message return False params = urllib.urlencode({ 'privatekey': private_key, 'remoteip': remoteip, 'challenge': recaptcha_challenge_field, 'response': recaptcha_response_field, }) request = urllib2.Request( url=self.VERIFY_SERVER, data=params, headers={'Content-type': 'application/x-www-form-urlencoded', 'User-agent': 'reCAPTCHA Python'}) httpresp = urllib2.urlopen(request) return_values = httpresp.read().splitlines() 
httpresp.close() return_code = return_values[0] if return_code == 'true': del self.request_vars.recaptcha_challenge_field del self.request_vars.recaptcha_response_field self.request_vars.captcha = '' return True else: # In case we get an error code, store it so we can get an error message # from the /api/challenge URL as described in the reCAPTCHA api docs. self.error = return_values[1] self.errors['captcha'] = self.error_message return False def xml(self): public_key = self.public_key use_ssl = self.use_ssl error_param = '' if self.error: error_param = '&error=%s' % self.error if use_ssl: server = self.API_SSL_SERVER else: server = self.API_SERVER if not self.ajax: captcha = DIV( SCRIPT("var RecaptchaOptions = {%s};" % self.options), SCRIPT(_type="text/javascript", _src="%s/challenge?k=%s%s" % (server, public_key, error_param)), TAG.noscript( IFRAME( _src="%s/noscript?k=%s%s" % ( server, public_key, error_param), _height="300", _width="500", _frameborder="0"), BR(), INPUT( _type='hidden', _name='recaptcha_response_field', _value='manual_challenge')), _id='recaptcha') else: #use Google's ajax interface, needed for LOADed components url_recaptcha_js = "%s/js/recaptcha_ajax.js" % server RecaptchaOptions = "var RecaptchaOptions = {%s}" % self.options script = """%(options)s; jQuery.getScript('%(url)s',function() { Recaptcha.create('%(public_key)s', 'recaptcha',jQuery.extend(RecaptchaOptions,{'callback':Recaptcha.focus_response_field})) }) """ % ({'options': RecaptchaOptions, 'url': url_recaptcha_js, 'public_key': public_key}) captcha = DIV( SCRIPT( script, _type="text/javascript", ), TAG.noscript( IFRAME( _src="%s/noscript?k=%s%s" % ( server, public_key, error_param), _height="300", _width="500", _frameborder="0"), BR(), INPUT( _type='hidden', _name='recaptcha_response_field', _value='manual_challenge')), _id='recaptcha') if not self.errors.captcha: return XML(captcha).xml() else: captcha.append(DIV(self.errors['captcha'], _class='error')) return XML(captcha).xml() 
class Recaptcha2(DIV):
    """
    Experimental: Creates a DIV holding the newer Recaptcha from Google (v2)

    Args:
        request : the request. If not passed, uses current request
        public_key : the public key Google gave you
        private_key : the private key Google gave you
        error_message : the error message to show if verification fails
        label : the label to use
        options (dict) : takes these parameters

            - hl
            - theme
            - type
            - tabindex
            - callback
            - expired-callback

            see https://developers.google.com/recaptcha/docs/display for
            docs about those

        comment : the comment

    Examples:
        Use as::

            form = FORM(Recaptcha2(public_key='...', private_key='...'))

        or::

            form = SQLFORM(...)
            form.append(Recaptcha2(public_key='...', private_key='...'))

        to protect the login page instead, use::

            from gluon.tools import Recaptcha2
            auth.settings.captcha = Recaptcha2(request,
                                               public_key='...',
                                               private_key='...')

    """

    API_URI = 'https://www.google.com/recaptcha/api.js'
    VERIFY_SERVER = 'https://www.google.com/recaptcha/api/siteverify'

    def __init__(self,
                 request=None,
                 public_key='',
                 private_key='',
                 error_message='invalid',
                 label='Verify:',
                 options=None,
                 comment='',
                 ):
        request = request or current.request
        self.request_vars = request and request.vars or current.request.vars
        self.remote_addr = request.env.remote_addr
        self.public_key = public_key
        self.private_key = private_key
        self.errors = Storage()
        self.error_message = error_message
        self.components = []
        self.attributes = {}
        self.label = label
        self.options = options or {}
        self.comment = comment

    def _validate(self):
        # Verify the user's response against Google's siteverify endpoint.
        # Returns True on success; on any failure records
        # self.errors['captcha'] and returns False.
        recaptcha_response_field = self.request_vars.pop('g-recaptcha-response', None)
        remoteip = self.remote_addr
        if not recaptcha_response_field:
            self.errors['captcha'] = self.error_message
            return False
        params = urllib.urlencode({
            'secret': self.private_key,
            'remoteip': remoteip,
            'response': recaptcha_response_field,
        })
        request = urllib2.Request(
            url=self.VERIFY_SERVER,
            data=params,
            headers={'Content-type': 'application/x-www-form-urlencoded',
                     'User-agent': 'reCAPTCHA Python'})
        httpresp = urllib2.urlopen(request)
        content = httpresp.read()
        httpresp.close()
        try:
            response_dict = json_parser.loads(content)
        # NOTE(review): bare except silently maps any parse/shape error to
        # "invalid captcha"; presumably intentional (fail closed) — confirm.
        except:
            self.errors['captcha'] = self.error_message
            return False
        if response_dict.get('success', False):
            self.request_vars.captcha = ''
            return True
        else:
            self.errors['captcha'] = self.error_message
            return False

    def xml(self):
        # Render the v2 widget: the api.js script tag, the g-recaptcha DIV
        # (options become data-* attributes) and a <noscript> iframe
        # fallback per Google's documented markup.
        api_uri = self.API_URI
        hl = self.options.pop('hl', None)
        if hl:
            api_uri = self.API_URI + '?hl=%s' % hl
        public_key = self.public_key
        self.options['sitekey'] = public_key
        captcha = DIV(
            SCRIPT(_src=api_uri, _async='', _defer=''),
            DIV(_class="g-recaptcha", data=self.options),
            TAG.noscript(XML("""
<div style="width: 302px; height: 352px;">
<div style="width: 302px; height: 352px; position: relative;">
  <div style="width: 302px; height: 352px; position: absolute;">
    <iframe src="https://www.google.com/recaptcha/api/fallback?k=%(public_key)s"
            frameborder="0" scrolling="no"
            style="width: 302px; height:352px; border-style: none;">
    </iframe>
  </div>
  <div style="width: 250px; height: 80px; position: absolute; border-style: none;
              bottom: 21px; left: 25px; margin: 0px; padding: 0px; right: 25px;">
    <textarea id="g-recaptcha-response" name="g-recaptcha-response"
              class="g-recaptcha-response"
              style="width: 250px; height: 80px; border: 1px solid #c1c1c1;
                     margin: 0px; padding: 0px; resize: none;" value="">
    </textarea>
  </div>
</div>
</div>""" % dict(public_key=public_key))
            )
        )
        if not self.errors.captcha:
            return XML(captcha).xml()
        else:
            captcha.append(DIV(self.errors['captcha'], _class='error'))
            return XML(captcha).xml()


# this should only be used for captcha and perhaps not even for that
# NOTE(review): addrow() renders one label/widget/comment row in the given
# formstyle; its body continues on the following source line.
def addrow(form, a, b, c, style, _id, position=-1):
    if style == "divs":
        form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'),
                                     DIV(b, _class='w2p_fw'),
                                     DIV(c, _class='w2p_fc'),
                                     _id=_id))
    elif style == "table2cols":
        form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
                                    TD(c, _class='w2p_fc')))
        form[0].insert(position + 1, TR(TD(b,
_class='w2p_fw'), _colspan=2, _id=_id)) elif style == "ul": form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id=_id)) elif style == "bootstrap": form[0].insert(position, DIV(LABEL(a, _class='control-label'), DIV(b, SPAN(c, _class='inline-help'), _class='controls'), _class='control-group', _id=_id)) elif style == "bootstrap3_inline": form[0].insert(position, DIV(LABEL(a, _class='control-label col-sm-3'), DIV(b, SPAN(c, _class='help-block'), _class='col-sm-9'), _class='form-group', _id=_id)) elif style == "bootstrap3_stacked": form[0].insert(position, DIV(LABEL(a, _class='control-label'), b, SPAN(c, _class='help-block'), _class='form-group', _id=_id)) else: form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'), TD(b, _class='w2p_fw'), TD(c, _class='w2p_fc'), _id=_id)) class Auth(object): default_settings = dict( hideerror=False, password_min_length=4, cas_maps=None, reset_password_requires_verification=False, registration_requires_verification=False, registration_requires_approval=False, bulk_register_enabled=False, login_after_registration=False, login_after_password_change=True, alternate_requires_registration=False, create_user_groups="user_%(id)s", everybody_group_id=None, manager_actions={}, auth_manager_role=None, two_factor_authentication_group = None, login_captcha=None, register_captcha=None, pre_registration_div=None, retrieve_username_captcha=None, retrieve_password_captcha=None, captcha=None, prevent_open_redirect_attacks=True, prevent_password_reset_attacks=True, expiration=3600, # one hour long_expiration=3600 * 30 * 24, # one month remember_me_form=True, allow_basic_login=False, allow_basic_login_only=False, on_failed_authentication=lambda x: redirect(x), formstyle=None, label_separator=None, logging_enabled = True, allow_delete_accounts=False, password_field='password', table_user_name='auth_user', table_group_name='auth_group', table_membership_name='auth_membership', 
table_permission_name='auth_permission', table_event_name='auth_event', table_cas_name='auth_cas', table_token_name='auth_token', table_user=None, table_group=None, table_membership=None, table_permission=None, table_event=None, table_cas=None, showid=False, use_username=False, login_email_validate=True, login_userfield=None, multi_login=False, logout_onlogout=None, register_fields=None, register_verify_password=True, profile_fields=None, email_case_sensitive=True, username_case_sensitive=True, update_fields=['email'], ondelete="CASCADE", client_side=True, renew_session_onlogin=True, renew_session_onlogout=True, keep_session_onlogin=True, keep_session_onlogout=False, wiki=Settings(), ) # ## these are messages that can be customized default_messages = dict( login_button='Log In', register_button='Sign Up', password_reset_button='Request reset password', password_change_button='Change password', profile_save_button='Apply changes', submit_button='Submit', verify_password='Verify Password', delete_label='Check to delete', function_disabled='Function disabled', access_denied='Insufficient privileges', registration_verifying='Registration needs verification', registration_pending='Registration is pending approval', email_taken='This email already has an account', invalid_username='Invalid username', username_taken='Username already taken', login_disabled='Login disabled by administrator', logged_in='Logged in', email_sent='Email sent', unable_to_send_email='Unable to send email', email_verified='Email verified', logged_out='Logged out', registration_successful='Registration successful', invalid_email='Invalid email', unable_send_email='Unable to send email', invalid_login='Invalid login', invalid_user='Invalid user', invalid_password='Invalid password', is_empty="Cannot be empty", mismatched_password="Password fields don't match", verify_email='Welcome %(username)s! 
Click on the link %(link)s to verify your email', verify_email_subject='Email verification', username_sent='Your username was emailed to you', new_password_sent='A new password was emailed to you', password_changed='Password changed', retrieve_username='Your username is: %(username)s', retrieve_username_subject='Username retrieve', retrieve_password='Your password is: %(password)s', retrieve_password_subject='Password retrieve', reset_password='Click on the link %(link)s to reset your password', reset_password_subject='Password reset', bulk_invite_subject='Invitation to join%(site)s', bulk_invite_body='You have been invited to join %(site)s, click %(link)s to complete the process', invalid_reset_password='Invalid reset password', profile_updated='Profile updated', new_password='New password', old_password='Old password', group_description='Group uniquely assigned to user %(id)s', register_log='User %(id)s Registered', login_log='User %(id)s Logged-in', login_failed_log=None, logout_log='User %(id)s Logged-out', profile_log='User %(id)s Profile updated', verify_email_log='User %(id)s Verification email sent', retrieve_username_log='User %(id)s Username retrieved', retrieve_password_log='User %(id)s Password retrieved', reset_password_log='User %(id)s Password reset', change_password_log='User %(id)s Password changed', add_group_log='Group %(group_id)s created', del_group_log='Group %(group_id)s deleted', add_membership_log=None, del_membership_log=None, has_membership_log=None, add_permission_log=None, del_permission_log=None, has_permission_log=None, impersonate_log='User %(id)s is impersonating %(other_id)s', label_first_name='First name', label_last_name='Last name', label_username='Username', label_email='E-mail', label_password='Password', label_registration_key='Registration key', label_reset_password_key='Reset Password key', label_registration_id='Registration identifier', label_role='Role', label_description='Description', label_user_id='User ID', 
label_group_id='Group ID', label_name='Name', label_table_name='Object or table name', label_record_id='Record ID', label_time_stamp='Timestamp', label_client_ip='Client IP', label_origin='Origin', label_remember_me="Remember me (for 30 days)", verify_password_comment='please input your password again', ) """ Class for authentication, authorization, role based access control. Includes: - registration and profile - login and logout - username and password retrieval - event logging - role creation and assignment - user defined group/role based permission Args: environment: is there for legacy but unused (awful) db: has to be the database where to create tables for authentication mailer: `Mail(...)` or None (no mailer) or True (make a mailer) hmac_key: can be a hmac_key or hmac_key=Auth.get_or_create_key() controller: (where is the user action?) cas_provider: (delegate authentication to the URL, CAS2) Authentication Example:: from gluon.contrib.utils import * mail=Mail() mail.settings.server='smtp.gmail.com:587' mail.settings.sender='you@somewhere.com' mail.settings.login='username:password' auth=Auth(db) auth.settings.mailer=mail # auth.settings....=... auth.define_tables() def authentication(): return dict(form=auth()) Exposes: - `http://.../{application}/{controller}/authentication/login` - `http://.../{application}/{controller}/authentication/logout` - `http://.../{application}/{controller}/authentication/register` - `http://.../{application}/{controller}/authentication/verify_email` - `http://.../{application}/{controller}/authentication/retrieve_username` - `http://.../{application}/{controller}/authentication/retrieve_password` - `http://.../{application}/{controller}/authentication/reset_password` - `http://.../{application}/{controller}/authentication/profile` - `http://.../{application}/{controller}/authentication/change_password` On registration a group with role=new_user.id is created and user is given membership of this group. 
You can create a group with:: group_id=auth.add_group('Manager', 'can access the manage action') auth.add_permission(group_id, 'access to manage') Here "access to manage" is just a user defined string. You can give access to a user:: auth.add_membership(group_id, user_id) If user id is omitted, the logged in user is assumed Then you can decorate any action:: @auth.requires_permission('access to manage') def manage(): return dict() You can restrict a permission to a specific table:: auth.add_permission(group_id, 'edit', db.sometable) @auth.requires_permission('edit', db.sometable) Or to a specific record:: auth.add_permission(group_id, 'edit', db.sometable, 45) @auth.requires_permission('edit', db.sometable, 45) If authorization is not granted calls:: auth.settings.on_failed_authorization Other options:: auth.settings.mailer=None auth.settings.expiration=3600 # seconds ... ### these are messages that can be customized ... """ @staticmethod def get_or_create_key(filename=None, alg='sha512'): request = current.request if not filename: filename = os.path.join(request.folder, 'private', 'auth.key') if os.path.exists(filename): key = open(filename, 'r').read().strip() else: key = alg + ':' + web2py_uuid() open(filename, 'w').write(key) return key def url(self, f=None, args=None, vars=None, scheme=False): if args is None: args = [] if vars is None: vars = {} return URL(c=self.settings.controller, f=f, args=args, vars=vars, scheme=scheme) def here(self): return URL(args=current.request.args, vars=current.request.get_vars) def __init__(self, environment=None, db=None, mailer=True, hmac_key=None, controller='default', function='user', cas_provider=None, signature=True, secure=False, csrf_prevention=True, propagate_extension=None, url_index=None): ## next two lines for backward compatibility if not db and environment and isinstance(environment, DAL): db = environment self.db = db self.environment = current self.csrf_prevention = csrf_prevention request = current.request 
        session = current.session
        auth = session.auth
        # Group memberships cached in the session by update_groups();
        # empty dict when nobody is logged in.
        self.user_groups = auth and auth.user_groups or {}
        if secure:
            # force HTTPS for every request handled by this Auth instance
            request.requires_https()
        now = request.now
        # if we have auth info
        #    if not expired it, used it
        #    if expired, clear the session
        # else, only clear auth info in the session
        if auth:
            delta = datetime.timedelta(days=0, seconds=auth.expiration)
            if auth.last_visit and auth.last_visit + delta > now:
                self.user = auth.user
                # this is a trick to speed up sessions to avoid many writes:
                # only touch last_visit (and thus dirty the session) once the
                # previous stamp is older than a tenth of the expiration window
                if (now - auth.last_visit).seconds > (auth.expiration / 10):
                    auth.last_visit = request.now
            else:
                # session expired: drop auth info and renew the session id
                self.user = None
                if session.auth:
                    del session.auth
                session.renew(clear_session=True)
        else:
            self.user = None
            if session.auth:
                del session.auth
        # ## what happens after login?

        url_index = url_index or URL(controller, 'index')
        url_login = URL(controller, function, args='login',
                        extension = propagate_extension)
        # ## what happens after registration?

        settings = self.settings = Settings()
        settings.update(Auth.default_settings)
        settings.update(
            cas_domains=[request.env.http_host],
            enable_tokens=False,
            cas_provider=cas_provider,
            cas_actions=dict(login='login',
                             validate='validate',
                             servicevalidate='serviceValidate',
                             proxyvalidate='proxyValidate',
                             logout='logout'),
            extra_fields={},
            actions_disabled=[],
            controller=controller,
            function=function,
            login_url=url_login,
            logged_url=URL(controller, function, args='profile'),
            download_url=URL(controller, 'download'),
            # mailer=True means "build a default Mail() for me";
            # otherwise use the Mail instance (or None) passed by the caller
            mailer=(mailer is True) and Mail() or mailer,
            on_failed_authorization =
            URL(controller, function, args='not_authorized'),
            login_next = url_index,
            login_onvalidation = [],
            login_onaccept = [],
            login_onfail = [],
            login_methods = [self],
            login_form = self,
            logout_next = url_index,
            logout_onlogout = None,
            register_next = url_index,
            register_onvalidation = [],
            register_onaccept = [],
            verify_email_next = url_login,
            verify_email_onaccept = [],
            profile_next = url_index,
            profile_onvalidation = [],
            profile_onaccept = [],
            retrieve_username_next = url_index,
            retrieve_password_next = url_index,
            request_reset_password_next = url_login,
            reset_password_next = url_index,
            change_password_next = url_index,
            change_password_onvalidation = [],
            change_password_onaccept = [],
            retrieve_password_onvalidation = [],
            request_reset_password_onvalidation = [],
            request_reset_password_onaccept = [],
            reset_password_onvalidation = [],
            reset_password_onaccept = [],
            hmac_key = hmac_key,
            formstyle = current.response.formstyle,
            label_separator = current.response.form_label_separator
            )
        # after this, unknown keys assigned to settings raise an error
        settings.lock_keys = True
        # ## these are messages that can be customized
        messages = self.messages = Messages(current.T)
        messages.update(Auth.default_messages)
        # NOTE(review): due to ternary precedence the whole
        # "login_url + '?_next=...'" expression is the conditional; _href is
        # '' when there is no component location — confirm this is intended.
        messages.update(ajax_failed_authentication=
                        DIV(H4('NOT AUTHORIZED'), 'Please ',
                            A('login',
                              _href=self.settings.login_url +
                              ('?_next=' + urllib.quote(current.request.env.http_web2py_component_location))
                              if current.request.env.http_web2py_component_location else ''),
                            ' to view this content.',
                            _class='not-authorized alert alert-block'))
        messages.lock_keys = True

        # for "remember me" option
        response = current.response
        if auth and auth.remember_me:
            # when user wants to be logged in for longer
            response.session_cookie_expires = auth.expiration
        if signature:
            self.define_signature()
        else:
            self.signature = None

    def get_vars_next(self):
        """Return the `_next` request variable (redirect target after an
        auth action), unwrapping a repeated query parameter to its first
        value."""
        next = current.request.vars._next
        if isinstance(next, (list, tuple)):
            next = next[0]
        return next

    def _get_user_id(self):
        """accessor for auth.user_id"""
        return self.user and self.user.id or None

    user_id = property(_get_user_id, doc="user.id or None")

    # The following accessors look the auth tables up by their configured
    # names on every call, so they keep working with lazy table definition.
    def table_user(self):
        return self.db[self.settings.table_user_name]

    def table_group(self):
        return self.db[self.settings.table_group_name]

    def table_membership(self):
        return self.db[self.settings.table_membership_name]

    def table_permission(self):
        return self.db[self.settings.table_permission_name]

    def table_event(self):
        return self.db[self.settings.table_event_name]

    def table_cas(self):
        return self.db[self.settings.table_cas_name]

    def table_token(self):
        return self.db[self.settings.table_token_name]

    def _HTTP(self, *a, **b):
        """
        only used in lambda: self._HTTP(404)
        """
        raise HTTP(*a, **b)

    def __call__(self):
        """
        Dispatch the current request to the auth action named by the first
        URL argument (login, logout, register, ...) or to the CAS provider
        endpoints.

        Example:
            Use as::

                def authentication():
                    return dict(form=auth())

        """
        request = current.request
        args = request.args
        if not args:
            # no action given: go to the login form, preserving query vars
            redirect(self.url(args='login', vars=request.vars))
        elif args[0] in self.settings.actions_disabled:
            raise HTTP(404)
        if args[0] in ('login', 'logout', 'register', 'verify_email',
                       'retrieve_username', 'retrieve_password',
                       'reset_password', 'request_reset_password',
                       'change_password', 'profile', 'groups',
                       'impersonate', 'not_authorized', 'confirm_registration',
                       'bulk_register','manage_tokens'):
            # impersonate takes the target user id as a second URL arg
            if len(request.args) >= 2 and args[0] == 'impersonate':
                return getattr(self, args[0])(request.args[1])
            else:
                return getattr(self, args[0])()
        elif args[0] == 'cas' and not self.settings.cas_provider:
            # act as a CAS *server* (only when not delegating to a provider)
            if args(1) == self.settings.cas_actions['login']:
                return self.cas_login(version=2)
            elif args(1) == self.settings.cas_actions['validate']:
                return self.cas_validate(version=1)
            elif args(1) == self.settings.cas_actions['servicevalidate']:
                return self.cas_validate(version=2, proxy=False)
            elif args(1) == self.settings.cas_actions['proxyvalidate']:
                return self.cas_validate(version=2, proxy=True)
            elif args(1) == self.settings.cas_actions['logout']:
                return self.logout(next=request.vars.service or DEFAULT)
        else:
            raise HTTP(404)

    def navbar(self, prefix='Welcome', action=None,
               separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT,
               referrer_actions=DEFAULT, mode='default'):
        """ Navbar with support for more templates
        This uses some code from the old navbar.
        Args:
            mode: see options for list of

        """
        items = []  # Hold all menu items in a list
        self.bar = ''  # The final
        T = current.T
        referrer_actions = [] if not referrer_actions else referrer_actions
        if not action:
            action = self.url(self.settings.function)

        request = current.request
        if URL() == action:
            next = ''
        else:
            # carry the current location so auth actions can redirect back
            next = '?_next=' + urllib.quote(URL(args=request.args,
                                                vars=request.get_vars))
        # build the href for an action, appending _next only when the action
        # is listed in referrer_actions (or no filter was given)
        href = lambda function: '%s/%s%s' % (action, function, next
                                             if referrer_actions is DEFAULT
                                             or function in referrer_actions
                                             else '')
        if isinstance(prefix, str):
            prefix = T(prefix)
        if prefix:
            prefix = prefix.strip() + ' '

        def Anr(*a, **b):
            # anchor with rel="nofollow" so crawlers skip auth links
            b['_rel'] = 'nofollow'
            return A(*a, **b)

        if self.user_id:  # User is logged in
            logout_next = self.settings.logout_next
            items.append({'name': T('Log Out'),
                          'href': '%s/logout?_next=%s' % (action,
                                                          urllib.quote(
                                                          logout_next)),
                          'icon': 'icon-off'})
            if not 'profile' in self.settings.actions_disabled:
                items.append({'name': T('Profile'), 'href': href('profile'),
                              'icon': 'icon-user'})
            if not 'change_password' in self.settings.actions_disabled:
                items.append({'name': T('Password'),
                              'href': href('change_password'),
                              'icon': 'icon-lock'})
            if user_identifier is DEFAULT:
                user_identifier = '%(first_name)s'
            # user_identifier may be a callable or a %-template over the
            # user row (plain str or a lazyT translation)
            if callable(user_identifier):
                user_identifier = user_identifier(self.user)
            elif ((isinstance(user_identifier, str) or
                  type(user_identifier).__name__ == 'lazyT') and
                  re.search(r'%\(.+\)s', user_identifier)):
                user_identifier = user_identifier % self.user
            if not user_identifier:
                user_identifier = ''
        else:  # User is not logged in
            items.append({'name': T('Log In'), 'href': href('login'),
                          'icon': 'icon-off'})
            if not 'register' in self.settings.actions_disabled:
                items.append({'name': T('Sign Up'), 'href': href('register'),
                              'icon': 'icon-user'})
            if not 'request_reset_password' in self.settings.actions_disabled:
                items.append({'name': T('Lost password?'),
                              'href': href('request_reset_password'),
                              'icon': 'icon-lock'})
            if (self.settings.use_username and not 'retrieve_username'
                    in self.settings.actions_disabled):
                items.append({'name': T('Forgot username?'),
                              'href': href('retrieve_username'),
                              'icon': 'icon-edit'})

        def menu():  # For inclusion in MENU
            # first item becomes the parent entry, the rest its submenu
            self.bar = [(items[0]['name'], False, items[0]['href'], [])]
            del items[0]
            for item in items:
                self.bar[0][3].append((item['name'], False, item['href']))

        def bootstrap3():  # Default web2py scaffolding
            def rename(icon): return icon+' '+icon.replace('icon', 'glyphicon')
            self.bar = UL(LI(Anr(I(_class=rename('icon '+items[0]['icon'])),
                                 ' ' + items[0]['name'],
                                 _href=items[0]['href'])),
                          _class='dropdown-menu')
            del items[0]
            for item in items:
                self.bar.insert(-1, LI(Anr(I(_class=rename('icon '+item['icon'])),
                                           ' ' + item['name'],
                                           _href=item['href'])))
            self.bar.insert(-1, LI('', _class='divider'))
            # wrap the menu in a dropdown toggle labelled with the user
            # identifier (logged in) or "Log In" (anonymous)
            if self.user_id:
                self.bar = LI(Anr(prefix, user_identifier,
                                  _href='#', _class="dropdown-toggle",
                                  data={'toggle': 'dropdown'}),
                              self.bar, _class='dropdown')
            else:
                self.bar = LI(Anr(T('Log In'),
                                  _href='#', _class="dropdown-toggle",
                                  data={'toggle': 'dropdown'}), self.bar,
                              _class='dropdown')

        def bare():
            """ In order to do advanced customization we only need the
            prefix, the user_identifier and the href attribute of items

            Examples:
                Use as::

                # in module custom_layout.py
                from gluon import *
                def navbar(auth_navbar):
                    bar = auth_navbar
                    user = bar["user"]

                    if not user:
                        btn_login = A(current.T("Login"),
                                      _href=bar["login"],
                                      _class="btn btn-success",
                                      _rel="nofollow")
                        btn_register = A(current.T("Sign up"),
                                         _href=bar["register"],
                                         _class="btn btn-primary",
                                         _rel="nofollow")
                        return DIV(btn_register, btn_login, _class="btn-group")
                    else:
                        toggletext = "%s back %s" % (bar["prefix"], user)
                        toggle = A(toggletext,
                                   _href="#",
                                   _class="dropdown-toggle",
                                   _rel="nofollow",
                                   **{"_data-toggle": "dropdown"})
                        li_profile = LI(A(I(_class="icon-user"), ' ',
                                          current.T("Account details"),
                                          _href=bar["profile"], _rel="nofollow"))
                        li_custom = LI(A(I(_class="icon-book"), ' ',
                                         current.T("My Agenda"),
                                         _href="#", rel="nofollow"))
                        li_logout = LI(A(I(_class="icon-off"), ' ',
                                         current.T("logout"),
                                         _href=bar["logout"], _rel="nofollow"))
                        dropdown = UL(li_profile,
                                      li_custom,
                                      LI('', _class="divider"),
                                      li_logout,
                                      _class="dropdown-menu", _role="menu")
                        return LI(toggle, dropdown, _class="dropdown")

                # in models db.py
                import custom_layout as custom

                # in layout.html
                <ul id="navbar" class="nav pull-right">
                    {{='auth' in globals() and \
                    custom.navbar(auth.navbar(mode='bare')) or ''}}</ul>
            """
            # map each translated item label back to a stable key so
            # custom layouts can address hrefs by name
            bare = {}

            bare['prefix'] = prefix
            bare['user'] = user_identifier if self.user_id else None

            for i in items:
                if i['name'] == T('Log In'):
                    k = 'login'
                elif i['name'] == T('Sign Up'):
                    k = 'register'
                elif i['name'] == T('Lost password?'):
                    k = 'request_reset_password'
                elif i['name'] == T('Forgot username?'):
                    k = 'retrieve_username'
                elif i['name'] == T('Log Out'):
                    k = 'logout'
                elif i['name'] == T('Profile'):
                    k = 'profile'
                elif i['name'] == T('Password'):
                    k = 'change_password'
                bare[k] = i['href']

            self.bar = bare

        options = {'asmenu': menu,
                   'dropdown': bootstrap3,
                   'bare': bare
                   }  # Define custom modes.
        if mode in options and callable(options[mode]):
            options[mode]()
        else:
            # fallback: plain SPAN navbar using the separator strings
            s1, s2, s3 = separators
            if self.user_id:
                self.bar = SPAN(prefix, user_identifier,
                                s1, Anr(items[0]['name'],
                                _href=items[0]['href']), s3,
                                _class='auth_navbar')
            else:
                self.bar = SPAN(s1, Anr(items[0]['name'],
                                _href=items[0]['href']), s3,
                                _class='auth_navbar')
            for item in items[1:]:
                self.bar.insert(-1, s2)
                self.bar.insert(-1, Anr(item['name'], _href=item['href']))

        return self.bar

    def __get_migrate(self, tablename, migrate=True):
        """Translate the `migrate` argument into a per-table value:
        a string prefix yields the table-file name
        '<prefix><tablename>.table'; False disables migration; anything
        else enables it.

        NOTE(review): `migrate == False` also matches 0/0.0; callers may
        rely on this falsy behavior — confirm before tightening to `is`.
        """
        if type(migrate).__name__ == 'str':
            return (migrate + tablename + '.table')
        elif migrate == False:
            return False
        else:
            return True

    def enable_record_versioning(self,
                                 tables,
                                 archive_db=None,
                                 archive_names='%(tablename)s_archive',
                                 current_record='current_record',
                                 current_record_label=None):
        """
        Used to enable full record versioning (including auth tables)::

            auth = Auth(db)
            auth.define_tables(signature=True)
            # define our own tables
            db.define_table('mything',Field('name'),auth.signature)
            auth.enable_record_versioning(tables=db)

        tables can be the db (all table) or a list of tables.
        only tables with modified_by and modified_on fiels (as created
        by auth.signature) will have versioning. Old record versions will be
        in table 'mything_archive' automatically defined.

        when you enable enable_record_versioning, records are never
        deleted but marked with is_active=False.

        enable_record_versioning enables a common_filter for
        every table that filters out records with is_active = False

        Note:
            If you use auth.enable_record_versioning,
            do not use auth.archive or you will end up with duplicates.
            auth.archive does explicitly what enable_record_versioning
            does automatically.

        """
        # default archive-link label: e.g. "Current Record"
        current_record_label = current_record_label or current.T(
            current_record.replace('_', ' ').title())
        for table in tables:
            fieldnames = table.fields()
            # only version tables carrying the signature fields and not
            # already versioned (no current_record field yet)
            if ('id' in fieldnames and
                'modified_on' in fieldnames and
                not current_record in fieldnames):
                table._enable_record_versioning(
                    archive_db=archive_db,
                    archive_name=archive_names,
                    current_record=current_record,
                    current_record_label=current_record_label)

    def define_signature(self):
        """Build the reusable `auth_signature` Table: is_active flag plus
        created/modified timestamps and user references, for inclusion in
        user-defined tables via `auth.signature`."""
        db = self.db
        settings = self.settings
        request = current.request
        T = current.T
        reference_user = 'reference %s' % settings.table_user_name

        def lazy_user(auth=self):
            # evaluated at insert/update time, not at definition time
            return auth.user_id

        def represent(id, record=None, s=settings):
            # show "First Last" for a user id; fall back to the raw id
            # when the row cannot be fetched
            try:
                user = s.table_user(id)
                return '%s %s' % (user.get("first_name", user.get("email")),
                                  user.get("last_name", ''))
            except:
                return id
        ondelete = self.settings.ondelete
        self.signature = Table(
            self.db, 'auth_signature',
            Field('is_active', 'boolean',
                  default=True,
                  readable=False, writable=False,
                  label=T('Is Active')),
            Field('created_on', 'datetime',
                  default=request.now,
                  writable=False, readable=False,
                  label=T('Created On')),
            Field('created_by',
                  reference_user,
                  default=lazy_user, represent=represent,
                  writable=False, readable=False,
                  label=T('Created By'), ondelete=ondelete),
            Field('modified_on', 'datetime',
                  update=request.now, default=request.now,
                  writable=False, readable=False,
                  label=T('Modified On')),
            Field('modified_by',
                  reference_user, represent=represent,
                  default=lazy_user, update=lazy_user,
                  writable=False, readable=False,
                  label=T('Modified By'), ondelete=ondelete))

    def define_tables(self, username=None, signature=None, enable_tokens=False,
                      migrate=None, fake_migrate=None):
        """
        To be called unless tables are defined manually

        Examples:
            Use as::

                # defines all needed tables and table files
                # 'myprefix_auth_user.table', ...
auth.define_tables(migrate='myprefix_') # defines all needed tables without migration/table files auth.define_tables(migrate=False) """ db = self.db if migrate is None: migrate = db._migrate if fake_migrate is None: fake_migrate = db._fake_migrate settings = self.settings if username is None: username = settings.use_username else: settings.use_username = username settings.enable_tokens = enable_tokens if not self.signature: self.define_signature() if signature == True: signature_list = [self.signature] elif not signature: signature_list = [] elif isinstance(signature, Table): signature_list = [signature] else: signature_list = signature is_not_empty = IS_NOT_EMPTY(error_message=self.messages.is_empty) is_crypted = CRYPT(key=settings.hmac_key, min_length=settings.password_min_length) is_unique_email = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_NOT_IN_DB(db, '%s.email' % settings.table_user_name, error_message=self.messages.email_taken)] if not settings.email_case_sensitive: is_unique_email.insert(1, IS_LOWER()) if not settings.table_user_name in db.tables: passfield = settings.password_field extra_fields = settings.extra_fields.get( settings.table_user_name, []) + signature_list if username or settings.cas_provider: is_unique_username = \ [IS_MATCH('[\w\.\-]+', strict=True, error_message=self.messages.invalid_username), IS_NOT_IN_DB(db, '%s.username' % settings.table_user_name, error_message=self.messages.username_taken)] if not settings.username_case_sensitive: is_unique_username.insert(1, IS_LOWER()) db.define_table( settings.table_user_name, Field('first_name', length=128, default='', label=self.messages.label_first_name, requires=is_not_empty), Field('last_name', length=128, default='', label=self.messages.label_last_name, requires=is_not_empty), Field('email', length=512, default='', label=self.messages.label_email, requires=is_unique_email), Field('username', length=128, default='', label=self.messages.label_username, 
requires=is_unique_username), Field(passfield, 'password', length=512, readable=False, label=self.messages.label_password, requires=[is_crypted]), Field('registration_key', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_key), Field('reset_password_key', length=512, writable=False, readable=False, default='', label=self.messages.label_reset_password_key), Field('registration_id', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_id), *extra_fields, **dict( migrate=self.__get_migrate(settings.table_user_name, migrate), fake_migrate=fake_migrate, format='%(username)s')) else: db.define_table( settings.table_user_name, Field('first_name', length=128, default='', label=self.messages.label_first_name, requires=is_not_empty), Field('last_name', length=128, default='', label=self.messages.label_last_name, requires=is_not_empty), Field('email', length=512, default='', label=self.messages.label_email, requires=is_unique_email), Field(passfield, 'password', length=512, readable=False, label=self.messages.label_password, requires=[is_crypted]), Field('registration_key', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_key), Field('reset_password_key', length=512, writable=False, readable=False, default='', label=self.messages.label_reset_password_key), Field('registration_id', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_id), *extra_fields, **dict( migrate=self.__get_migrate(settings.table_user_name, migrate), fake_migrate=fake_migrate, format='%(first_name)s %(last_name)s (%(id)s)')) reference_table_user = 'reference %s' % settings.table_user_name if not settings.table_group_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_group_name, []) + signature_list db.define_table( settings.table_group_name, Field('role', length=512, default='', 
label=self.messages.label_role, requires=IS_NOT_IN_DB(db, '%s.role' % settings.table_group_name)), Field('description', 'text', label=self.messages.label_description), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_group_name, migrate), fake_migrate=fake_migrate, format='%(role)s (%(id)s)')) reference_table_group = 'reference %s' % settings.table_group_name if not settings.table_membership_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_membership_name, []) + signature_list db.define_table( settings.table_membership_name, Field('user_id', reference_table_user, label=self.messages.label_user_id), Field('group_id', reference_table_group, label=self.messages.label_group_id), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_membership_name, migrate), fake_migrate=fake_migrate)) if not settings.table_permission_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_permission_name, []) + signature_list db.define_table( settings.table_permission_name, Field('group_id', reference_table_group, label=self.messages.label_group_id), Field('name', default='default', length=512, label=self.messages.label_name, requires=is_not_empty), Field('table_name', length=512, label=self.messages.label_table_name), Field('record_id', 'integer', default=0, label=self.messages.label_record_id, requires=IS_INT_IN_RANGE(0, 10 ** 9)), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_permission_name, migrate), fake_migrate=fake_migrate)) if not settings.table_event_name in db.tables: db.define_table( settings.table_event_name, Field('time_stamp', 'datetime', default=current.request.now, label=self.messages.label_time_stamp), Field('client_ip', default=current.request.client, label=self.messages.label_client_ip), Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('origin', default='auth', length=512, label=self.messages.label_origin, 
requires=is_not_empty), Field('description', 'text', default='', label=self.messages.label_description, requires=is_not_empty), *settings.extra_fields.get(settings.table_event_name, []), **dict( migrate=self.__get_migrate( settings.table_event_name, migrate), fake_migrate=fake_migrate)) now = current.request.now if settings.cas_domains: if not settings.table_cas_name in db.tables: db.define_table( settings.table_cas_name, Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('created_on', 'datetime', default=now), Field('service', requires=IS_URL()), Field('ticket'), Field('renew', 'boolean', default=False), *settings.extra_fields.get(settings.table_cas_name, []), **dict( migrate=self.__get_migrate( settings.table_cas_name, migrate), fake_migrate=fake_migrate)) if settings.enable_tokens: extra_fields = settings.extra_fields.get( settings.table_token_name, []) + signature_list if not settings.table_token_name in db.tables: db.define_table( settings.table_token_name, Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('expires_on', 'datetime', default=datetime.datetime(2999,12,31)), Field('token',writable=False,default=web2py_uuid(),unique=True), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_token_name, migrate), fake_migrate=fake_migrate)) if not db._lazy_tables: settings.table_user = db[settings.table_user_name] settings.table_group = db[settings.table_group_name] settings.table_membership = db[settings.table_membership_name] settings.table_permission = db[settings.table_permission_name] settings.table_event = db[settings.table_event_name] if settings.cas_domains: settings.table_cas = db[settings.table_cas_name] if settings.cas_provider: # THIS IS NOT LAZY settings.actions_disabled = \ ['profile', 'register', 'change_password', 'request_reset_password', 'retrieve_username'] from gluon.contrib.login_methods.cas_auth import CasAuth maps = settings.cas_maps if not 
maps: table_user = self.table_user() maps = dict((name, lambda v, n=name: v.get(n, None)) for name in table_user.fields if name != 'id' and table_user[name].readable) maps['registration_id'] = \ lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user']) actions = [settings.cas_actions['login'], settings.cas_actions['servicevalidate'], settings.cas_actions['logout']] settings.login_form = CasAuth( casversion=2, urlbase=settings.cas_provider, actions=actions, maps=maps) return self def log_event(self, description, vars=None, origin='auth'): """ Examples: Use as:: auth.log_event(description='this happened', origin='auth') """ if not self.settings.logging_enabled or not description: return elif self.is_logged_in(): user_id = self.user.id else: user_id = None # user unknown vars = vars or {} # log messages should not be translated if type(description).__name__ == 'lazyT': description = description.m self.table_event().insert( description=str(description % vars), origin=origin, user_id=user_id) def get_or_create_user(self, keys, update_fields=['email'], login=True, get=True): """ Used for alternate login methods: If the user exists already then password is updated. If the user doesn't yet exist, then they are created. """ table_user = self.table_user() user = None checks = [] # make a guess about who this user is for fieldname in ['registration_id', 'username', 'email']: if fieldname in table_user.fields() and \ keys.get(fieldname, None): checks.append(fieldname) value = keys[fieldname] user = table_user(**{fieldname: value}) if user: break if not checks: return None if not 'registration_id' in keys: keys['registration_id'] = keys[checks[0]] # if we think we found the user but registration_id does not match, # make new user if 'registration_id' in checks \ and user \ and user.registration_id \ and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])): user = None # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER? 
if user: if not get: # added for register_bare to avoid overwriting users return None update_keys = dict(registration_id=keys['registration_id']) for key in update_fields: if key in keys: update_keys[key] = keys[key] user.update_record(**update_keys) elif checks: if not 'first_name' in keys and 'first_name' in table_user.fields: guess = keys.get('email', 'anonymous').split('@')[0] keys['first_name'] = keys.get('username', guess) user_id = table_user.insert(**table_user._filter_fields(keys)) user = table_user[user_id] if self.settings.create_user_groups: group_id = self.add_group( self.settings.create_user_groups % user) self.add_membership(group_id, user_id) if self.settings.everybody_group_id: self.add_membership(self.settings.everybody_group_id, user_id) if login: self.user = user return user def basic(self, basic_auth_realm=False): """ Performs basic login. Args: basic_auth_realm: optional basic http authentication realm. Can take str or unicode or function or callable or boolean. reads current.request.env.http_authorization and returns basic_allowed,basic_accepted,user. if basic_auth_realm is defined is a callable it's return value is used to set the basic authentication realm, if it's a string its content is used instead. Otherwise basic authentication realm is set to the application name. If basic_auth_realm is None or False (the default) the behavior is to skip sending any challenge. 
""" if not self.settings.allow_basic_login: return (False, False, False) basic = current.request.env.http_authorization if basic_auth_realm: if callable(basic_auth_realm): basic_auth_realm = basic_auth_realm() elif isinstance(basic_auth_realm, (unicode, str)): basic_realm = unicode(basic_auth_realm) elif basic_auth_realm is True: basic_realm = u'' + current.request.application http_401 = HTTP(401, u'Not Authorized', **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'}) if not basic or not basic[:6].lower() == 'basic ': if basic_auth_realm: raise http_401 return (True, False, False) (username, sep, password) = base64.b64decode(basic[6:]).partition(':') is_valid_user = sep and self.login_bare(username, password) if not is_valid_user and basic_auth_realm: raise http_401 return (True, True, is_valid_user) def login_user(self, user): """ Logins the `user = db.auth_user(id)` """ from gluon.settings import global_settings if global_settings.web2py_runtime_gae: user = Row(self.table_user()._filter_fields(user, id=True)) delattr(user, 'password') else: user = Row(user) for key, value in user.items(): if callable(value) or key == 'password': delattr(user, key) if self.settings.renew_session_onlogin: current.session.renew(clear_session=not self.settings.keep_session_onlogin) current.session.auth = Storage(user=user, last_visit=current.request.now, expiration=self.settings.expiration, hmac_key=web2py_uuid()) self.user = user self.update_groups() def _get_login_settings(self): table_user = self.table_user() userfield = self.settings.login_userfield or 'username' \ if 'username' in table_user.fields else 'email' passfield = self.settings.password_field return Storage({"table_user": table_user, "userfield": userfield, "passfield": passfield}) def login_bare(self, username, password): """ Logins user as specified by username (or email) and password """ settings = self._get_login_settings() user = settings.table_user(**{settings.userfield: \ username}) if user and 
user.get(settings.passfield, False): password = settings.table_user[ settings.passfield].validate(password)[0] if ((user.registration_key is None or not user.registration_key.strip()) and password == user[settings.passfield]): self.login_user(user) return user else: # user not in database try other login methods for login_method in self.settings.login_methods: if login_method != self and login_method(username, password): self.user = user return user return False def register_bare(self, **fields): """ Registers a user as specified by username (or email) and a raw password. """ settings = self._get_login_settings() # users can register_bare even if no password is provided, # in this case they will have to reset their password to login if fields.get(settings.passfield): fields[settings.passfield] = \ settings.table_user[settings.passfield].validate(fields[settings.passfield])[0] if not fields.get(settings.userfield): raise ValueError("register_bare: " + "userfield not provided or invalid") user = self.get_or_create_user(fields, login=False, get=False, update_fields=self.settings.update_fields) if not user: # get or create did not create a user (it ignores duplicate records) return False return user def cas_login(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, version=2, ): request = current.request response = current.response session = current.session db, table = self.db, self.table_cas() session._cas_service = request.vars.service or session._cas_service if not request.env.http_host in self.settings.cas_domains or \ not session._cas_service: raise HTTP(403, 'not authorized') def allow_access(interactivelogin=False): row = table(service=session._cas_service, user_id=self.user.id) if row: ticket = row.ticket else: ticket = 'ST-' + web2py_uuid() table.insert(service=session._cas_service, user_id=self.user.id, ticket=ticket, created_on=request.now, renew=interactivelogin) service = session._cas_service query_sep = '&' if '?' in service else '?' 
del session._cas_service if 'warn' in request.vars and not interactivelogin: response.headers[ 'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket return A("Continue to %s" % service, _href=service + query_sep + "ticket=" + ticket) else: redirect(service + query_sep + "ticket=" + ticket) if self.is_logged_in() and not 'renew' in request.vars: return allow_access() elif not self.is_logged_in() and 'gateway' in request.vars: redirect(service) def cas_onaccept(form, onaccept=onaccept): if not onaccept is DEFAULT: onaccept(form) return allow_access(interactivelogin=True) return self.login(next, onvalidation, cas_onaccept, log) def cas_validate(self, version=2, proxy=False): request = current.request db, table = self.db, self.table_cas() current.response.headers['Content-Type'] = 'text' ticket = request.vars.ticket renew = 'renew' in request.vars row = table(ticket=ticket) success = False if row: userfield = self.settings.login_userfield or 'username' \ if 'username' in table.fields else 'email' # If ticket is a service Ticket and RENEW flag respected if ticket[0:3] == 'ST-' and \ not ((row.renew and renew) ^ renew): user = self.table_user()(row.user_id) row.delete_record() success = True def build_response(body): return '<?xml version="1.0" encoding="UTF-8"?>\n' +\ TAG['cas:serviceResponse']( body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml() if success: if version == 1: message = 'yes\n%s' % user[userfield] else: # assume version 2 username = user.get('username', user[userfield]) message = build_response( TAG['cas:authenticationSuccess']( TAG['cas:user'](username), *[TAG['cas:' + field.name](user[field.name]) for field in self.table_user() if field.readable])) else: if version == 1: message = 'no\n' elif row: message = build_response(TAG['cas:authenticationFailure']()) else: message = build_response( TAG['cas:authenticationFailure']( 'Ticket %s not recognized' % ticket, _code='INVALID TICKET')) raise HTTP(200, message) def 
_reset_two_factor_auth(self, session): """When two-step authentication is enabled, this function is used to clear the session after successfully completing second challenge or when the maximum number of tries allowed has expired. """ session.auth_two_factor_user = None session.auth_two_factor = None session.auth_two_factor_enabled = False # Allow up to 4 attempts (the 1st one plus 3 more) session.auth_two_factor_tries_left = 3 def login(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a login form """ table_user = self.table_user() settings = self.settings if 'username' in table_user.fields or \ not settings.login_email_validate: tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty) if not settings.username_case_sensitive: tmpvalidator = [IS_LOWER(), tmpvalidator] else: tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email) if not settings.email_case_sensitive: tmpvalidator = [IS_LOWER(), tmpvalidator] request = current.request response = current.response session = current.session passfield = settings.password_field try: table_user[passfield].requires[-1].min_length = 0 except: pass ### use session for federated login snext = self.get_vars_next() if snext and self.settings.prevent_open_redirect_attacks: items = snext.split('/') if '//' in snext and items[2] != request.env.http_host: snext = None if snext: session._auth_next = snext elif session._auth_next: snext = session._auth_next ### pass if next is DEFAULT: # important for security next = settings.login_next if callable(next): next = next() user_next = snext if user_next: external = user_next.split('://') if external[0].lower() in ['http', 'https', 'ftp']: host_next = user_next.split('//', 1)[-1].split('/')[0] if host_next in settings.cas_domains: next = user_next else: next = user_next if onvalidation is DEFAULT: onvalidation = settings.login_onvalidation if onaccept is DEFAULT: onaccept = settings.login_onaccept if log is DEFAULT: log = 
self.messages['login_log'] onfail = settings.login_onfail user = None # default #Setup the default field used for the form multi_login = False if self.settings.login_userfield: username = self.settings.login_userfield else: if 'username' in table_user.fields: username = 'username' else: username = 'email' if self.settings.multi_login: multi_login = True old_requires = table_user[username].requires table_user[username].requires = tmpvalidator # If two-factor authentication is enabled, and the maximum # number of tries allowed is used up, reset the session to # pre-login state with two-factor auth if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1: # Exceeded maximum allowed tries for this code. Require user to enter # username and password again. user = None accepted_form = False self._reset_two_factor_auth(session) # Redirect to the default 'next' page without logging # in. If that page requires login, user will be redirected # back to the main login form redirect(next, client_side=settings.client_side) # Before showing the default login form, check whether # we are already on the second step of two-step authentication. # If we are, then skip this login form and use the form for the # second challenge instead. # Note to devs: The code inside the if-block is unchanged from the # previous version of this file, other than for indentation inside # to put it inside the if-block if session.auth_two_factor_user is None: if settings.remember_me_form: extra_fields = [ Field('remember_me', 'boolean', default=False, label = self.messages.label_remember_me)] else: extra_fields = [] # do we use our own login form, or from a central source? 
if settings.login_form == self: form = SQLFORM( table_user, fields=[username, passfield], hidden=dict(_next=next), showid=settings.showid, submit_button=self.messages.login_button, delete_label=self.messages.delete_label, formstyle=settings.formstyle, separator=settings.label_separator, extra_fields = extra_fields, ) captcha = settings.login_captcha or \ (settings.login_captcha != False and settings.captcha) if captcha: addrow(form, captcha.label, captcha, captcha.comment, settings.formstyle, 'captcha__row') accepted_form = False if form.accepts(request, session if self.csrf_prevention else None, formname='login', dbio=False, onvalidation=onvalidation, hideerror=settings.hideerror): accepted_form = True # check for username in db entered_username = form.vars[username] if multi_login and '@' in entered_username: # if '@' in username check for email, not username user = table_user(email = entered_username) else: user = table_user(**{username: entered_username}) if user: # user in db, check if registration pending or disabled temp_user = user if temp_user.registration_key == 'pending': response.flash = self.messages.registration_pending return form elif temp_user.registration_key in ('disabled', 'blocked'): response.flash = self.messages.login_disabled return form elif (not temp_user.registration_key is None and temp_user.registration_key.strip()): response.flash = \ self.messages.registration_verifying return form # try alternate logins 1st as these have the # current version of the password user = None for login_method in settings.login_methods: if login_method != self and \ login_method(request.vars[username], request.vars[passfield]): if not self in settings.login_methods: # do not store password in db form.vars[passfield] = None user = self.get_or_create_user( form.vars, settings.update_fields) break if not user: # alternates have failed, maybe because service inaccessible if settings.login_methods[0] == self: # try logging in locally using cached credentials if 
form.vars.get(passfield, '') == temp_user[passfield]: # success user = temp_user else: # user not in db if not settings.alternate_requires_registration: # we're allowed to auto-register users from external systems for login_method in settings.login_methods: if login_method != self and \ login_method(request.vars[username], request.vars[passfield]): if not self in settings.login_methods: # do not store password in db form.vars[passfield] = None user = self.get_or_create_user( form.vars, settings.update_fields) break if not user: self.log_event(self.messages['login_failed_log'], request.post_vars) # invalid login session.flash = self.messages.invalid_login callback(onfail, None) redirect( self.url(args=request.args, vars=request.get_vars), client_side=settings.client_side) else: # use a central authentication server cas = settings.login_form cas_user = cas.get_user() if cas_user: cas_user[passfield] = None user = self.get_or_create_user( table_user._filter_fields(cas_user), settings.update_fields) elif hasattr(cas, 'login_form'): return cas.login_form() else: # we need to pass through login again before going on next = self.url(settings.function, args='login') redirect(cas.login_url(next), client_side=settings.client_side) # Extra login logic for two-factor authentication ################################################# # If the 'user' variable has a value, this means that the first # authentication step was successful (i.e. user provided correct # username and password at the first challenge). 
# Check if this user is signed up for two-factor authentication # Default rule is that the user must be part of a group that is called # auth.settings.two_factor_authentication_group if user and self.settings.two_factor_authentication_group: role = self.settings.two_factor_authentication_group session.auth_two_factor_enabled = self.has_membership(user_id=user.id, role=role) # challenge if session.auth_two_factor_enabled: form = SQLFORM.factory( Field('authentication_code', required=True, comment='This code was emailed to you and is required for login.'), hidden=dict(_next=next), formstyle=settings.formstyle, separator=settings.label_separator ) # accepted_form is used by some default web2py code later in the # function that handles running specified functions before redirect # Set it to False until the challenge form is accepted. accepted_form = False # Handle the case when a user has submitted the login/password # form successfully, and the password has been validated, but # the two-factor form has not been displayed or validated yet. 
if session.auth_two_factor_user is None and user is not None: session.auth_two_factor_user = user # store the validated user and associate with this session session.auth_two_factor = random.randint(100000, 999999) session.auth_two_factor_tries_left = 3 # Allow user to try up to 4 times # TODO: Add some error checking to handle cases where email cannot be sent self.settings.mailer.send( to=user.email, subject="Two-step Login Authentication Code", message="Your temporary login code is {0}".format(session.auth_two_factor)) if form.accepts(request, session if self.csrf_prevention else None, formname='login', dbio=False, onvalidation=onvalidation, hideerror=settings.hideerror): accepted_form = True if form.vars['authentication_code'] == str(session.auth_two_factor): # Handle the case when the two-factor form has been successfully validated # and the user was previously stored (the current user should be None because # in this case, the previous username/password login form should not be displayed. # This will allow the code after the 2-factor authentication block to proceed as # normal. if user is None or user == session.auth_two_factor_user: user = session.auth_two_factor_user # For security, because the username stored in the # session somehow does not match the just validated # user. Should not be possible without session stealing # which is hard with SSL. elif user != session.auth_two_factor_user: user = None # Either way, the user and code associated with this session should # be removed. This handles cases where the session login may have # expired but browser window is open, so the old session key and # session usernamem will still exist self._reset_two_factor_auth(session) else: # TODO: Limit the number of retries allowed. response.flash = 'Incorrect code. 
{0} more attempt(s) remaining.'.format(session.auth_two_factor_tries_left) session.auth_two_factor_tries_left -= 1 return form else: return form # End login logic for two-factor authentication # process authenticated users if user: user = Row(table_user._filter_fields(user, id=True)) # process authenticated users # user wants to be logged in for longer self.login_user(user) session.auth.expiration = \ request.post_vars.remember_me and \ settings.long_expiration or \ settings.expiration session.auth.remember_me = 'remember_me' in request.post_vars self.log_event(log, user) session.flash = self.messages.logged_in # how to continue if settings.login_form == self: if accepted_form: callback(onaccept, form) if next == session._auth_next: session._auth_next = None next = replace_id(next, form) redirect(next, client_side=settings.client_side) table_user[username].requires = old_requires return form elif user: callback(onaccept, None) if next == session._auth_next: del session._auth_next redirect(next, client_side=settings.client_side) def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT): """ Logouts and redirects to login """ # Clear out 2-step authentication information if user logs # out. This information is also cleared on successful login. 
self._reset_two_factor_auth(current.session) if next is DEFAULT: next = self.get_vars_next() or self.settings.logout_next if onlogout is DEFAULT: onlogout = self.settings.logout_onlogout if onlogout: onlogout(self.user) if log is DEFAULT: log = self.messages['logout_log'] if self.user: self.log_event(log, self.user) if self.settings.login_form != self: cas = self.settings.login_form cas_user = cas.get_user() if cas_user: next = cas.logout_url(next) current.session.auth = None if self.settings.renew_session_onlogout: current.session.renew(clear_session=not self.settings.keep_session_onlogout) current.session.flash = self.messages.logged_out if not next is None: redirect(next) def register(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a registration form """ table_user = self.table_user() request = current.request response = current.response session = current.session if self.is_logged_in(): redirect(self.settings.logged_url, client_side=self.settings.client_side) if next is DEFAULT: next = self.get_vars_next() or self.settings.register_next if onvalidation is DEFAULT: onvalidation = self.settings.register_onvalidation if onaccept is DEFAULT: onaccept = self.settings.register_onaccept if log is DEFAULT: log = self.messages['register_log'] table_user = self.table_user() if self.settings.login_userfield: username = self.settings.login_userfield elif 'username' in table_user.fields: username = 'username' else: username = 'email' # Ensure the username field is unique. 
unique_validator = IS_NOT_IN_DB(self.db, table_user[username]) if not table_user[username].requires: table_user[username].requires = unique_validator elif isinstance(table_user[username].requires, (list, tuple)): if not any([isinstance(validator, IS_NOT_IN_DB) for validator in table_user[username].requires]): if isinstance(table_user[username].requires, list): table_user[username].requires.append(unique_validator) else: table_user[username].requires += (unique_validator, ) elif not isinstance(table_user[username].requires, IS_NOT_IN_DB): table_user[username].requires = [table_user[username].requires, unique_validator] passfield = self.settings.password_field formstyle = self.settings.formstyle if self.settings.register_verify_password: extra_fields = [ Field("password_two", "password", requires=IS_EQUAL_TO( request.post_vars.get(passfield, None), error_message=self.messages.mismatched_password), label=current.T("Confirm Password"))] else: extra_fields = [] form = SQLFORM(table_user, fields=self.settings.register_fields, hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.register_button, delete_label=self.messages.delete_label, formstyle=formstyle, separator=self.settings.label_separator, extra_fields = extra_fields ) captcha = self.settings.register_captcha or self.settings.captcha if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') #Add a message if specified if self.settings.pre_registration_div: addrow(form, '', DIV(_id="pre-reg", *self.settings.pre_registration_div), '', formstyle, '') table_user.registration_key.default = key = web2py_uuid() if form.accepts(request, session if self.csrf_prevention else None, formname='register', onvalidation=onvalidation, hideerror=self.settings.hideerror): description = self.messages.group_description % form.vars if self.settings.create_user_groups: group_id = self.add_group( self.settings.create_user_groups % form.vars, description) 
self.add_membership(group_id, form.vars.id) if self.settings.everybody_group_id: self.add_membership( self.settings.everybody_group_id, form.vars.id) if self.settings.registration_requires_verification: link = self.url( self.settings.function, args=('verify_email', key), scheme=True) d = dict(form.vars) d.update(dict(key=key, link=link, username=form.vars[username])) if not (self.settings.mailer and self.settings.mailer.send( to=form.vars.email, subject=self.messages.verify_email_subject, message=self.messages.verify_email % d)): self.db.rollback() response.flash = self.messages.unable_send_email return form session.flash = self.messages.email_sent if self.settings.registration_requires_approval and \ not self.settings.registration_requires_verification: table_user[form.vars.id] = dict(registration_key='pending') session.flash = self.messages.registration_pending elif (not self.settings.registration_requires_verification or self.settings.login_after_registration): if not self.settings.registration_requires_verification: table_user[form.vars.id] = dict(registration_key='') session.flash = self.messages.registration_successful user = table_user(**{username: form.vars[username]}) self.login_user(user) session.flash = self.messages.logged_in self.log_event(log, form.vars) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def is_logged_in(self): """ Checks if the user is logged in and returns True/False. 
If so user is in auth.user as well as in session.auth.user """ if self.user: return True return False def verify_email(self, next=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Action used to verify the registration email """ key = getarg(-1) table_user = self.table_user() user = table_user(registration_key=key) if not user: redirect(self.settings.login_url) if self.settings.registration_requires_approval: user.update_record(registration_key='pending') current.session.flash = self.messages.registration_pending else: user.update_record(registration_key='') current.session.flash = self.messages.email_verified # make sure session has same user.registrato_key as db record if current.session.auth and current.session.auth.user: current.session.auth.user.registration_key = user.registration_key if log is DEFAULT: log = self.messages['verify_email_log'] if next is DEFAULT: next = self.settings.verify_email_next if onaccept is DEFAULT: onaccept = self.settings.verify_email_onaccept self.log_event(log, user) callback(onaccept, user) redirect(next) def retrieve_username(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to retrieve the user username (only if there is a username field) """ table_user = self.table_user() if not 'username' in table_user.fields: raise HTTP(404) request = current.request response = current.response session = current.session captcha = self.settings.retrieve_username_captcha or \ (self.settings.retrieve_username_captcha != False and self.settings.captcha) if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if next is DEFAULT: next = self.get_vars_next() or self.settings.retrieve_username_next if onvalidation is DEFAULT: onvalidation = self.settings.retrieve_username_onvalidation if onaccept is DEFAULT: onaccept = self.settings.retrieve_username_onaccept if log is DEFAULT: log = self.messages['retrieve_username_log'] old_requires = table_user.email.requires 
table_user.email.requires = [IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if form.accepts(request, session if self.csrf_prevention else None, formname='retrieve_username', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): users = table_user._db(table_user.email==form.vars.email).select() if not users: current.session.flash = \ self.messages.invalid_email redirect(self.url(args=request.args)) username = ', '.join(u.username for u in users) self.settings.mailer.send(to=form.vars.email, subject=self.messages.retrieve_username_subject, message=self.messages.retrieve_username % dict(username=username)) session.flash = self.messages.email_sent for user in users: self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next) table_user.email.requires = old_requires return form def random_password(self): import string import random password = '' specials = r'!#$*' for i in range(0, 3): password += random.choice(string.lowercase) password += random.choice(string.uppercase) password += random.choice(string.digits) password += random.choice(specials) return ''.join(random.sample(password, len(password))) def reset_password_deprecated(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to reset the user password (deprecated) """ table_user = self.table_user() request = current.request response = current.response session = current.session if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if 
next is DEFAULT: next = self.get_vars_next() or self.settings.retrieve_password_next if onvalidation is DEFAULT: onvalidation = self.settings.retrieve_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.retrieve_password_onaccept if log is DEFAULT: log = self.messages['retrieve_password_log'] old_requires = table_user.email.requires table_user.email.requires = [IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session if self.csrf_prevention else None, formname='retrieve_password', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(email=form.vars.email) if not user: current.session.flash = \ self.messages.invalid_email redirect(self.url(args=request.args)) elif user.registration_key in ('pending', 'disabled', 'blocked'): current.session.flash = \ self.messages.registration_pending redirect(self.url(args=request.args)) password = self.random_password() passfield = self.settings.password_field d = { passfield: str(table_user[passfield].validate(password)[0]), 'registration_key': '' } user.update_record(**d) if self.settings.mailer and \ self.settings.mailer.send(to=form.vars.email, subject=self.messages.retrieve_password_subject, message=self.messages.retrieve_password % dict(password=password)): session.flash = self.messages.email_sent else: session.flash = self.messages.unable_to_send_email self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next) table_user.email.requires = old_requires return form def confirm_registration( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ 
Returns a form to confirm user registration """ table_user = self.table_user() request = current.request # response = current.response session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.reset_password_next if self.settings.prevent_password_reset_attacks: key = request.vars.key if not key and len(request.args)>1: key = request.args[-1] if key: session._reset_password_key = key redirect(self.url(args='confirm_registration')) else: key = session._reset_password_key else: key = request.vars.key or getarg(-1) try: t0 = int(key.split('-')[0]) if time.time() - t0 > 60 * 60 * 24: raise Exception user = table_user(reset_password_key=key) if not user: raise Exception except Exception as e: session.flash = self.messages.invalid_reset_password redirect(self.url('login', vars=dict(test=e))) redirect(next, client_side=self.settings.client_side) passfield = self.settings.password_field form = SQLFORM.factory( Field('first_name', label='First Name', required=True), Field('last_name', label='Last Name', required=True), Field('new_password', 'password', label=self.messages.new_password, requires=self.table_user()[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button='Confirm Registration', hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.process().accepted: user.update_record( **{passfield: str(form.vars.new_password), 'first_name': str(form.vars.first_name), 'last_name': str(form.vars.last_name), 'registration_key': '', 'reset_password_key': ''}) session.flash = self.messages.password_changed if self.settings.login_after_password_change: self.login_user(user) redirect(next, client_side=self.settings.client_side) return form def email_registration(self, subject, body, user): """ Sends and email invitation to a user informing 
they have been registered with the application """ reset_password_key = str(int(time.time())) + '-' + web2py_uuid() link = self.url(self.settings.function, args=('confirm_registration',), vars={'key': reset_password_key}, scheme=True) d = dict(user) d.update(dict(key=reset_password_key, link=link, site=current.request.env.http_host)) if self.settings.mailer and self.settings.mailer.send( to=user.email, subject=subject % d, message=body % d): user.update_record(reset_password_key=reset_password_key) return True return False def bulk_register(self, max_emails=100): """ Creates a form for ther user to send invites to other users to join """ if not self.user: redirect(self.settings.login_url) if not self.setting.bulk_register_enabled: return HTTP(404) form = SQLFORM.factory( Field('subject','string',default=self.messages.bulk_invite_subject,requires=IS_NOT_EMPTY()), Field('emails','text',requires=IS_NOT_EMPTY()), Field('message','text',default=self.messages.bulk_invite_body,requires=IS_NOT_EMPTY()), formstyle=self.settings.formstyle) if form.process().accepted: emails = re.compile('[^\s\'"@<>,;:]+\@[^\s\'"@<>,;:]+').findall(form.vars.emails) # send the invitations emails_sent = [] emails_fail = [] emails_exist = [] for email in emails[:max_emails]: if self.table_user()(email=email): emails_exist.append(email) else: user = self.register_bare(email=email) if self.email_registration(form.vars.subject, form.vars.message, user): emails_sent.append(email) else: emails_fail.append(email) emails_fail += emails[max_emails:] form = DIV(H4('Emails sent'),UL(*[A(x,_href='mailto:'+x) for x in emails_sent]), H4('Emails failed'),UL(*[A(x,_href='mailto:'+x) for x in emails_fail]), H4('Emails existing'),UL(*[A(x,_href='mailto:'+x) for x in emails_exist])) return form def manage_tokens(self): if not self.user: redirect(self.settings.login_url) table_token =self.table_token() table_token.user_id.writable = False table_token.user_id.default = self.user.id table_token.token.writable = 
False if current.request.args(1) == 'new': table_token.token.readable = False form = SQLFORM.grid(table_token, args=['manage_tokens']) return form def reset_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to reset the user password """ table_user = self.table_user() request = current.request # response = current.response session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.reset_password_next if self.settings.prevent_password_reset_attacks: key = request.vars.key if key: session._reset_password_key = key redirect(self.url(args='reset_password')) else: key = session._reset_password_key else: key = request.vars.key try: t0 = int(key.split('-')[0]) if time.time() - t0 > 60 * 60 * 24: raise Exception user = table_user(reset_password_key=key) if not user: raise Exception except Exception: session.flash = self.messages.invalid_reset_password redirect(next, client_side=self.settings.client_side) if onvalidation is DEFAULT: onvalidation = self.settings.reset_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.reset_password_onaccept passfield = self.settings.password_field form = SQLFORM.factory( Field('new_password', 'password', label=self.messages.new_password, requires=self.table_user()[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_reset_button, hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, onvalidation=onvalidation, hideerror=self.settings.hideerror): user.update_record( **{passfield: str(form.vars.new_password), 'registration_key': '', 'reset_password_key': ''}) session.flash = self.messages.password_changed if self.settings.login_after_password_change: self.login_user(user) 
callback(onaccept, form) redirect(next, client_side=self.settings.client_side) return form def request_reset_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to reset the user password """ table_user = self.table_user() request = current.request response = current.response session = current.session captcha = self.settings.retrieve_password_captcha or \ (self.settings.retrieve_password_captcha != False and self.settings.captcha) if next is DEFAULT: next = self.get_vars_next() or self.settings.request_reset_password_next if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if onvalidation is DEFAULT: onvalidation = self.settings.request_reset_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.request_reset_password_onaccept if log is DEFAULT: log = self.messages['reset_password_log'] userfield = self.settings.login_userfield or 'username' \ if 'username' in table_user.fields else 'email' if userfield == 'email': table_user.email.requires = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] if not self.settings.email_case_sensitive: table_user.email.requires.insert(0, IS_LOWER()) else: table_user.username.requires = [ IS_IN_DB(self.db, table_user.username, error_message=self.messages.invalid_username)] if not self.settings.username_case_sensitive: table_user.username.requires.insert(0, IS_LOWER()) form = SQLFORM(table_user, fields=[userfield], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.password_reset_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if form.accepts(request, session if self.csrf_prevention else None, formname='reset_password', dbio=False, 
onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(**{userfield:form.vars.get(userfield)}) if not user: session.flash = self.messages['invalid_%s' % userfield] redirect(self.url(args=request.args), client_side=self.settings.client_side) elif user.registration_key in ('pending', 'disabled', 'blocked'): session.flash = self.messages.registration_pending redirect(self.url(args=request.args), client_side=self.settings.client_side) if self.email_reset_password(user): session.flash = self.messages.email_sent else: session.flash = self.messages.unable_to_send_email self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) # old_requires = table_user.email.requires return form def email_reset_password(self, user): reset_password_key = str(int(time.time())) + '-' + web2py_uuid() link = self.url(self.settings.function, args=('reset_password',), vars={'key': reset_password_key}, scheme=True) d = dict(user) d.update(dict(key=reset_password_key, link=link)) if self.settings.mailer and self.settings.mailer.send( to=user.email, subject=self.messages.reset_password_subject, message=self.messages.reset_password % d): user.update_record(reset_password_key=reset_password_key) return True return False def retrieve_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): if self.settings.reset_password_requires_verification: return self.request_reset_password(next, onvalidation, onaccept, log) else: return self.reset_password_deprecated(next, onvalidation, onaccept, log) def change_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form that lets the user change password """ if not self.is_logged_in(): redirect(self.settings.login_url, client_side=self.settings.client_side) db = self.db table_user = self.table_user() s = db(table_user.id == 
self.user.id) request = current.request session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.change_password_next if onvalidation is DEFAULT: onvalidation = self.settings.change_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.change_password_onaccept if log is DEFAULT: log = self.messages['change_password_log'] passfield = self.settings.password_field requires = table_user[passfield].requires if not isinstance(requires, (list, tuple)): requires = [requires] requires = filter(lambda t: isinstance(t, CRYPT), requires) if requires: requires[0].min_length = 0 form = SQLFORM.factory( Field('old_password', 'password', requires=requires, label=self.messages.old_password), Field('new_password', 'password', label=self.messages.new_password, requires=table_user[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_change_button, hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, formname='change_password', onvalidation=onvalidation, hideerror=self.settings.hideerror): current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first() if not form.vars['old_password'] == current_user[passfield]: form.errors['old_password'] = self.messages.invalid_password else: d = {passfield: str(form.vars.new_password)} s.update(**d) session.flash = self.messages.password_changed self.log_event(log, self.user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def profile(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form that lets the user change his/her profile """ table_user = self.table_user() 
if not self.is_logged_in(): redirect(self.settings.login_url, client_side=self.settings.client_side) passfield = self.settings.password_field table_user[passfield].writable = False request = current.request session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.profile_next if onvalidation is DEFAULT: onvalidation = self.settings.profile_onvalidation if onaccept is DEFAULT: onaccept = self.settings.profile_onaccept if log is DEFAULT: log = self.messages['profile_log'] form = SQLFORM( table_user, self.user.id, fields=self.settings.profile_fields, hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.profile_save_button, delete_label=self.messages.delete_label, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator, deletable=self.settings.allow_delete_accounts, ) if form.accepts(request, session, formname='profile', onvalidation=onvalidation, hideerror=self.settings.hideerror): self.user.update(table_user._filter_fields(form.vars)) session.flash = self.messages.profile_updated self.log_event(log, self.user) callback(onaccept, form) if form.deleted: return self.logout() if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def run_login_onaccept(self): onaccept = self.settings.login_onaccept if onaccept: form = Storage(dict(vars=self.user)) if not isinstance(onaccept, (list, tuple)): onaccept = [onaccept] for callback in onaccept: callback(form) def is_impersonating(self): return self.is_logged_in() and 'impersonator' in current.session.auth def impersonate(self, user_id=DEFAULT): """ To use this make a POST to `http://..../impersonate request.post_vars.user_id=<id>` Set request.post_vars.user_id to 0 to restore original user. 
requires impersonator is logged in and:: has_permission('impersonate', 'auth_user', user_id) """ request = current.request session = current.session auth = session.auth table_user = self.table_user() if not self.is_logged_in(): raise HTTP(401, "Not Authorized") current_id = auth.user.id requested_id = user_id if user_id is DEFAULT: user_id = current.request.post_vars.user_id if user_id and user_id != self.user.id and user_id != '0': if not self.has_permission('impersonate', self.table_user(), user_id): raise HTTP(403, "Forbidden") user = table_user(user_id) if not user: raise HTTP(401, "Not Authorized") auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL) auth.user.update( table_user._filter_fields(user, True)) self.user = auth.user self.update_groups() log = self.messages['impersonate_log'] self.log_event(log, dict(id=current_id, other_id=auth.user.id)) self.run_login_onaccept() elif user_id in (0, '0'): if self.is_impersonating(): session.clear() session.update(pickle.loads(auth.impersonator)) self.user = session.auth.user self.update_groups() self.run_login_onaccept() return None if requested_id is DEFAULT and not request.post_vars: return SQLFORM.factory(Field('user_id', 'integer')) return SQLFORM(table_user, user.id, readonly=True) def update_groups(self): if not self.user: return user_groups = self.user_groups = {} if current.session.auth: current.session.auth.user_groups = self.user_groups table_group = self.table_group() table_membership = self.table_membership() memberships = self.db( table_membership.user_id == self.user.id).select() for membership in memberships: group = table_group(membership.group_id) if group: user_groups[membership.group_id] = group.role def groups(self): """ Displays the groups and their roles for the logged in user """ if not self.is_logged_in(): redirect(self.settings.login_url) table_membership = self.table_membership() memberships = self.db( table_membership.user_id == self.user.id).select() table = TABLE() for 
membership in memberships: table_group = self.table_group() groups = self.db(table_group.id == membership.group_id).select() if groups: group = groups[0] table.append(TR(H3(group.role, '(%s)' % group.id))) table.append(TR(P(group.description))) if not memberships: return None return table def not_authorized(self): """ You can change the view for this page to make it look as you like """ if current.request.ajax: raise HTTP(403, 'ACCESS DENIED') return self.messages.access_denied def requires(self, condition, requires_login=True, otherwise=None): """ Decorator that prevents access to action if not logged in """ def decorator(action): def f(*a, **b): basic_allowed, basic_accepted, user = self.basic() user = user or self.user if requires_login: if not user: if current.request.ajax: raise HTTP(401, self.messages.ajax_failed_authentication) elif not otherwise is None: if callable(otherwise): return otherwise() redirect(otherwise) elif self.settings.allow_basic_login_only or \ basic_accepted or current.request.is_restful: raise HTTP(403, "Not authorized") else: next = self.here() current.session.flash = current.response.flash return call_or_redirect( self.settings.on_failed_authentication, self.settings.login_url + '?_next=' + urllib.quote(next)) if callable(condition): flag = condition() else: flag = condition if not flag: current.session.flash = self.messages.access_denied return call_or_redirect( self.settings.on_failed_authorization) return action(*a, **b) f.__doc__ = action.__doc__ f.__name__ = action.__name__ f.__dict__.update(action.__dict__) return f return decorator def requires_login(self, otherwise=None): """ Decorator that prevents access to action if not logged in """ return self.requires(True, otherwise=otherwise) def requires_login_or_token(self, otherwise=None): if self.settings.enable_tokens == True: user = None request = current.request token = request.env.http_web2py_user_token or request.vars._token table_token = self.table_token() table_user = 
self.table_user() from gluon.settings import global_settings if global_settings.web2py_runtime_gae: row = table_token(token=token) if row: user = table_user(row.user_id) else: row = self.db(table_token.token==token)(table_user.id==table_token.user_id).select().first() if row: user = row[table_user._tablename] if user: self.login_user(user) return self.requires(True, otherwise=otherwise) def requires_membership(self, role=None, group_id=None, otherwise=None): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. """ def has_membership(self=self, group_id=group_id, role=role): return self.has_membership(group_id=group_id, role=role) return self.requires(has_membership, otherwise=otherwise) def requires_permission(self, name, table_name='', record_id=0, otherwise=None): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of any group (role) that has 'name' access to 'table_name', 'record_id'. """ def has_permission(self=self, name=name, table_name=table_name, record_id=record_id): return self.has_permission(name, table_name, record_id) return self.requires(has_permission, otherwise=otherwise) def requires_signature(self, otherwise=None, hash_vars=True): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. 
""" def verify(): return URL.verify(current.request, user_signature=True, hash_vars=hash_vars) return self.requires(verify, otherwise) def add_group(self, role, description=''): """ Creates a group associated to a role """ group_id = self.table_group().insert( role=role, description=description) self.log_event(self.messages['add_group_log'], dict(group_id=group_id, role=role)) return group_id def del_group(self, group_id): """ Deletes a group """ self.db(self.table_group().id == group_id).delete() self.db(self.table_membership().group_id == group_id).delete() self.db(self.table_permission().group_id == group_id).delete() if group_id in self.user_groups: del self.user_groups[group_id] self.log_event(self.messages.del_group_log, dict(group_id=group_id)) def id_group(self, role): """ Returns the group_id of the group specified by the role """ rows = self.db(self.table_group().role == role).select() if not rows: return None return rows[0].id def user_group(self, user_id=None): """ Returns the group_id of the group uniquely associated to this user i.e. 
`role=user:[user_id]` """ return self.id_group(self.user_group_role(user_id)) def user_group_role(self, user_id=None): if not self.settings.create_user_groups: return None if user_id: user = self.table_user()[user_id] else: user = self.user return self.settings.create_user_groups % user def has_membership(self, group_id=None, user_id=None, role=None): """ Checks if user is member of group_id or role """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.table_membership() if group_id and user_id and self.db((membership.user_id == user_id) & (membership.group_id == group_id)).select(): r = True else: r = False self.log_event(self.messages['has_membership_log'], dict(user_id=user_id, group_id=group_id, check=r)) return r def add_membership(self, group_id=None, user_id=None, role=None): """ Gives user_id membership of group_id or role if user is None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.table_membership() record = membership(user_id=user_id, group_id=group_id) if record: return record.id else: id = membership.insert(group_id=group_id, user_id=user_id) if role: self.user_groups[group_id] = role else: self.update_groups() self.log_event(self.messages['add_membership_log'], dict(user_id=user_id, group_id=group_id)) return id def del_membership(self, group_id=None, user_id=None, role=None): """ Revokes membership from group_id to user_id if user_id is None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) if not user_id and self.user: user_id = self.user.id membership = self.table_membership() 
self.log_event(self.messages['del_membership_log'], dict(user_id=user_id, group_id=group_id)) ret = self.db(membership.user_id == user_id)(membership.group_id == group_id).delete() if group_id in self.user_groups: del self.user_groups[group_id] return ret def has_permission(self, name='any', table_name='', record_id=0, user_id=None, group_id=None, ): """ Checks if user_id or current logged in user is member of a group that has 'name' permission on 'table_name' and 'record_id' if group_id is passed, it checks whether the group has the permission """ if not group_id and self.settings.everybody_group_id and \ self.has_permission( name, table_name, record_id, user_id=None, group_id=self.settings.everybody_group_id): return True if not user_id and not group_id and self.user: user_id = self.user.id if user_id: membership = self.table_membership() rows = self.db(membership.user_id == user_id).select(membership.group_id) groups = set([row.group_id for row in rows]) if group_id and not group_id in groups: return False else: groups = set([group_id]) permission = self.table_permission() rows = self.db(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == record_id).select(permission.group_id) groups_required = set([row.group_id for row in rows]) if record_id: rows = self.db(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == 0).select(permission.group_id) groups_required = groups_required.union(set([row.group_id for row in rows])) if groups.intersection(groups_required): r = True else: r = False if user_id: self.log_event(self.messages['has_permission_log'], dict(user_id=user_id, name=name, table_name=table_name, record_id=record_id)) return r def add_permission(self, group_id, name='any', table_name='', record_id=0, ): """ Gives group_id 'name' access to 'table_name' and 'record_id' """ permission = self.table_permission() if group_id == 0: group_id = self.user_group() record = 
self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))( permission.record_id == long(record_id)).select(limitby=(0, 1), orderby_on_limitby=False).first() if record: id = record.id else: id = permission.insert(group_id=group_id, name=name, table_name=str(table_name), record_id=long(record_id)) self.log_event(self.messages['add_permission_log'], dict(permission_id=id, group_id=group_id, name=name, table_name=table_name, record_id=record_id)) return id def del_permission(self, group_id, name='any', table_name='', record_id=0, ): """ Revokes group_id 'name' access to 'table_name' and 'record_id' """ permission = self.table_permission() self.log_event(self.messages['del_permission_log'], dict(group_id=group_id, name=name, table_name=table_name, record_id=record_id)) return self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == long(record_id)).delete() def accessible_query(self, name, table, user_id=None): """ Returns a query with all accessible records for user_id or the current logged in user this method does not work on GAE because uses JOIN and IN Example: Use as:: db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL) """ if not user_id: user_id = self.user_id db = self.db if isinstance(table, str) and table in self.db.tables(): table = self.db[table] elif isinstance(table, (Set, Query)): # experimental: build a chained query for all tables if isinstance(table, Set): cquery = table.query else: cquery = table tablenames = db._adapter.tables(cquery) for tablename in tablenames: cquery &= self.accessible_query(name, tablename, user_id=user_id) return cquery if not isinstance(table, str) and\ self.has_permission(name, table, 0, user_id): return table.id > 0 membership = self.table_membership() permission = self.table_permission() query = table.id.belongs( db(membership.user_id == user_id) (membership.group_id == 
permission.group_id) (permission.name == name) (permission.table_name == table) ._select(permission.record_id)) if self.settings.everybody_group_id: query |= table.id.belongs( db(permission.group_id == self.settings.everybody_group_id) (permission.name == name) (permission.table_name == table) ._select(permission.record_id)) return query @staticmethod def archive(form, archive_table=None, current_record='current_record', archive_current=False, fields=None): """ If you have a table (db.mytable) that needs full revision history you can just do:: form=crud.update(db.mytable,myrecord,onaccept=auth.archive) or:: form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive) crud.archive will define a new table "mytable_archive" and store a copy of the current record (if archive_current=True) or a copy of the previous record (if archive_current=False) in the newly created table including a reference to the current record. fields allows to specify extra fields that need to be archived. If you want to access such table you need to define it yourself in a model:: db.define_table('mytable_archive', Field('current_record',db.mytable), db.mytable) Notice such table includes all fields of db.mytable plus one: current_record. crud.archive does not timestamp the stored record unless your original table has a fields like:: db.define_table(..., Field('saved_on','datetime', default=request.now,update=request.now,writable=False), Field('saved_by',auth.user, default=auth.user_id,update=auth.user_id,writable=False), there is nothing special about these fields since they are filled before the record is archived. 
If you want to change the archive table name and the name of the reference field you can do, for example:: db.define_table('myhistory', Field('parent_record',db.mytable), db.mytable) and use it as:: form=crud.update(db.mytable,myrecord, onaccept=lambda form:crud.archive(form, archive_table=db.myhistory, current_record='parent_record')) """ if not archive_current and not form.record: return None table = form.table if not archive_table: archive_table_name = '%s_archive' % table if not archive_table_name in table._db: table._db.define_table( archive_table_name, Field(current_record, table), *[field.clone(unique=False) for field in table]) archive_table = table._db[archive_table_name] new_record = {current_record: form.vars.id} for fieldname in archive_table.fields: if not fieldname in ['id', current_record]: if archive_current and fieldname in form.vars: new_record[fieldname] = form.vars[fieldname] elif form.record and fieldname in form.record: new_record[fieldname] = form.record[fieldname] if fields: new_record.update(fields) id = archive_table.insert(**new_record) return id def wiki(self, slug=None, env=None, render='markmin', manage_permissions=False, force_prefix='', restrict_search=False, resolve=True, extra=None, menu_groups=None, templates=None, migrate=True, controller=None, function=None, force_render=False, groups=None): if controller and function: resolve = False if not hasattr(self, '_wiki'): self._wiki = Wiki(self, render=render, manage_permissions=manage_permissions, force_prefix=force_prefix, restrict_search=restrict_search, env=env, extra=extra or {}, menu_groups=menu_groups, templates=templates, migrate=migrate, controller=controller, function=function, groups=groups) else: self._wiki.env.update(env or {}) # if resolve is set to True, process request as wiki call # resolve=False allows initial setup without wiki redirection wiki = None if resolve: if slug: wiki = self._wiki.read(slug, force_render) if isinstance(wiki, dict) and 
wiki.has_key('content'): # FIXME: .has_key() is deprecated # We don't want to return a dict object, just the wiki wiki = wiki['content'] else: wiki = self._wiki() if isinstance(wiki, basestring): wiki = XML(wiki) return wiki def wikimenu(self): """To be used in menu.py for app wide wiki menus""" if (hasattr(self, "_wiki") and self._wiki.settings.controller and self._wiki.settings.function): self._wiki.automenu() class Crud(object): def url(self, f=None, args=None, vars=None): """ This should point to the controller that exposes download and crud """ if args is None: args = [] if vars is None: vars = {} return URL(c=self.settings.controller, f=f, args=args, vars=vars) def __init__(self, environment, db=None, controller='default'): self.db = db if not db and environment and isinstance(environment, DAL): self.db = environment elif not db: raise SyntaxError("must pass db as first or second argument") self.environment = current settings = self.settings = Settings() settings.auth = None settings.logger = None settings.create_next = None settings.update_next = None settings.controller = controller settings.delete_next = self.url() settings.download_url = self.url('download') settings.create_onvalidation = StorageList() settings.update_onvalidation = StorageList() settings.delete_onvalidation = StorageList() settings.create_onaccept = StorageList() settings.update_onaccept = StorageList() settings.update_ondelete = StorageList() settings.delete_onaccept = StorageList() settings.update_deletable = True settings.showid = False settings.keepvalues = False settings.create_captcha = None settings.update_captcha = None settings.captcha = None settings.formstyle = 'table3cols' settings.label_separator = ': ' settings.hideerror = False settings.detect_record_change = True settings.hmac_key = None settings.lock_keys = True messages = self.messages = Messages(current.T) messages.submit_button = 'Submit' messages.delete_label = 'Check to delete' messages.record_created = 'Record 
Created' messages.record_updated = 'Record Updated' messages.record_deleted = 'Record Deleted' messages.update_log = 'Record %(id)s updated' messages.create_log = 'Record %(id)s created' messages.read_log = 'Record %(id)s read' messages.delete_log = 'Record %(id)s deleted' messages.lock_keys = True def __call__(self): args = current.request.args if len(args) < 1: raise HTTP(404) elif args[0] == 'tables': return self.tables() elif len(args) > 1 and not args(1) in self.db.tables: raise HTTP(404) table = self.db[args(1)] if args[0] == 'create': return self.create(table) elif args[0] == 'select': return self.select(table, linkto=self.url(args='read')) elif args[0] == 'search': form, rows = self.search(table, linkto=self.url(args='read')) return DIV(form, SQLTABLE(rows)) elif args[0] == 'read': return self.read(table, args(2)) elif args[0] == 'update': return self.update(table, args(2)) elif args[0] == 'delete': return self.delete(table, args(2)) else: raise HTTP(404) def log_event(self, message, vars): if self.settings.logger: self.settings.logger.log_event(message, vars, origin='crud') def has_permission(self, name, table, record=0): if not self.settings.auth: return True try: record_id = record.id except: record_id = record return self.settings.auth.has_permission(name, str(table), record_id) def tables(self): return TABLE(*[TR(A(name, _href=self.url(args=('select', name)))) for name in self.db.tables]) @staticmethod def archive(form, archive_table=None, current_record='current_record'): return Auth.archive(form, archive_table=archive_table, current_record=current_record) def update(self, table, record, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, ondelete=DEFAULT, log=DEFAULT, message=DEFAULT, deletable=DEFAULT, formname=DEFAULT, **attributes ): if not (isinstance(table, Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, Table): table = self.db[table] try: record_id = 
record.id except: record_id = record or 0 if record_id and not self.has_permission('update', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) if not record_id and not self.has_permission('create', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request response = current.response session = current.session if request.extension == 'json' and request.vars.json: request.vars.update(json_parser.loads(request.vars.json)) if next is DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.update_next if onvalidation is DEFAULT: onvalidation = self.settings.update_onvalidation if onaccept is DEFAULT: onaccept = self.settings.update_onaccept if ondelete is DEFAULT: ondelete = self.settings.update_ondelete if log is DEFAULT: log = self.messages['update_log'] if deletable is DEFAULT: deletable = self.settings.update_deletable if message is DEFAULT: message = self.messages.record_updated if not 'hidden' in attributes: attributes['hidden'] = {} attributes['hidden']['_next'] = next form = SQLFORM( table, record, showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, deletable=deletable, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator, **attributes # contains hidden ) self.accepted = False self.deleted = False captcha = self.settings.update_captcha or self.settings.captcha if record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') captcha = self.settings.create_captcha or self.settings.captcha if not record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if not request.extension in ('html', 'load'): (_session, _formname) = (None, None) else: (_session, _formname) = ( session, '%s/%s' % (table._tablename, form.record_id)) if not 
formname is DEFAULT: _formname = formname keepvalues = self.settings.keepvalues if request.vars.delete_this_record: keepvalues = False if isinstance(onvalidation, StorageList): onvalidation = onvalidation.get(table._tablename, []) if form.accepts(request, _session, formname=_formname, onvalidation=onvalidation, keepvalues=keepvalues, hideerror=self.settings.hideerror, detect_record_change=self.settings.detect_record_change): self.accepted = True response.flash = message if log: self.log_event(log, form.vars) if request.vars.delete_this_record: self.deleted = True message = self.messages.record_deleted callback(ondelete, form, table._tablename) response.flash = message callback(onaccept, form, table._tablename) if not request.extension in ('html', 'load'): raise HTTP(200, 'RECORD CREATED/UPDATED') if isinstance(next, (list, tuple)): # fix issue with 2.6 next = next[0] if next: # Only redirect when explicit next = replace_id(next, form) session.flash = response.flash redirect(next) elif not request.extension in ('html', 'load'): raise HTTP(401, serializers.json(dict(errors=form.errors))) return form def create(self, table, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, message=DEFAULT, formname=DEFAULT, **attributes ): if next is DEFAULT: next = self.settings.create_next if onvalidation is DEFAULT: onvalidation = self.settings.create_onvalidation if onaccept is DEFAULT: onaccept = self.settings.create_onaccept if log is DEFAULT: log = self.messages['create_log'] if message is DEFAULT: message = self.messages.record_created return self.update( table, None, next=next, onvalidation=onvalidation, onaccept=onaccept, log=log, message=message, deletable=False, formname=formname, **attributes ) def read(self, table, record): if not (isinstance(table, Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, Table): table = self.db[table] if not self.has_permission('read', 
table, record): redirect(self.settings.auth.settings.on_failed_authorization) form = SQLFORM( table, record, readonly=True, comments=False, upload=self.settings.download_url, showid=self.settings.showid, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if not current.request.extension in ('html', 'load'): return table._filter_fields(form.record, id=True) return form def delete(self, table, record_id, next=DEFAULT, message=DEFAULT, ): if not (isinstance(table, Table) or table in self.db.tables): raise HTTP(404) if not isinstance(table, Table): table = self.db[table] if not self.has_permission('delete', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request session = current.session if next is DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.delete_next if message is DEFAULT: message = self.messages.record_deleted record = table[record_id] if record: callback(self.settings.delete_onvalidation, record) del table[record_id] callback(self.settings.delete_onaccept, record, table._tablename) session.flash = message redirect(next) def rows( self, table, query=None, fields=None, orderby=None, limitby=None, ): if not (isinstance(table, Table) or table in self.db.tables): raise HTTP(404) if not self.has_permission('select', table): redirect(self.settings.auth.settings.on_failed_authorization) #if record_id and not self.has_permission('select', table): # redirect(self.settings.auth.settings.on_failed_authorization) if not isinstance(table, Table): table = self.db[table] if not query: query = table.id > 0 if not fields: fields = [field for field in table if field.readable] else: fields = [table[f] if isinstance(f, str) else f for f in fields] rows = self.db(query).select(*fields, **dict(orderby=orderby, limitby=limitby)) return rows def select(self, table, query=None, fields=None, orderby=None, limitby=None, headers=None, **attr ): headers = headers or {} rows = 
self.rows(table, query, fields, orderby, limitby) if not rows: return None # Nicer than an empty table. if not 'upload' in attr: attr['upload'] = self.url('download') if not current.request.extension in ('html', 'load'): return rows.as_list() if not headers: if isinstance(table, str): table = self.db[table] headers = dict((str(k), k.label) for k in table) return SQLTABLE(rows, headers=headers, **attr) def get_format(self, field): rtable = field._db[field.type[10:]] format = rtable.get('_format', None) if format and isinstance(format, str): return format[2:-2] return field.name def get_query(self, field, op, value, refsearch=False): try: if refsearch: format = self.get_format(field) if op == 'equals': if not refsearch: return field == value else: return lambda row: row[field.name][format] == value elif op == 'not equal': if not refsearch: return field != value else: return lambda row: row[field.name][format] != value elif op == 'greater than': if not refsearch: return field > value else: return lambda row: row[field.name][format] > value elif op == 'less than': if not refsearch: return field < value else: return lambda row: row[field.name][format] < value elif op == 'starts with': if not refsearch: return field.like(value + '%') else: return lambda row: str(row[field.name][format]).startswith(value) elif op == 'ends with': if not refsearch: return field.like('%' + value) else: return lambda row: str(row[field.name][format]).endswith(value) elif op == 'contains': if not refsearch: return field.like('%' + value + '%') else: return lambda row: value in row[field.name][format] except: return None def search(self, *tables, **args): """ Creates a search form and its results for a table Examples: Use as:: form, results = crud.search(db.test, queries = ['equals', 'not equal', 'contains'], query_labels={'equals':'Equals', 'not equal':'Not equal'}, fields = ['id','children'], field_labels = { 'id':'ID','children':'Children'}, zero='Please choose', query = (db.test.id > 
0)&(db.test.id != 3) ) """ table = tables[0] fields = args.get('fields', table.fields) validate = args.get('validate', True) request = current.request db = self.db if not (isinstance(table, Table) or table in db.tables): raise HTTP(404) attributes = {} for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'): if key in args: attributes[key] = args[key] tbl = TABLE() selected = [] refsearch = [] results = [] showall = args.get('showall', False) if showall: selected = fields chkall = args.get('chkall', False) if chkall: for f in fields: request.vars['chk%s' % f] = 'on' ops = args.get('queries', []) zero = args.get('zero', '') if not ops: ops = ['equals', 'not equal', 'greater than', 'less than', 'starts with', 'ends with', 'contains'] ops.insert(0, zero) query_labels = args.get('query_labels', {}) query = args.get('query', table.id > 0) field_labels = args.get('field_labels', {}) for field in fields: field = table[field] if not field.readable: continue fieldname = field.name chkval = request.vars.get('chk' + fieldname, None) txtval = request.vars.get('txt' + fieldname, None) opval = request.vars.get('op' + fieldname, None) row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname, _disabled=(field.type == 'id'), value=(field.type == 'id' or chkval == 'on'))), TD(field_labels.get(fieldname, field.label)), TD(SELECT([OPTION(query_labels.get(op, op), _value=op) for op in ops], _name="op" + fieldname, value=opval)), TD(INPUT(_type="text", _name="txt" + fieldname, _value=txtval, _id='txt' + fieldname, _class=str(field.type)))) tbl.append(row) if request.post_vars and (chkval or field.type == 'id'): if txtval and opval != '': if field.type[0:10] == 'reference ': refsearch.append(self.get_query(field, opval, txtval, refsearch=True)) elif validate: value, error = field.validate(txtval) if not error: ### TODO deal with 'starts with', 'ends with', 'contains' on GAE query &= self.get_query(field, opval, value) else: row[3].append(DIV(error, _class='error')) 
else: query &= self.get_query(field, opval, txtval) selected.append(field) form = FORM(tbl, INPUT(_type="submit")) if selected: try: results = db(query).select(*selected, **attributes) for r in refsearch: results = results.find(r) except: # hmmm, we should do better here results = None return form, results urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor())) def fetch(url, data=None, headers=None, cookie=Cookie.SimpleCookie(), user_agent='Mozilla/5.0'): headers = headers or {} if not data is None: data = urllib.urlencode(data) if user_agent: headers['User-agent'] = user_agent headers['Cookie'] = ' '.join( ['%s=%s;' % (c.key, c.value) for c in cookie.values()]) try: from google.appengine.api import urlfetch except ImportError: req = urllib2.Request(url, data, headers) html = urllib2.urlopen(req).read() else: method = ((data is None) and urlfetch.GET) or urlfetch.POST while url is not None: response = urlfetch.fetch(url=url, payload=data, method=method, headers=headers, allow_truncated=False, follow_redirects=False, deadline=10) # next request will be a get, so no need to send the data again data = None method = urlfetch.GET # load cookies from the response cookie.load(response.headers.get('set-cookie', '')) url = response.headers.get('location') html = response.content return html regex_geocode = \ re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""") def geocode(address): try: a = urllib.quote(address) txt = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s' % a) item = regex_geocode.search(txt) (la, lo) = (float(item.group('la')), float(item.group('lo'))) return (la, lo) except: return (0.0, 0.0) def reverse_geocode(lat, lng, lang=None): """ Try to get an approximate address for a given latitude, longitude. 
""" if not lang: lang = current.T.accepted_language try: return json_parser.loads(fetch('http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s' % locals()))['results'][0]['formatted_address'] except: return '' def universal_caller(f, *a, **b): c = f.func_code.co_argcount n = f.func_code.co_varnames[:c] defaults = f.func_defaults or [] pos_args = n[0:-len(defaults)] named_args = n[-len(defaults):] arg_dict = {} # Fill the arg_dict with name and value for the submitted, positional values for pos_index, pos_val in enumerate(a[:c]): arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument # There might be pos_args left, that are sent as named_values. Gather them as well. # If a argument already is populated with values we simply replaces them. for arg_name in pos_args[len(arg_dict):]: if arg_name in b: arg_dict[arg_name] = b[arg_name] if len(arg_dict) >= len(pos_args): # All the positional arguments is found. The function may now be called. # However, we need to update the arg_dict with the values from the named arguments as well. for arg_name in named_args: if arg_name in b: arg_dict[arg_name] = b[arg_name] return f(**arg_dict) # Raise an error, the function cannot be called. 
raise HTTP(404, "Object does not exist") class Service(object): def __init__(self, environment=None): self.run_procedures = {} self.csv_procedures = {} self.xml_procedures = {} self.rss_procedures = {} self.json_procedures = {} self.jsonrpc_procedures = {} self.jsonrpc2_procedures = {} self.xmlrpc_procedures = {} self.amfrpc_procedures = {} self.amfrpc3_procedures = {} self.soap_procedures = {} def run(self, f): """ Example: Use as:: service = Service() @service.run def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/run/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def csv(self, f): """ Example: Use as:: service = Service() @service.csv def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/csv/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def xml(self, f): """ Example: Use as:: service = Service() @service.xml def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/xml/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def rss(self, f): """ Example: Use as:: service = Service() @service.rss def myfunction(): return dict(title=..., link=..., description=..., created_on=..., entries=[dict(title=..., link=..., description=..., created_on=...]) def call(): return service() Then call it with: wget http://..../app/default/call/rss/myfunction """ self.rss_procedures[f.__name__] = f return f def json(self, f): """ Example: Use as:: service = Service() @service.json def myfunction(a, b): return [{a: b}] def call(): return service() Then call it with:; wget http://..../app/default/call/json/myfunction?a=hello&b=world """ self.json_procedures[f.__name__] = f return f def jsonrpc(self, f): """ Example: Use as:: service = Service() @service.jsonrpc def myfunction(a, b): return a + b def call(): return service() Then call it 
with: wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world """ self.jsonrpc_procedures[f.__name__] = f return f def jsonrpc2(self, f): """ Example: Use as:: service = Service() @service.jsonrpc2 def myfunction(a, b): return a + b def call(): return service() Then call it with: wget --post-data '{"jsonrpc": "2.0", "id": 1, "method": "myfunction", "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2 """ self.jsonrpc2_procedures[f.__name__] = f return f def xmlrpc(self, f): """ Example: Use as:: service = Service() @service.xmlrpc def myfunction(a, b): return a + b def call(): return service() The call it with: wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world """ self.xmlrpc_procedures[f.__name__] = f return f def amfrpc(self, f): """ Example: Use as:: service = Service() @service.amfrpc def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world """ self.amfrpc_procedures[f.__name__] = f return f def amfrpc3(self, domain='default'): """ Example: Use as:: service = Service() @service.amfrpc3('domain') def myfunction(a, b): return a + b def call(): return service() Then call it with: wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world """ if not isinstance(domain, str): raise SyntaxError("AMF3 requires a domain for function") def _amfrpc3(f): if domain: self.amfrpc3_procedures[domain + '.' 
+ f.__name__] = f else: self.amfrpc3_procedures[f.__name__] = f return f return _amfrpc3 def soap(self, name=None, returns=None, args=None, doc=None): """ Example: Use as:: service = Service() @service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,}) def myfunction(a, b): return a + b def call(): return service() Then call it with:: from gluon.contrib.pysimplesoap.client import SoapClient client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL") response = client.MyFunction(a=1,b=2) return response['result'] It also exposes online generated documentation and xml example messages at `http://..../app/default/call/soap` """ def _soap(f): self.soap_procedures[name or f.__name__] = f, returns, args, doc return f return _soap def serve_run(self, args=None): request = current.request if not args: args = request.args if args and args[0] in self.run_procedures: return str(universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars))) self.error() def serve_csv(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/x-csv' if not args: args = request.args def none_exception(value): if isinstance(value, unicode): return value.encode('utf8') if hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') if value is None: return '<NULL>' return value if args and args[0] in self.run_procedures: import types r = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) s = cStringIO.StringIO() if hasattr(r, 'export_to_csv_file'): r.export_to_csv_file(s) elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)): import csv writer = csv.writer(s) writer.writerow(r[0].keys()) for line in r: writer.writerow([none_exception(v) for v in line.values()]) else: import csv writer = csv.writer(s) for line in r: writer.writerow(line) return s.getvalue() self.error() def serve_xml(self, args=None): request = current.request 
response = current.response response.headers['Content-Type'] = 'text/xml' if not args: args = request.args if args and args[0] in self.run_procedures: s = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) if hasattr(s, 'as_list'): s = s.as_list() return serializers.xml(s, quote=False) self.error() def serve_rss(self, args=None): request = current.request response = current.response if not args: args = request.args if args and args[0] in self.rss_procedures: feed = universal_caller(self.rss_procedures[args[0]], *args[1:], **dict(request.vars)) else: self.error() response.headers['Content-Type'] = 'application/rss+xml' return serializers.rss(feed) def serve_json(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'application/json; charset=utf-8' if not args: args = request.args d = dict(request.vars) if args and args[0] in self.json_procedures: s = universal_caller(self.json_procedures[args[0]], *args[1:], **d) if hasattr(s, 'as_list'): s = s.as_list() return response.json(s) self.error() class JsonRpcException(Exception): def __init__(self, code, info): jrpc_error = Service.jsonrpc_errors.get(code) if jrpc_error: self.message, self.description = jrpc_error self.code, self.info = code, info # jsonrpc 2.0 error types. records the following structure {code: (message,meaning)} jsonrpc_errors = { -32700: ("Parse error. 
Invalid JSON was received by the server.", "An error occurred on the server while parsing the JSON text."), -32600: ("Invalid Request", "The JSON sent is not a valid Request object."), -32601: ("Method not found", "The method does not exist / is not available."), -32602: ("Invalid params", "Invalid method parameter(s)."), -32603: ("Internal error", "Internal JSON-RPC error."), -32099: ("Server error", "Reserved for implementation-defined server-errors.")} def serve_jsonrpc(self): def return_response(id, result): return serializers.json({'version': '1.1', 'id': id, 'result': result, 'error': None}) def return_error(id, code, message, data=None): error = {'name': 'JSONRPCError', 'code': code, 'message': message} if data is not None: error['data'] = data return serializers.json({'id': id, 'version': '1.1', 'error': error, }) request = current.request response = current.response response.headers['Content-Type'] = 'application/json; charset=utf-8' methods = self.jsonrpc_procedures data = json_parser.loads(request.body.read()) jsonrpc_2 = data.get('jsonrpc') if jsonrpc_2: #hand over to version 2 of the protocol return self.serve_jsonrpc2(data) id, method, params = data.get('id'), data.get('method'), data.get('params', []) if id is None: return return_error(0, 100, 'missing id') if not method in methods: return return_error(id, 100, 'method "%s" does not exist' % method) try: if isinstance(params, dict): s = methods[method](**params) else: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() return return_response(id, s) except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except: etype, eval, etb = sys.exc_info() message = '%s: %s' % (etype.__name__, eval) data = request.is_local and traceback.format_tb(etb) logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb))) return return_error(id, 100, message, data) def serve_jsonrpc2(self, data=None, batch_element=False): def return_response(id, result): if not 
must_respond: return None return serializers.json({'jsonrpc': '2.0', 'id': id, 'result': result}) def return_error(id, code, message=None, data=None): error = {'code': code} if Service.jsonrpc_errors.has_key(code): error['message'] = Service.jsonrpc_errors[code][0] error['data'] = Service.jsonrpc_errors[code][1] if message is not None: error['message'] = message if data is not None: error['data'] = data return serializers.json({'jsonrpc': '2.0', 'id': id, 'error': error}) def validate(data): """ Validate request as defined in: http://www.jsonrpc.org/specification#request_object. Args: data(str): The json object. Returns: - True -- if successful - False -- if no error should be reported (i.e. data is missing 'id' member) Raises: JsonRPCException """ iparms = set(data.keys()) mandatory_args = set(['jsonrpc', 'method']) missing_args = mandatory_args - iparms if missing_args: raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args)) if data['jsonrpc'] != '2.0': raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc']) if 'id' not in iparms: return False return True request = current.request response = current.response if not data: response.headers['Content-Type'] = 'application/json; charset=utf-8' try: data = json_parser.loads(request.body.read()) except ValueError: # decoding error in json lib return return_error(None, -32700) # Batch handling if isinstance(data, list) and not batch_element: retlist = [] for c in data: retstr = self.serve_jsonrpc2(c, batch_element=True) if retstr: # do not add empty responses retlist.append(retstr) if len(retlist) == 0: # return nothing return '' else: return "[" + ','.join(retlist) + "]" methods = self.jsonrpc2_procedures methods.update(self.jsonrpc_procedures) try: must_respond = validate(data) except Service.JsonRpcException, e: return return_error(None, e.code, e.info) id, method, params = data.get('id'), data['method'], data.get('params', '') if not method in methods: 
return return_error(id, -32601, data='Method "%s" does not exist' % method) try: if isinstance(params, dict): s = methods[method](**params) else: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() if must_respond: return return_response(id, s) else: return '' except HTTP, e: raise e except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except: etype, eval, etb = sys.exc_info() data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb)) logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb))) return return_error(id, -32099, data=data) def serve_xmlrpc(self): request = current.request response = current.response services = self.xmlrpc_procedures.values() return response.xmlrpc(request, services) def serve_amfrpc(self, version=0): try: import pyamf import pyamf.remoting.gateway except: return "pyamf not installed or not in Python sys.path" request = current.request response = current.response if version == 3: services = self.amfrpc3_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) pyamf_request = pyamf.remoting.decode(request.body) else: services = self.amfrpc_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) context = pyamf.get_context(pyamf.AMF0) pyamf_request = pyamf.remoting.decode(request.body, context) pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion) for name, message in pyamf_request: pyamf_response[name] = base_gateway.getProcessor(message)(message) response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE if version == 3: return pyamf.remoting.encode(pyamf_response).getvalue() else: return pyamf.remoting.encode(pyamf_response, context).getvalue() def serve_soap(self, version="1.1"): try: from gluon.contrib.pysimplesoap.server import SoapDispatcher except: return "pysimplesoap not installed in contrib" request = current.request response = current.response procedures = self.soap_procedures 
location = "%s://%s%s" % ( request.env.wsgi_url_scheme, request.env.http_host, URL(r=request, f="call/soap", vars={})) namespace = 'namespace' in response and response.namespace or location documentation = response.description or '' dispatcher = SoapDispatcher( name=response.title, location=location, action=location, # SOAPAction namespace=namespace, prefix='pys', documentation=documentation, ns=True) for method, (function, returns, args, doc) in procedures.iteritems(): dispatcher.register_function(method, function, returns, args, doc) if request.env.request_method == 'POST': fault = {} # Process normal Soap Operation response.headers['Content-Type'] = 'text/xml' xml = dispatcher.dispatch(request.body.read(), fault=fault) if fault: # May want to consider populating a ticket here... response.status = 500 # return the soap response return xml elif 'WSDL' in request.vars: # Return Web Service Description response.headers['Content-Type'] = 'text/xml' return dispatcher.wsdl() elif 'op' in request.vars: # Return method help webpage response.headers['Content-Type'] = 'text/html' method = request.vars['op'] sample_req_xml, sample_res_xml, doc = dispatcher.help(method) body = [H1("Welcome to Web2Py SOAP webservice gateway"), A("See all webservice operations", _href=URL(r=request, f="call/soap", vars={})), H2(method), P(doc), UL(LI("Location: %s" % dispatcher.location), LI("Namespace: %s" % dispatcher.namespace), LI("SoapAction: %s" % dispatcher.action), ), H3("Sample SOAP XML Request Message:"), CODE(sample_req_xml, language="xml"), H3("Sample SOAP XML Response Message:"), CODE(sample_res_xml, language="xml"), ] return {'body': body} else: # Return general help and method list webpage response.headers['Content-Type'] = 'text/html' body = [H1("Welcome to Web2Py SOAP webservice gateway"), P(response.description), P("The following operations are available"), A("See WSDL for webservice description", _href=URL(r=request, f="call/soap", vars={"WSDL":None})), UL([LI(A("%s: %s" % 
(method, doc or ''), _href=URL(r=request, f="call/soap", vars={'op': method}))) for method, doc in dispatcher.list_methods()]), ] return {'body': body} def __call__(self): """ Registers services with:: service = Service() @service.run @service.rss @service.json @service.jsonrpc @service.xmlrpc @service.amfrpc @service.amfrpc3('domain') @service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,}) Exposes services with:: def call(): return service() You can call services with:: http://..../app/default/call/run?[parameters] http://..../app/default/call/rss?[parameters] http://..../app/default/call/json?[parameters] http://..../app/default/call/jsonrpc http://..../app/default/call/xmlrpc http://..../app/default/call/amfrpc http://..../app/default/call/amfrpc3 http://..../app/default/call/soap """ request = current.request if len(request.args) < 1: raise HTTP(404, "Not Found") arg0 = request.args(0) if arg0 == 'run': return self.serve_run(request.args[1:]) elif arg0 == 'rss': return self.serve_rss(request.args[1:]) elif arg0 == 'csv': return self.serve_csv(request.args[1:]) elif arg0 == 'xml': return self.serve_xml(request.args[1:]) elif arg0 == 'json': return self.serve_json(request.args[1:]) elif arg0 == 'jsonrpc': return self.serve_jsonrpc() elif arg0 == 'jsonrpc2': return self.serve_jsonrpc2() elif arg0 == 'xmlrpc': return self.serve_xmlrpc() elif arg0 == 'amfrpc': return self.serve_amfrpc() elif arg0 == 'amfrpc3': return self.serve_amfrpc(3) elif arg0 == 'soap': return self.serve_soap() else: self.error() def error(self): raise HTTP(404, "Object does not exist") def completion(callback): """ Executes a task on completion of the called action. Example: Use as:: from gluon.tools import completion @completion(lambda d: logging.info(repr(d))) def index(): return dict(message='hello') It logs the output of the function every time input is called. The argument of completion is executed in a new thread. 
    """
    def _completion(f):
        def __completion(*a, **b):
            d = None
            try:
                d = f(*a, **b)
                return d
            finally:
                # Run the callback with the action's result (or None on
                # failure) in a new thread, after the action finishes.
                thread.start_new_thread(callback, (d,))
        return __completion
    return _completion


def prettydate(d, T=lambda x: x):
    """
    Return a human-readable relative time string ('3 days ago',
    '1 hour from now', 'now') for a datetime or date `d`.

    `T` is a translation callable (defaults to identity); returns ''
    for a falsy `d` and '[invalid date]' for any other type.
    """
    if isinstance(d, datetime.datetime):
        dt = datetime.datetime.now() - d
    elif isinstance(d, datetime.date):
        dt = datetime.date.today() - d
    elif not d:
        return ''
    else:
        return '[invalid date]'
    if dt.days < 0:
        # Future timestamp: flip the delta and the suffix.
        suffix = ' from now'
        dt = -dt
    else:
        suffix = ' ago'
    # Cascade from coarsest to finest unit; first match wins.
    if dt.days >= 2 * 365:
        return T('%d years' + suffix) % int(dt.days / 365)
    elif dt.days >= 365:
        return T('1 year' + suffix)
    elif dt.days >= 60:
        return T('%d months' + suffix) % int(dt.days / 30)
    elif dt.days > 21:
        return T('1 month' + suffix)
    elif dt.days >= 14:
        return T('%d weeks' + suffix) % int(dt.days / 7)
    elif dt.days >= 7:
        return T('1 week' + suffix)
    elif dt.days > 1:
        return T('%d days' + suffix) % dt.days
    elif dt.days == 1:
        return T('1 day' + suffix)
    elif dt.seconds >= 2 * 60 * 60:
        return T('%d hours' + suffix) % int(dt.seconds / 3600)
    elif dt.seconds >= 60 * 60:
        return T('1 hour' + suffix)
    elif dt.seconds >= 2 * 60:
        return T('%d minutes' + suffix) % int(dt.seconds / 60)
    elif dt.seconds >= 60:
        return T('1 minute' + suffix)
    elif dt.seconds > 1:
        return T('%d seconds' + suffix) % dt.seconds
    elif dt.seconds == 1:
        return T('1 second' + suffix)
    else:
        return T('now')


def test_thread_separation():
    # Doctest helper for PluginManager: verifies that a second thread's
    # writes do not leak into this thread's singleton instance.
    def f():
        c = PluginManager()
        lock1.acquire()
        lock2.acquire()
        c.x = 7
        lock1.release()
        lock2.release()
    lock1 = thread.allocate_lock()
    lock2 = thread.allocate_lock()
    lock1.acquire()
    thread.start_new_thread(f, ())
    a = PluginManager()
    a.x = 5
    lock1.release()
    lock2.acquire()
    return a.x


class PluginManager(object):
    """
    Plugin Manager is similar to a storage object but it is a single
    level singleton. This means that multiple instances within the same
    thread share the same attributes.
    Its constructor is also special. The first argument is the name of
    the plugin you are defining.
The named arguments are parameters needed by the plugin with default values. If the parameters were previous defined, the old values are used. Example: in some general configuration file:: plugins = PluginManager() plugins.me.param1=3 within the plugin model:: _ = PluginManager('me',param1=5,param2=6,param3=7) where the plugin is used:: >>> print plugins.me.param1 3 >>> print plugins.me.param2 6 >>> plugins.me.param3 = 8 >>> print plugins.me.param3 8 Here are some tests:: >>> a=PluginManager() >>> a.x=6 >>> b=PluginManager('check') >>> print b.x 6 >>> b=PluginManager() # reset settings >>> print b.x <Storage {}> >>> b.x=7 >>> print a.x 7 >>> a.y.z=8 >>> print b.y.z 8 >>> test_thread_separation() 5 >>> plugins=PluginManager('me',db='mydb') >>> print plugins.me.db mydb >>> print 'me' in plugins True >>> print plugins.me.installed True """ instances = {} def __new__(cls, *a, **b): id = thread.get_ident() lock = thread.allocate_lock() try: lock.acquire() try: return cls.instances[id] except KeyError: instance = object.__new__(cls, *a, **b) cls.instances[id] = instance return instance finally: lock.release() def __init__(self, plugin=None, **defaults): if not plugin: self.__dict__.clear() settings = self.__getattr__(plugin) settings.installed = True settings.update( (k, v) for k, v in defaults.items() if not k in settings) def __getattr__(self, key): if not key in self.__dict__: self.__dict__[key] = Storage() return self.__dict__[key] def keys(self): return self.__dict__.keys() def __contains__(self, key): return key in self.__dict__ class Expose(object): def __init__(self, base=None, basename=None, extensions=None, allow_download=True): """ Examples: Use as:: def static(): return dict(files=Expose()) or:: def static(): path = os.path.join(request.folder,'static','public') return dict(files=Expose(path,basename='public')) Args: extensions: an optional list of file extensions for filtering displayed files: e.g. 
`['.py', '.jpg']` allow_download: whether to allow downloading selected files """ current.session.forget() base = base or os.path.join(current.request.folder, 'static') basename = basename or current.request.function self.basename = basename if current.request.raw_args: self.args = [arg for arg in current.request.raw_args.split('/') if arg] else: self.args = [arg for arg in current.request.args if arg] filename = os.path.join(base, *self.args) if not os.path.exists(filename): raise HTTP(404, "FILE NOT FOUND") if not os.path.normpath(filename).startswith(base): raise HTTP(401, "NOT AUTHORIZED") if allow_download and not os.path.isdir(filename): current.response.headers['Content-Type'] = contenttype(filename) raise HTTP(200, open(filename, 'rb'), **current.response.headers) self.path = path = os.path.join(filename, '*') self.folders = [f[len(path) - 1:] for f in sorted(glob.glob(path)) if os.path.isdir(f) and not self.isprivate(f)] self.filenames = [f[len(path) - 1:] for f in sorted(glob.glob(path)) if not os.path.isdir(f) and not self.isprivate(f)] if 'README' in self.filenames: readme = open(os.path.join(filename, 'README')).read() self.paragraph = MARKMIN(readme) else: self.paragraph = None if extensions: self.filenames = [f for f in self.filenames if os.path.splitext(f)[-1] in extensions] def breadcrumbs(self, basename): path = [] span = SPAN() span.append(A(basename, _href=URL())) for arg in self.args: span.append('/') path.append(arg) span.append(A(arg, _href=URL(args='/'.join(path)))) return span def table_folders(self): if self.folders: return SPAN(H3('Folders'), TABLE( *[TR(TD(A(folder, _href=URL(args=self.args + [folder])))) for folder in self.folders], **dict(_class="table"))) return '' @staticmethod def isprivate(f): return 'private' in f or f.startswith('.') or f.endswith('~') @staticmethod def isimage(f): return os.path.splitext(f)[-1].lower() in ( '.png', '.jpg', '.jpeg', '.gif', '.tiff') def table_files(self, width=160): if self.filenames: return 
SPAN(H3('Files'), TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))), TD(IMG(_src=URL(args=self.args + [f]), _style='max-width:%spx' % width) if width and self.isimage(f) else '')) for f in self.filenames], **dict(_class="table"))) return '' def xml(self): return DIV( H2(self.breadcrumbs(self.basename)), self.paragraph or '', self.table_folders(), self.table_files()).xml() class Wiki(object): everybody = 'everybody' rows_page = 25 def markmin_base(self, body): return MARKMIN(body, extra=self.settings.extra, url=True, environment=self.env, autolinks=lambda link: expand_one(link, {})).xml() def render_tags(self, tags): return DIV( _class='w2p_wiki_tags', *[A(t.strip(), _href=URL(args='_search', vars=dict(q=t))) for t in tags or [] if t.strip()]) def markmin_render(self, page): return self.markmin_base(page.body) + self.render_tags(page.tags).xml() def html_render(self, page): html = page.body # @///function -> http://..../function html = replace_at_urls(html, URL) # http://...jpg -> <img src="http://...jpg/> or embed html = replace_autolinks(html, lambda link: expand_one(link, {})) # @{component:name} -> <script>embed component name</script> html = replace_components(html, self.env) html = html + self.render_tags(page.tags).xml() return html @staticmethod def component(text): """ In wiki docs allows `@{component:controller/function/args}` which renders as a `LOAD(..., ajax=True)` """ items = text.split('/') controller, function, args = items[0], items[1], items[2:] return LOAD(controller, function, args=args, ajax=True).xml() def get_renderer(self): if isinstance(self.settings.render, basestring): r = getattr(self, "%s_render" % self.settings.render) elif callable(self.settings.render): r = self.settings.render elif isinstance(self.settings.render, dict): def custom_render(page): if page.render: if page.render in self.settings.render.keys(): my_render = self.settings.render[page.render] else: my_render = getattr(self, "%s_render" % page.render) else: my_render = 
self.markmin_render return my_render(page) r = custom_render else: raise ValueError( "Invalid render type %s" % type(self.settings.render)) return r def __init__(self, auth, env=None, render='markmin', manage_permissions=False, force_prefix='', restrict_search=False, extra=None, menu_groups=None, templates=None, migrate=True, controller=None, function=None, groups=None): settings = self.settings = auth.settings.wiki """ Args: render: - "markmin" - "html" - `<function>` : Sets a custom render function - `dict(html=<function>, markmin=...)`: dict(...) allows multiple custom render functions - "multiple" : Is the same as `{}`. It enables per-record formats using builtins """ engines = set(['markmin', 'html']) show_engine = False if render == "multiple": render = {} if isinstance(render, dict): [engines.add(key) for key in render] show_engine = True settings.render = render perms = settings.manage_permissions = manage_permissions settings.force_prefix = force_prefix settings.restrict_search = restrict_search settings.extra = extra or {} settings.menu_groups = menu_groups settings.templates = templates settings.controller = controller settings.function = function settings.groups = auth.user_groups.values() \ if groups is None else groups db = auth.db self.env = env or {} self.env['component'] = Wiki.component self.auth = auth self.wiki_menu_items = None if self.auth.user: self.settings.force_prefix = force_prefix % self.auth.user else: self.settings.force_prefix = force_prefix self.host = current.request.env.http_host table_definitions = [ ('wiki_page', { 'args': [ Field('slug', requires=[IS_SLUG(), IS_NOT_IN_DB(db, 'wiki_page.slug')], writable=False), Field('title', length=255, unique=True), Field('body', 'text', notnull=True), Field('tags', 'list:string'), Field('can_read', 'list:string', writable=perms, readable=perms, default=[Wiki.everybody]), Field('can_edit', 'list:string', writable=perms, readable=perms, default=[Wiki.everybody]), Field('changelog'), 
Field('html', 'text', compute=self.get_renderer(), readable=False, writable=False), Field('render', default="markmin", readable=show_engine, writable=show_engine, requires=IS_EMPTY_OR( IS_IN_SET(engines))), auth.signature], 'vars': {'format': '%(title)s', 'migrate': migrate}}), ('wiki_tag', { 'args': [ Field('name'), Field('wiki_page', 'reference wiki_page'), auth.signature], 'vars':{'format': '%(title)s', 'migrate': migrate}}), ('wiki_media', { 'args': [ Field('wiki_page', 'reference wiki_page'), Field('title', required=True), Field('filename', 'upload', required=True), auth.signature], 'vars': {'format': '%(title)s', 'migrate': migrate}}), ] # define only non-existent tables for key, value in table_definitions: args = [] if not key in db.tables(): # look for wiki_ extra fields in auth.settings extra_fields = auth.settings.extra_fields if extra_fields: if key in extra_fields: if extra_fields[key]: for field in extra_fields[key]: args.append(field) args += value['args'] db.define_table(key, *args, **value['vars']) if self.settings.templates is None and not \ self.settings.manage_permissions: self.settings.templates = db.wiki_page.tags.contains('template') & \ db.wiki_page.can_read.contains('everybody') def update_tags_insert(page, id, db=db): for tag in page.tags or []: tag = tag.strip().lower() if tag: db.wiki_tag.insert(name=tag, wiki_page=id) def update_tags_update(dbset, page, db=db): page = dbset.select(limitby=(0, 1)).first() db(db.wiki_tag.wiki_page == page.id).delete() for tag in page.tags or []: tag = tag.strip().lower() if tag: db.wiki_tag.insert(name=tag, wiki_page=page.id) db.wiki_page._after_insert.append(update_tags_insert) db.wiki_page._after_update.append(update_tags_update) if (auth.user and check_credentials(current.request, gae_login=False) and not 'wiki_editor' in auth.user_groups.values() and self.settings.groups == auth.user_groups.values()): group = db.auth_group(role='wiki_editor') gid = group.id if group else db.auth_group.insert( 
            # NOTE(review): the three statements below are the tail of the
            # Wiki constructor, whose `def` line lies before this chunk —
            # they auto-enroll the current user into the wiki_editor group.
                role='wiki_editor')
            auth.add_membership(gid)

        settings.lock_keys = True

    # WIKI ACCESS POLICY

    def not_authorized(self, page=None):
        # Central "access denied" hook: every handler returns its result,
        # which raises 401 so web2py converts it into an HTTP error page.
        raise HTTP(401)

    def can_read(self, page):
        # A page is readable when permissions are unmanaged, when
        # 'everybody' is listed, or (for a logged-in user) when the user
        # is an editor, shares a group with can_read/can_edit, or authored
        # the page.
        if 'everybody' in page.can_read or not \
                self.settings.manage_permissions:
            return True
        elif self.auth.user:
            groups = self.settings.groups
            if ('wiki_editor' in groups or
                set(groups).intersection(set(
                    page.can_read + page.can_edit)) or
                    page.created_by == self.auth.user.id):
                return True
        return False

    def can_edit(self, page=None):
        # Anonymous users are bounced to the login page (redirect raises,
        # so the `return` below is never reached for them).
        if not self.auth.user:
            redirect(self.auth.settings.login_url)
        groups = self.settings.groups
        # With page=None this answers "may the user create pages at all"
        # (wiki_editor or wiki_author); with a page it additionally allows
        # group members listed in can_edit and the page's author.
        return ('wiki_editor' in groups or
                (page is None and 'wiki_author' in groups) or
                not page is None and (
                    set(groups).intersection(set(page.can_edit)) or
                    page.created_by == self.auth.user.id))

    def can_manage(self):
        # Only members of the wiki_editor group may manage pages.
        if not self.auth.user:
            return False
        groups = self.settings.groups
        return 'wiki_editor' in groups

    def can_search(self):
        # Search is unconditionally public.
        return True

    def can_see_menu(self):
        # The wiki admin menu is visible to any logged-in user unless
        # settings.menu_groups restricts it to specific groups.
        if self.auth.user:
            if self.settings.menu_groups is None:
                return True
            else:
                groups = self.settings.groups
                if any(t in self.settings.menu_groups for t in groups):
                    return True
        return False

    ### END POLICY

    def automenu(self):
        """adds the menu if not present"""
        # Build the wiki menu once per request and append it to the
        # response menu; controller/function must already be configured.
        if (not self.wiki_menu_items and
                self.settings.controller and
                self.settings.function):
            self.wiki_menu_items = self.menu(self.settings.controller,
                                             self.settings.function)
            current.response.menu += self.wiki_menu_items

    def __call__(self):
        # Request dispatcher: the first URL arg selects the action.
        # A numeric arg serves a media attachment; a non-underscore slug
        # reads a page; '_'-prefixed args select admin actions.
        request = current.request
        settings = self.settings
        settings.controller = settings.controller or request.controller
        settings.function = settings.function or request.function
        self.automenu()
        zero = request.args(0) or 'index'
        if zero and zero.isdigit():
            return self.media(int(zero))
        elif not zero or not zero.startswith('_'):
            return self.read(zero)
        elif zero == '_edit':
            return self.edit(request.args(1) or 'index',
                             request.args(2) or 0)
        elif zero == '_editmedia':
            return self.editmedia(request.args(1) or 'index')
        elif zero == '_create':
            return self.create()
        elif zero == '_pages':
            return self.pages()
        elif zero == '_search':
            return self.search()
        elif zero == '_recent':
            ipage = int(request.vars.page or 0)
            query = self.auth.db.wiki_page.created_by == request.args(
                1, cast=int)
            return self.search(query=query,
                               orderby=~self.auth.db.wiki_page.created_on,
                               limitby=(ipage * self.rows_page,
                                        (ipage + 1) * self.rows_page),
                               )
        elif zero == '_cloud':
            return self.cloud()
        elif zero == '_preview':
            return self.preview(self.get_renderer())
        # NOTE(review): unmatched '_'-prefixed args fall through and
        # return None — confirm whether a 404 was intended.

    def first_paragraph(self, page):
        # Return the first non-heading paragraph of the page body.
        # NOTE(review): the `not` looks inverted — as written, the preview
        # text is produced only for pages the caller may NOT read, and
        # readable pages always yield ''. Verify intent before fixing.
        if not self.can_read(page):
            mm = (page.body or '').replace('\r', '')
            ps = [p for p in mm.split('\n\n')
                  if not p.startswith('#') and p.strip()]
            if ps:
                return ps[0]
        return ''

    def fix_hostname(self, body):
        # Re-expand the '://HOSTNAME' placeholder (stored by edit()) into
        # the current request host, so saved links survive host changes.
        return (body or '').replace('://HOSTNAME', '://%s' % self.host)

    def read(self, slug, force_render=False):
        # Render a page by slug, honoring the response extension
        # (html / load-component / raw dict for services).
        # NOTE(review): `slug in '_cloud'` is a SUBSTRING test, so slugs
        # like 'c', 'lo', 'ud' also match (likewise '_search' below);
        # presumably `slug == '_cloud'` was intended — confirm.
        if slug in '_cloud':
            return self.cloud()
        elif slug in '_search':
            return self.search()
        page = self.auth.db.wiki_page(slug=slug)
        if page and (not self.can_read(page)):
            return self.not_authorized(page)
        if current.request.extension == 'html':
            if not page:
                # Unknown slug: offer a "create this page" link.
                url = URL(args=('_create', slug))
                return dict(content=A('Create page "%s"' % slug,
                                      _href=url, _class="btn"))
            else:
                # Serve the cached rendered HTML unless a re-render is
                # forced (e.g. after changing the render engine).
                html = page.html if not force_render \
                    else self.get_renderer()(page)
                content = XML(self.fix_hostname(html))
                return dict(title=page.title,
                            slug=page.slug,
                            page=page,
                            content=content,
                            tags=page.tags,
                            created_on=page.created_on,
                            modified_on=page.modified_on)
        elif current.request.extension == 'load':
            return self.fix_hostname(page.html) if page else ''
        else:
            if not page:
                raise HTTP(404)
            else:
                return dict(title=page.title,
                            slug=page.slug,
                            page=page,
                            content=page.body,
                            tags=page.tags,
                            created_on=page.created_on,
                            modified_on=page.modified_on)

    def edit(self, slug, from_template=0):
        # Page editor: builds an SQLFORM for the page (pre-populating
        # defaults for new pages, optionally copying `from_template`'s
        # body) and attaches the client-side preview/media toolbar.
        auth = self.auth
        db = auth.db
        page = db.wiki_page(slug=slug)
        if not self.can_edit(page):
            return self.not_authorized(page)
        title_guess = ' '.join(c.capitalize() for c in slug.split('-'))
        if not page:
            # Non-managers may only create pages under the forced prefix.
            if not (self.can_manage() or
                    slug.startswith(self.settings.force_prefix)):
                current.session.flash = 'slug must have "%s" prefix' \
                    % self.settings.force_prefix
                redirect(URL(args=('_create')))
            db.wiki_page.can_read.default = [Wiki.everybody]
            db.wiki_page.can_edit.default = [auth.user_group_role()]
            db.wiki_page.title.default = title_guess
            db.wiki_page.slug.default = slug
            if slug == 'wiki-menu':
                db.wiki_page.body.default = \
                    '- Menu Item > @////index\n- - Submenu > http://web2py.com'
            else:
                db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \
                    if int(from_template) > 0 else '## %s\n\npage content' % title_guess
        vars = current.request.post_vars
        if vars.body:
            # Store host-relative links so fix_hostname() can re-expand
            # them on any deployment host.
            vars.body = vars.body.replace('://%s' % self.host,
                                          '://HOSTNAME')
        form = SQLFORM(db.wiki_page, page, deletable=True,
                       formstyle='table2cols', showid=False).process()
        if form.deleted:
            current.session.flash = 'page deleted'
            redirect(URL())
        elif form.accepted:
            current.session.flash = 'page created'
            redirect(URL(args=slug))
        # Client-side toolbar: Preview toggles an ajax-rendered preview of
        # the body; Media (existing pages only) loads the media grid as a
        # web2py component. `\` at line ends inside the triple-quoted
        # string are Python line continuations, not JavaScript.
        script = """
        jQuery(function() {
            if (!jQuery('#wiki_page_body').length) return;
            var pagecontent = jQuery('#wiki_page_body');
            pagecontent.css('font-family',
                            'Monaco,Menlo,Consolas,"Courier New",monospace');
            var prevbutton = jQuery('<button class="btn nopreview">Preview</button>');
            var preview = jQuery('<div id="preview"></div>').hide();
            var previewmedia = jQuery('<div id="previewmedia"></div>');
            var form = pagecontent.closest('form');
            preview.insertBefore(form);
            prevbutton.insertBefore(form);
            if(%(link_media)s) {
              var mediabutton = jQuery('<button class="btn nopreview">Media</button>');
              mediabutton.insertBefore(form);
              previewmedia.insertBefore(form);
              mediabutton.click(function() {
                if (mediabutton.hasClass('nopreview')) {
                  web2py_component('%(urlmedia)s', 'previewmedia');
                } else {
                  previewmedia.empty();
                }
                mediabutton.toggleClass('nopreview');
              });
            }
            prevbutton.click(function(e) {
                e.preventDefault();
                if (prevbutton.hasClass('nopreview')) {
                    prevbutton.addClass('preview').removeClass(
                        'nopreview').html('Edit Source');
                    try{var wiki_render = jQuery('#wiki_page_render').val()}
                    catch(e){var wiki_render = null;}
                    web2py_ajax_page('post', \
                        '%(url)s', {body: jQuery('#wiki_page_body').val(), \
                        render: wiki_render}, 'preview');
                    form.fadeOut('fast', function() {preview.fadeIn()});
                } else {
                    prevbutton.addClass(
                        'nopreview').removeClass('preview').html('Preview');
                    preview.fadeOut('fast', function() {form.fadeIn()});
                }
            })
        })
        """ % dict(url=URL(args=('_preview', slug)),
                   link_media=('true' if page else 'false'),
                   urlmedia=URL(extension='load',
                                args=('_editmedia', slug),
                                vars=dict(embedded=1)))
        return dict(content=TAG[''](form, SCRIPT(script)))

    def editmedia(self, slug):
        # Media manager for one page: a grid of wiki_media rows. When
        # embedded in the editor (vars.embedded), each row gets a
        # "copy into source" link that pastes its @////id/... reference
        # into the body textarea.
        auth = self.auth
        db = auth.db
        page = db.wiki_page(slug=slug)
        if not (page and self.can_edit(page)):
            return self.not_authorized(page)
        self.auth.db.wiki_media.id.represent = lambda id, row: \
            id if not row.filename else \
            SPAN('@////%i/%s.%s' % (
                id, IS_SLUG.urlify(row.title.split('.')[0]),
                row.filename.split('.')[-1]))
        self.auth.db.wiki_media.wiki_page.default = page.id
        self.auth.db.wiki_media.wiki_page.writable = False
        links = []
        csv = True
        create = True
        if current.request.vars.embedded:
            script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;"
            fragment = self.auth.db.wiki_media.id.represent
            csv = False
            create = False
            links = [
                lambda row: A('copy into source', _href='#',
                              _onclick=script % (fragment(row.id, row)))]
        content = SQLFORM.grid(
            self.auth.db.wiki_media.wiki_page == page.id,
            orderby=self.auth.db.wiki_media.title,
            links=links,
            csv=csv,
            create=create,
            args=['_editmedia', slug],
            user_signature=False)
        return dict(content=content)

    def create(self):
        # "New page" form: asks for a slug (validated, unique) and, when
        # templates are configured, an optional template page to copy,
        # then redirects into the editor.
        if not self.can_edit():
            return self.not_authorized()
        db = self.auth.db
        slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id,
                                               db.wiki_page.slug)
        # NOTE(review): `options` is built but never used below — possibly
        # leftover from an earlier dropdown implementation.
        options = [OPTION(row.slug, _value=row.id) for row in slugs]
        options.insert(0, OPTION('', _value=''))
        fields = [Field("slug", default=current.request.args(1) or
                        self.settings.force_prefix,
                        requires=(IS_SLUG(),
                                  IS_NOT_IN_DB(db, db.wiki_page.slug))), ]
        if self.settings.templates:
            fields.append(
                Field("from_template", "reference wiki_page",
                      requires=IS_EMPTY_OR(
                          IS_IN_DB(db(self.settings.templates),
                                   db.wiki_page._id, '%(slug)s')),
                      comment=current.T(
                          "Choose Template or empty for new Page")))
        form = SQLFORM.factory(*fields, **dict(_class="well"))
        form.element("[type=submit]").attributes["_value"] = \
            current.T("Create Page from Slug")
        if form.process().accepted:
            form.vars.from_template = 0 if not form.vars.from_template \
                else form.vars.from_template
            redirect(URL(args=('_edit', form.vars.slug,
                               form.vars.from_template or 0)))  # added param
        return dict(content=form)

    def pages(self):
        # Management grid over all wiki pages (managers only), with
        # per-row links into the editor and media manager.
        if not self.can_manage():
            return self.not_authorized()
        self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN(
            '@////%s' % slug)
        self.auth.db.wiki_page.title.represent = lambda title, row: \
            A(title, _href=URL(args=row.slug))
        wiki_table = self.auth.db.wiki_page
        content = SQLFORM.grid(
            wiki_table,
            fields=[wiki_table.slug,
                    wiki_table.title, wiki_table.tags,
                    wiki_table.can_read, wiki_table.can_edit],
            links=[
                lambda row: A('edit', _href=URL(args=('_edit', row.slug)),
                              _class='btn'),
                lambda row: A('media',
                              _href=URL(args=('_editmedia', row.slug)),
                              _class='btn')],
            details=False, editable=False, deletable=False, create=False,
            orderby=self.auth.db.wiki_page.title,
            args=['_pages'],
            user_signature=False)
        return dict(content=content)

    def media(self, id):
        # Stream a media attachment by id, enforcing the owning page's
        # read permission, and mark the response privately cacheable.
        request, response, db = (current.request, current.response,
                                 self.auth.db)
        media = db.wiki_media(id)
        if media:
            if self.settings.manage_permissions:
                page = db.wiki_page(media.wiki_page)
                if not self.can_read(page):
                    return self.not_authorized(page)
            request.args = [media.filename]
            m = response.download(request, db)
            current.session.forget()  # get rid of the cookie
            response.headers['Last-Modified'] = \
                request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
            # Drop the attachment disposition so browsers display inline.
            if 'Content-Disposition' in response.headers:
                del response.headers['Content-Disposition']
            response.headers['Pragma'] = 'cache'
            response.headers['Cache-Control'] = 'private'
            return m
        else:
            raise HTTP(404)

    def menu(self, controller='default', function='index'):
        # Build the navigation menu from the special 'wiki-menu' page:
        # each "- Title > link" line becomes an entry, nesting by the
        # number of leading dashes; '@' links are expanded through URL().
        # Appends a [Wiki] admin submenu for users who may see it.
        db = self.auth.db
        request = current.request
        menu_page = db.wiki_page(slug='wiki-menu')
        menu = []
        if menu_page:
            tree = {'': menu}
            regex = re.compile('[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')
            for match in regex.finditer(self.fix_hostname(menu_page.body)):
                base = match.group('base').replace(' ', '')
                title = match.group('title')
                link = match.group('link')
                title_page = None
                if link.startswith('@'):
                    items = link[2:].split('/')
                    if len(items) > 3:
                        title_page = items[3]
                    link = URL(a=items[0] or None,
                               c=items[1] or controller,
                               f=items[2] or function,
                               args=items[3:])
                parent = tree.get(base[1:], tree[''])
                subtree = []
                tree[base] = subtree
                parent.append((current.T(title),
                               request.args(0) == title_page,
                               link, subtree))
        if self.can_see_menu():
            submenu = []
            menu.append((current.T('[Wiki]'), None, None, submenu))
            if URL() == URL(controller, function):
                # `mode` bitmask-style flags decide which of the three
                # page-context links are shown for the current action.
                if not str(request.args(0)).startswith('_'):
                    slug = request.args(0) or 'index'
                    mode = 1
                elif request.args(0) == '_edit':
                    slug = request.args(1) or 'index'
                    mode = 2
                elif request.args(0) == '_editmedia':
                    slug = request.args(1) or 'index'
                    mode = 3
                else:
                    mode = 0
                if mode in (2, 3):
                    submenu.append((current.T('View Page'), None,
                                    URL(controller, function, args=slug)))
                if mode in (1, 3):
                    submenu.append((current.T('Edit Page'), None,
                                    URL(controller, function,
                                        args=('_edit', slug))))
                if mode in (1, 2):
                    submenu.append((current.T('Edit Page Media'), None,
                                    URL(controller, function,
                                        args=('_editmedia', slug))))
            submenu.append((current.T('Create New Page'), None,
                            URL(controller, function, args=('_create'))))
            # Moved next if to inside self.auth.user check
            if self.can_manage():
                submenu.append((current.T('Manage Pages'), None,
                                URL(controller, function,
                                    args=('_pages'))))
                submenu.append((current.T('Edit Menu'), None,
                                URL(controller, function,
                                    args=('_edit', 'wiki-menu'))))
            # Also moved inside self.auth.user check
            submenu.append((current.T('Search Pages'), None,
                            URL(controller, function, args=('_search'))))
        return menu

    def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        # Tag/title search. With no tags/query it renders the search form
        # (and parses request.vars.q into tags); with results it renders
        # match cards (html/load) or returns raw dicts (services).
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or not query is None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            # NOTE(review): `self.manage()` does not exist on this class —
            # `self.can_manage()` is almost certainly intended; as written
            # this branch raises AttributeError when restrict_search is on.
            if self.settings.restrict_search and not self.manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            # NOTE(review): bare `reduce` implies this file targets
            # Python 2 (in Python 3 it lives in functools).
            pages = db(query).select(count,
                                     *fields,
                                     **dict(orderby=orderby or ~count,
                                            groupby=reduce(
                                                lambda a, b: a | b, fields),
                                            distinct=True,
                                            limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search',
                                          vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                    args=p.wiki_page.slug))),
                    MARKMIN(self.first_paragraph(p.wiki_page))
                    if preview else '',
                    DIV(_class='w2p_wiki_tags',
                        *[link(t.strip()) for t in
                          p.wiki_page.tags or [] if t.strip()]),
                    _class='w2p_wiki_search_item')
                    for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)

    def cloud(self):
        # Tag cloud: the 20 most-used tags, font-sized linearly between
        # the most (a) and least (b) frequent, each linking to a search.
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count(distinct=True)
        ids = db(db.wiki_tag).select(
            db.wiki_tag.name, count,
            distinct=True,
            groupby=db.wiki_tag.name,
            orderby=~count, limitby=(0, 20))
        if ids:
            a, b = ids[0](count), ids[-1](count)

        def style(c):
            # Scale font size into [1.3em, 2.8em]; max(a-b, 1) guards
            # against division by zero when all counts are equal.
            STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem'
            size = (1.5 * (c - b) / max(a - b, 1) + 1.3)
            return STYLE % (1.3, size)
        items = []
        for item in ids:
            items.append(A(item.wiki_tag.name,
                           _style=style(item(count)),
                           _href=URL(args='_search',
                                     vars=dict(q=item.wiki_tag.name))))
            items.append(' ')
        return dict(content=DIV(_class='w2p_cloud', *items))

    def preview(self, render):
        # Ajax endpoint behind the editor's Preview button: renders the
        # posted body with the given engine and returns the HTML.
        request = current.request
        # FIXME: This is an ugly hack to ensure a default render
        # engine if not specified (with multiple render engines)
        if not "render" in request.post_vars:
            request.post_vars.render = None
        return render(request.post_vars)


class Config(object):
    # Thin wrapper around ConfigParser for one [section] of one ini file,
    # with a session fallback when the file cannot be written.

    def __init__(
        self,
        filename,   # path of the ini file to read/write
        section,    # section name (created if missing)
        default_values={}  # NOTE(review): mutable default arg — shared
                           # across calls; prefer None + `or {}`.
    ):
        self.config = ConfigParser.ConfigParser(default_values)
        self.config.read(filename)
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.section = section
        self.filename = filename

    def read(self):
        # Prefer session-cached settings (saved by a failed save());
        # otherwise read them from the parsed file.
        if not(isinstance(current.session['settings_%s' % self.section],
                          dict)):
            settings = dict(self.config.items(self.section))
        else:
            settings = current.session['settings_%s' % self.section]
        return settings

    def save(self, options):
        # Write (option, value) pairs back to the file; on failure, stash
        # the current values in the session instead and report False.
        for option, value in options:
            self.config.set(self.section, option, value)
        # NOTE(review): the file handle from open() is never closed, and
        # the bare except swallows all errors — consider `with open(...)`
        # and catching IOError/OSError explicitly.
        try:
            self.config.write(open(self.filename, 'w'))
            result = True
        except:
            current.session['settings_%s' % self.section] = dict(
                self.config.items(self.section))
            result = False
        return result


if __name__ == '__main__':
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1731_1
crossvul-python_data_good_1915_11
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# HTTP client for Matrix server-to-server (federation) traffic: builds
# matrix:// requests, signs them, and applies retry/backoff and IP
# blacklisting.

import cgi
import logging
import random
import sys
import urllib.parse
from io import BytesIO
from typing import Callable, Dict, List, Optional, Tuple, Union

import attr
import treq
from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json

from twisted.internet import defer
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import _EPSILON, Cooperator
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse

import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
)
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
    BlacklistingAgentWrapper,
    BlacklistingReactorWrapper,
    encode_query_args,
    readBodyToFile,
)
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import (
    inject_active_span_byte_dict,
    set_tag,
    start_active_span,
    tags,
)
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure

logger = logging.getLogger(__name__)

# Prometheus counters for outbound requests / inbound responses.
outgoing_requests_counter = Counter(
    "synapse_http_matrixfederationclient_requests", "", ["method"]
)
incoming_responses_counter = Counter(
    "synapse_http_matrixfederationclient_responses", "", ["method", "code"]
)

# Retry-attempt budgets for the two retry algorithms (see _send_request).
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3
MAXINT = sys.maxsize


# Monotonically increasing counter used to build per-request txn ids.
_next_id = 1


QueryArgs = Dict[str, Union[str, List[str]]]


@attr.s(slots=True, frozen=True)
class MatrixFederationRequest:
    # Immutable description of one outbound federation request; txn_id
    # and uri are derived in __attrs_post_init__ (frozen instances are
    # mutated via object.__setattr__).
    method = attr.ib(type=str)
    """HTTP method
    """

    path = attr.ib(type=str)
    """HTTP path
    """

    destination = attr.ib(type=str)
    """The remote server to send the HTTP request to.
    """

    json = attr.ib(default=None, type=Optional[JsonDict])
    """JSON to send in the body.
    """

    json_callback = attr.ib(default=None, type=Optional[Callable[[], JsonDict]])
    """A callback to generate the JSON.
    """

    query = attr.ib(default=None, type=Optional[dict])
    """Query arguments.
    """

    txn_id = attr.ib(default=None, type=Optional[str])
    """Unique ID for this request (for logging)
    """

    uri = attr.ib(init=False, type=bytes)
    """The URI of this request
    """

    def __attrs_post_init__(self) -> None:
        # Assign a unique "<METHOD>-O-<n>" transaction id for logging;
        # the counter wraps before reaching sys.maxsize.
        global _next_id
        txn_id = "%s-O-%s" % (self.method, _next_id)
        _next_id = (_next_id + 1) % (MAXINT - 1)

        object.__setattr__(self, "txn_id", txn_id)

        destination_bytes = self.destination.encode("ascii")
        path_bytes = self.path.encode("ascii")
        if self.query:
            query_bytes = encode_query_args(self.query)
        else:
            query_bytes = b""

        # The object is frozen so we can pre-compute this.
        uri = urllib.parse.urlunparse(
            (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
        )
        object.__setattr__(self, "uri", uri)

    def get_json(self) -> Optional[JsonDict]:
        # Prefer the lazily-generated body over the static one.
        if self.json_callback:
            return self.json_callback()

        return self.json


async def _handle_json_response(
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
    response: IResponse,
    start_ms: int,
) -> JsonDict:
    """
    Reads the JSON body of a response, with a timeout

    Args:
        reactor: twisted reactor, for the timeout
        timeout_sec: number of seconds to wait for response to complete
        request: the request that triggered the response
        response: response to the request
        start_ms: Timestamp when request was made

    Returns:
        The parsed JSON response
    """
    try:
        # Reject non-JSON content types up front.
        check_content_type_is_json(response.headers)

        # Use the custom JSON decoder (partially re-implements
        # treq.json_content).
        d = treq.text_content(response, encoding="utf-8")
        d.addCallback(json_decoder.decode)
        d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)

        body = await make_deferred_yieldable(d)
    except defer.TimeoutError as e:
        logger.warning(
            "{%s} [%s] Timed out reading response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        # Timeouts are retryable by the caller.
        raise RequestSendFailed(e, can_retry=True) from e
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response %s %s: %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
            e,
        )
        raise

    time_taken_secs = reactor.seconds() - start_ms / 1000

    logger.info(
        "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        time_taken_secs,
        request.method,
        request.uri.decode("ascii"),
    )
    return body


class MatrixFederationHttpClient:
    """HTTP client used to talk to other homeservers over the federation
    protocol. Send client certificates and signs requests.
    """
    # NOTE(review): the body of this class (its full docstring,
    # constructor and request methods) continues beyond this chunk and is
    # not reviewed as a unit here.
Attributes: agent (twisted.web.client.Agent): The twisted Agent used to send the requests. """ def __init__(self, hs, tls_client_options_factory): self.hs = hs self.signing_key = hs.signing_key self.server_name = hs.hostname # We need to use a DNS resolver which filters out blacklisted IP # addresses, to prevent DNS rebinding. self.reactor = BlacklistingReactorWrapper( hs.get_reactor(), None, hs.config.federation_ip_range_blacklist ) user_agent = hs.version_string if hs.config.user_agent_suffix: user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix) user_agent = user_agent.encode("ascii") self.agent = MatrixFederationAgent( self.reactor, tls_client_options_factory, user_agent, hs.config.federation_ip_range_blacklist, ) # Use a BlacklistingAgentWrapper to prevent circumventing the IP # blacklist via IP literals in server names self.agent = BlacklistingAgentWrapper( self.agent, ip_blacklist=hs.config.federation_ip_range_blacklist, ) self.clock = hs.get_clock() self._store = hs.get_datastore() self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout = 60 def schedule(x): self.reactor.callLater(_EPSILON, x) self._cooperator = Cooperator(scheduler=schedule) async def _send_request_with_optional_trailing_slash( self, request: MatrixFederationRequest, try_trailing_slash_on_400: bool = False, **send_request_args ) -> IResponse: """Wrapper for _send_request which can optionally retry the request upon receiving a combination of a 400 HTTP response code and a 'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3 due to #3622. Args: request: details of request to be sent try_trailing_slash_on_400: Whether on receiving a 400 'M_UNRECOGNIZED' from the server to retry the request with a trailing slash appended to the request path. send_request_args: A dictionary of arguments to pass to `_send_request()`. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). Returns: Parsed JSON response body. 
""" try: response = await self._send_request(request, **send_request_args) except HttpResponseException as e: # Received an HTTP error > 300. Check if it meets the requirements # to retry with a trailing slash if not try_trailing_slash_on_400: raise if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED": raise # Retry with a trailing slash if we received a 400 with # 'M_UNRECOGNIZED' which some endpoints can return when omitting a # trailing slash on Synapse <= v0.99.3. logger.info("Retrying request with trailing slash") # Request is frozen so we create a new instance request = attr.evolve(request, path=request.path + "/") response = await self._send_request(request, **send_request_args) return response async def _send_request( self, request: MatrixFederationRequest, retry_on_dns_fail: bool = True, timeout: Optional[int] = None, long_retries: bool = False, ignore_backoff: bool = False, backoff_on_404: bool = False, ) -> IResponse: """ Sends a request to the given server. Args: request: details of request to be sent retry_on_dns_fail: true if the request should be retied on DNS failures timeout: number of milliseconds to wait for the response headers (including connecting to the server), *for each attempt*. 60s by default. long_retries: whether to use the long retry algorithm. The regular retry algorithm makes 4 attempts, with intervals [0.5s, 1s, 2s]. The long retry algorithm makes 11 attempts, with intervals [4s, 16s, 60s, 60s, ...] Both algorithms add -20%/+40% jitter to the retry intervals. Note that the above intervals are *in addition* to the time spent waiting for the request to complete (up to `timeout` ms). NB: the long retry algorithm takes over 20 minutes to complete, with a default timeout of 60s! ignore_backoff: true to ignore the historical backoff data and try the request anyway. backoff_on_404: Back off if we get a 404 Returns: Resolves with the HTTP response object on success. 
Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ if timeout: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout if ( self.hs.config.federation_domain_whitelist is not None and request.destination not in self.hs.config.federation_domain_whitelist ): raise FederationDeniedError(request.destination) limiter = await synapse.util.retryutils.get_retry_limiter( request.destination, self.clock, self._store, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, ) method_bytes = request.method.encode("ascii") destination_bytes = request.destination.encode("ascii") path_bytes = request.path.encode("ascii") if request.query: query_bytes = encode_query_args(request.query) else: query_bytes = b"" scope = start_active_span( "outgoing-federation-request", tags={ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, tags.PEER_ADDRESS: request.destination, tags.HTTP_METHOD: request.method, tags.HTTP_URL: request.path, }, finish_on_close=True, ) # Inject the span into the headers headers_dict = {} # type: Dict[bytes, List[bytes]] inject_active_span_byte_dict(headers_dict, request.destination) headers_dict[b"User-Agent"] = [self.version_string_bytes] with limiter, scope: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: retries_left = MAX_LONG_RETRIES else: retries_left = MAX_SHORT_RETRIES url_bytes = request.uri url_str = url_bytes.decode("ascii") url_to_sign_bytes = urllib.parse.urlunparse( (b"", b"", path_bytes, None, query_bytes, b"") ) while True: try: json = request.get_json() if json: headers_dict[b"Content-Type"] = [b"application/json"] auth_headers = self.build_auth_headers( 
destination_bytes, method_bytes, url_to_sign_bytes, json ) data = encode_canonical_json(json) producer = QuieterFileBodyProducer( BytesIO(data), cooperator=self._cooperator ) # type: Optional[IBodyProducer] else: producer = None auth_headers = self.build_auth_headers( destination_bytes, method_bytes, url_to_sign_bytes ) headers_dict[b"Authorization"] = auth_headers logger.debug( "{%s} [%s] Sending request: %s %s; timeout %fs", request.txn_id, request.destination, request.method, url_str, _sec_timeout, ) outgoing_requests_counter.labels(request.method).inc() try: with Measure(self.clock, "outbound_request"): # we don't want all the fancy cookie and redirect handling # that treq.request gives: just use the raw Agent. request_deferred = self.agent.request( method_bytes, url_bytes, headers=Headers(headers_dict), bodyProducer=producer, ) request_deferred = timeout_deferred( request_deferred, timeout=_sec_timeout, reactor=self.reactor, ) response = await request_deferred except DNSLookupError as e: raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e except Exception as e: raise RequestSendFailed(e, can_retry=True) from e incoming_responses_counter.labels( request.method, response.code ).inc() set_tag(tags.HTTP_STATUS_CODE, response.code) response_phrase = response.phrase.decode("ascii", errors="replace") if 200 <= response.code < 300: logger.debug( "{%s} [%s] Got response headers: %d %s", request.txn_id, request.destination, response.code, response_phrase, ) pass else: logger.info( "{%s} [%s] Got response headers: %d %s", request.txn_id, request.destination, response.code, response_phrase, ) # :'( # Update transactions table? d = treq.content(response) d = timeout_deferred( d, timeout=_sec_timeout, reactor=self.reactor ) try: body = await make_deferred_yieldable(d) except Exception as e: # Eh, we're already going to raise an exception so lets # ignore if this fails. 
logger.warning( "{%s} [%s] Failed to get error response: %s %s: %s", request.txn_id, request.destination, request.method, url_str, _flatten_response_never_received(e), ) body = None exc = HttpResponseException( response.code, response_phrase, body ) # Retry if the error is a 429 (Too Many Requests), # otherwise just raise a standard HttpResponseException if response.code == 429: raise RequestSendFailed(exc, can_retry=True) from exc else: raise exc break except RequestSendFailed as e: logger.info( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, request.destination, request.method, url_str, _flatten_response_never_received(e.inner_exception), ) if not e.can_retry: raise if retries_left and not timeout: if long_retries: delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) delay = min(delay, 60) delay *= random.uniform(0.8, 1.4) else: delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) delay = min(delay, 2) delay *= random.uniform(0.8, 1.4) logger.debug( "{%s} [%s] Waiting %ss before re-sending...", request.txn_id, request.destination, delay, ) await self.clock.sleep(delay) retries_left -= 1 else: raise except Exception as e: logger.warning( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, request.destination, request.method, url_str, _flatten_response_never_received(e), ) raise return response def build_auth_headers( self, destination: Optional[bytes], method: bytes, url_bytes: bytes, content: Optional[JsonDict] = None, destination_is: Optional[bytes] = None, ) -> List[bytes]: """ Builds the Authorization headers for a federation request Args: destination: The destination homeserver of the request. May be None if the destination is an identity server, in which case destination_is must be non-None. 
method: The HTTP method of the request url_bytes: The URI path of the request content: The body of the request destination_is: As 'destination', but if the destination is an identity server Returns: A list of headers to be added as "Authorization:" headers """ request = { "method": method.decode("ascii"), "uri": url_bytes.decode("ascii"), "origin": self.server_name, } if destination is not None: request["destination"] = destination.decode("ascii") if destination_is is not None: request["destination_is"] = destination_is.decode("ascii") if content is not None: request["content"] = content request = sign_json(request, self.server_name, self.signing_key) auth_headers = [] for key, sig in request["signatures"][self.server_name].items(): auth_headers.append( ( 'X-Matrix origin=%s,key="%s",sig="%s"' % (self.server_name, key, sig) ).encode("ascii") ) return auth_headers async def put_json( self, destination: str, path: str, args: Optional[QueryArgs] = None, data: Optional[JsonDict] = None, json_data_callback: Optional[Callable[[], JsonDict]] = None, long_retries: bool = False, timeout: Optional[int] = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, ) -> Union[JsonDict, list]: """ Sends the specified json data using PUT Args: destination: The remote server to send the HTTP request to. path: The HTTP path. args: query params data: A dict containing the data that will be used as the request body. This will be encoded as JSON. json_data_callback: A callable returning the dict to use as the request body. long_retries: whether to use the long retry algorithm. See docs on _send_request for details. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. 
Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. backoff_on_404: True if we should count a 404 response as a failure of the server (and should therefore back off future requests). try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. This will be attempted before backing off if backing off has been enabled. Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" request = MatrixFederationRequest( method="PUT", destination=destination, path=path, query=args, json_callback=json_data_callback, json=data, ) start_ms = self.clock.time_msec() response = await self._send_request_with_optional_trailing_slash( request, try_trailing_slash_on_400, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, long_retries=long_retries, timeout=timeout, ) if timeout is not None: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms ) return body async def post_json( self, destination: str, path: str, data: Optional[JsonDict] = None, long_retries: bool = False, timeout: Optional[int] = None, ignore_backoff: bool = False, args: Optional[QueryArgs] = None, ) -> Union[JsonDict, list]: """ Sends the specified json data using POST Args: destination: The remote server to send the HTTP request to. path: The HTTP path. data: A dict containing the data that will be used as the request body. This will be encoded as JSON. long_retries: whether to use the long retry algorithm. See docs on _send_request for details. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. args: query params Returns: dict|list: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ request = MatrixFederationRequest( method="POST", destination=destination, path=path, query=args, json=data ) start_ms = self.clock.time_msec() response = await self._send_request( request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) if timeout: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms, ) return body async def get_json( self, destination: str, path: str, args: Optional[QueryArgs] = None, retry_on_dns_fail: bool = True, timeout: Optional[int] = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, ) -> Union[JsonDict, list]: """ GETs some json from the given host homeserver and path Args: destination: The remote server to send the HTTP request to. path: The HTTP path. args: A dictionary used to create query strings, defaults to None. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ request = MatrixFederationRequest( method="GET", destination=destination, path=path, query=args ) start_ms = self.clock.time_msec() response = await self._send_request_with_optional_trailing_slash( request, try_trailing_slash_on_400, backoff_on_404=False, ignore_backoff=ignore_backoff, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ) if timeout is not None: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms ) return body async def delete_json( self, destination: str, path: str, long_retries: bool = False, timeout: Optional[int] = None, ignore_backoff: bool = False, args: Optional[QueryArgs] = None, ) -> Union[JsonDict, list]: """Send a DELETE request to the remote expecting some json response Args: destination: The remote server to send the HTTP request to. path: The HTTP path. long_retries: whether to use the long retry algorithm. See docs on _send_request for details. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. args: query params Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ request = MatrixFederationRequest( method="DELETE", destination=destination, path=path, query=args ) start_ms = self.clock.time_msec() response = await self._send_request( request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) if timeout is not None: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms ) return body async def get_file( self, destination: str, path: str, output_stream, args: Optional[QueryArgs] = None, retry_on_dns_fail: bool = True, max_size: Optional[int] = None, ignore_backoff: bool = False, ) -> Tuple[int, Dict[bytes, List[bytes]]]: """GETs a file from a given homeserver Args: destination: The remote server to send the HTTP request to. path: The HTTP path to GET. output_stream: File to write the response body to. args: Optional dictionary used to create the query string. ignore_backoff: true to ignore the historical backoff data and try the request anyway. Returns: Resolves with an (int,dict) tuple of the file length and a dict of the response headers. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" request = MatrixFederationRequest( method="GET", destination=destination, path=path, query=args ) response = await self._send_request( request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff ) headers = dict(response.headers.getAllRawHeaders()) try: d = readBodyToFile(response, output_stream, max_size) d.addTimeout(self.default_timeout, self.reactor) length = await make_deferred_yieldable(d) except Exception as e: logger.warning( "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, e, ) raise logger.info( "{%s} [%s] Completed: %d %s [%d bytes] %s %s", request.txn_id, request.destination, response.code, response.phrase.decode("ascii", errors="replace"), length, request.method, request.uri.decode("ascii"), ) return (length, headers) def _flatten_response_never_received(e): if hasattr(e, "reasons"): reasons = ", ".join( _flatten_response_never_received(f.value) for f in e.reasons ) return "%s:[%s]" % (type(e).__name__, reasons) else: return repr(e) def check_content_type_is_json(headers: Headers) -> None: """ Check that a set of HTTP headers have a Content-Type header, and that it is application/json. Args: headers: headers to check Raises: RequestSendFailed: if the Content-Type header is missing or isn't JSON """ c_type = headers.getRawHeaders(b"Content-Type") if c_type is None: raise RequestSendFailed( RuntimeError("No Content-Type header received from remote server"), can_retry=False, ) c_type = c_type[0].decode("ascii") # only the first header val, options = cgi.parse_header(c_type) if val != "application/json": raise RequestSendFailed( RuntimeError( "Remote server sent Content-Type header of '%s', not 'application/json'" % c_type, ), can_retry=False, )
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_11
crossvul-python_data_good_1915_12
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from prometheus_client import Counter from twisted.internet.error import AlreadyCalled, AlreadyCancelled from synapse.api.constants import EventTypes from synapse.logging import opentracing from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException from synapse.types import RoomStreamToken from . 
import push_rule_evaluator, push_tools logger = logging.getLogger(__name__) http_push_processed_counter = Counter( "synapse_http_httppusher_http_pushes_processed", "Number of push notifications successfully sent", ) http_push_failed_counter = Counter( "synapse_http_httppusher_http_pushes_failed", "Number of push notifications which failed", ) http_badges_processed_counter = Counter( "synapse_http_httppusher_badge_updates_processed", "Number of badge updates successfully sent", ) http_badges_failed_counter = Counter( "synapse_http_httppusher_badge_updates_failed", "Number of badge updates which failed", ) class HttpPusher: INITIAL_BACKOFF_SEC = 1 # in seconds because that's what Twisted takes MAX_BACKOFF_SEC = 60 * 60 # This one's in ms because we compare it against the clock GIVE_UP_AFTER_MS = 24 * 60 * 60 * 1000 def __init__(self, hs, pusherdict): self.hs = hs self.store = self.hs.get_datastore() self.storage = self.hs.get_storage() self.clock = self.hs.get_clock() self.state_handler = self.hs.get_state_handler() self.user_id = pusherdict["user_name"] self.app_id = pusherdict["app_id"] self.app_display_name = pusherdict["app_display_name"] self.device_display_name = pusherdict["device_display_name"] self.pushkey = pusherdict["pushkey"] self.pushkey_ts = pusherdict["ts"] self.data = pusherdict["data"] self.last_stream_ordering = pusherdict["last_stream_ordering"] self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.failing_since = pusherdict["failing_since"] self.timed_call = None self._is_processing = False self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room # This is the highest stream ordering we know it's safe to process. # When new events arrive, we'll be given a window of new events: we # should honour this rather than just looking for anything higher # because of potential out-of-order event serialisation. This starts # off as None though as we don't know any better. 
self.max_stream_ordering = None if "data" not in pusherdict: raise PusherConfigException("No 'data' key for HTTP pusher") self.data = pusherdict["data"] self.name = "%s/%s/%s" % ( pusherdict["user_name"], pusherdict["app_id"], pusherdict["pushkey"], ) if self.data is None: raise PusherConfigException("data can not be null for HTTP pusher") if "url" not in self.data: raise PusherConfigException("'url' required in data for HTTP pusher") self.url = self.data["url"] self.http_client = hs.get_proxied_blacklisted_http_client() self.data_minus_url = {} self.data_minus_url.update(self.data) del self.data_minus_url["url"] def on_started(self, should_check_for_notifs): """Called when this pusher has been started. Args: should_check_for_notifs (bool): Whether we should immediately check for push to send. Set to False only if it's known there is nothing to send """ if should_check_for_notifs: self._start_processing() def on_new_notifications(self, max_token: RoomStreamToken): # We just use the minimum stream ordering and ignore the vector clock # component. This is safe to do as long as we *always* ignore the vector # clock components. max_stream_ordering = max_token.stream self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) self._start_processing() def on_new_receipts(self, min_stream_id, max_stream_id): # Note that the min here shouldn't be relied upon to be accurate. # We could check the receipts are actually m.read receipts here, # but currently that's the only type of receipt anyway... run_as_background_process("http_pusher.on_new_receipts", self._update_badge) async def _update_badge(self): # XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems # to be largely redundant. perhaps we can remove it. 
badge = await push_tools.get_badge_count( self.hs.get_datastore(), self.user_id, group_by_room=self._group_unread_count_by_room, ) await self._send_badge(badge) def on_timer(self): self._start_processing() def on_stop(self): if self.timed_call: try: self.timed_call.cancel() except (AlreadyCalled, AlreadyCancelled): pass self.timed_call = None def _start_processing(self): if self._is_processing: return run_as_background_process("httppush.process", self._process) async def _process(self): # we should never get here if we are already processing assert not self._is_processing try: self._is_processing = True # if the max ordering changes while we're running _unsafe_process, # call it again, and so on until we've caught up. while True: starting_max_ordering = self.max_stream_ordering try: await self._unsafe_process() except Exception: logger.exception("Exception processing notifs") if self.max_stream_ordering == starting_max_ordering: break finally: self._is_processing = False async def _unsafe_process(self): """ Looks for unset notifications and dispatch them, in order Never call this directly: use _process which will only allow this to run once per pusher. 
""" fn = self.store.get_unread_push_actions_for_user_in_range_for_http unprocessed = await fn( self.user_id, self.last_stream_ordering, self.max_stream_ordering ) logger.info( "Processing %i unprocessed push actions for %s starting at " "stream_ordering %s", len(unprocessed), self.name, self.last_stream_ordering, ) for push_action in unprocessed: with opentracing.start_active_span( "http-push", tags={ "authenticated_entity": self.user_id, "event_id": push_action["event_id"], "app_id": self.app_id, "app_display_name": self.app_display_name, }, ): processed = await self._process_one(push_action) if processed: http_push_processed_counter.inc() self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.last_stream_ordering = push_action["stream_ordering"] pusher_still_exists = await self.store.update_pusher_last_stream_ordering_and_success( self.app_id, self.pushkey, self.user_id, self.last_stream_ordering, self.clock.time_msec(), ) if not pusher_still_exists: # The pusher has been deleted while we were processing, so # lets just stop and return. self.on_stop() return if self.failing_since: self.failing_since = None await self.store.update_pusher_failing_since( self.app_id, self.pushkey, self.user_id, self.failing_since ) else: http_push_failed_counter.inc() if not self.failing_since: self.failing_since = self.clock.time_msec() await self.store.update_pusher_failing_since( self.app_id, self.pushkey, self.user_id, self.failing_since ) if ( self.failing_since and self.failing_since < self.clock.time_msec() - HttpPusher.GIVE_UP_AFTER_MS ): # we really only give up so that if the URL gets # fixed, we don't suddenly deliver a load # of old notifications. 
logger.warning( "Giving up on a notification to user %s, pushkey %s", self.user_id, self.pushkey, ) self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.last_stream_ordering = push_action["stream_ordering"] pusher_still_exists = await self.store.update_pusher_last_stream_ordering( self.app_id, self.pushkey, self.user_id, self.last_stream_ordering, ) if not pusher_still_exists: # The pusher has been deleted while we were processing, so # lets just stop and return. self.on_stop() return self.failing_since = None await self.store.update_pusher_failing_since( self.app_id, self.pushkey, self.user_id, self.failing_since ) else: logger.info("Push failed: delaying for %ds", self.backoff_delay) self.timed_call = self.hs.get_reactor().callLater( self.backoff_delay, self.on_timer ) self.backoff_delay = min( self.backoff_delay * 2, self.MAX_BACKOFF_SEC ) break async def _process_one(self, push_action): if "notify" not in push_action["actions"]: return True tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"]) badge = await push_tools.get_badge_count( self.hs.get_datastore(), self.user_id, group_by_room=self._group_unread_count_by_room, ) event = await self.store.get_event(push_action["event_id"], allow_none=True) if event is None: return True # It's been redacted rejected = await self.dispatch_push(event, tweaks, badge) if rejected is False: return False if isinstance(rejected, list) or isinstance(rejected, tuple): for pk in rejected: if pk != self.pushkey: # for sanity, we only remove the pushkey if it # was the one we actually sent... 
logger.warning( ("Ignoring rejected pushkey %s because we didn't send it"), pk, ) else: logger.info("Pushkey %s was rejected: removing", pk) await self.hs.remove_pusher(self.app_id, pk, self.user_id) return True async def _build_notification_dict(self, event, tweaks, badge): priority = "low" if ( event.type == EventTypes.Encrypted or tweaks.get("highlight") or tweaks.get("sound") ): # HACK send our push as high priority only if it generates a sound, highlight # or may do so (i.e. is encrypted so has unknown effects). priority = "high" if self.data.get("format") == "event_id_only": d = { "notification": { "event_id": event.event_id, "room_id": event.room_id, "counts": {"unread": badge}, "prio": priority, "devices": [ { "app_id": self.app_id, "pushkey": self.pushkey, "pushkey_ts": int(self.pushkey_ts / 1000), "data": self.data_minus_url, } ], } } return d ctx = await push_tools.get_context_for_event( self.storage, self.state_handler, event, self.user_id ) d = { "notification": { "id": event.event_id, # deprecated: remove soon "event_id": event.event_id, "room_id": event.room_id, "type": event.type, "sender": event.user_id, "prio": priority, "counts": { "unread": badge, # 'missed_calls': 2 }, "devices": [ { "app_id": self.app_id, "pushkey": self.pushkey, "pushkey_ts": int(self.pushkey_ts / 1000), "data": self.data_minus_url, "tweaks": tweaks, } ], } } if event.type == "m.room.member" and event.is_state(): d["notification"]["membership"] = event.content["membership"] d["notification"]["user_is_target"] = event.state_key == self.user_id if self.hs.config.push_include_content and event.content: d["notification"]["content"] = event.content # We no longer send aliases separately, instead, we send the human # readable name of the room, which may be an alias. 
if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0: d["notification"]["sender_display_name"] = ctx["sender_display_name"] if "name" in ctx and len(ctx["name"]) > 0: d["notification"]["room_name"] = ctx["name"] return d async def dispatch_push(self, event, tweaks, badge): notification_dict = await self._build_notification_dict(event, tweaks, badge) if not notification_dict: return [] try: resp = await self.http_client.post_json_get_json( self.url, notification_dict ) except Exception as e: logger.warning( "Failed to push event %s to %s: %s %s", event.event_id, self.name, type(e), e, ) return False rejected = [] if "rejected" in resp: rejected = resp["rejected"] return rejected async def _send_badge(self, badge): """ Args: badge (int): number of unread messages """ logger.debug("Sending updated badge count %d to %s", badge, self.name) d = { "notification": { "id": "", "type": None, "sender": "", "counts": {"unread": badge}, "devices": [ { "app_id": self.app_id, "pushkey": self.pushkey, "pushkey_ts": int(self.pushkey_ts / 1000), "data": self.data_minus_url, } ], } } try: await self.http_client.post_json_get_json(self.url, d) http_badges_processed_counter.inc() except Exception as e: logger.warning( "Failed to send badge count to %s: %s %s", self.name, type(e), e ) http_badges_failed_counter.inc()
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_12
crossvul-python_data_bad_3250_1
""" .. module: security_monkey.sso.views :platform: Unix :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more :license: Apache, see LICENSE for more details. .. moduleauthor:: Patrick Kelley <patrick@netflix.com> """ import jwt import base64 import requests from flask import Blueprint, current_app, redirect, request from flask.ext.restful import reqparse, Resource, Api from flask.ext.principal import Identity, identity_changed from flask_login import login_user try: from onelogin.saml2.auth import OneLogin_Saml2_Auth from onelogin.saml2.utils import OneLogin_Saml2_Utils onelogin_import_success = True except ImportError: onelogin_import_success = False from .service import fetch_token_header_payload, get_rsa_public_key from security_monkey.datastore import User from security_monkey import db, rbac from urlparse import urlparse mod = Blueprint('sso', __name__) api = Api(mod) from flask_security.utils import validate_redirect_url class Ping(Resource): """ This class serves as an example of how one might implement an SSO provider for use with Security Monkey. In this example we use a OpenIDConnect authentication flow, that is essentially OAuth2 underneath. """ decorators = [rbac.allow(["anonymous"], ["GET", "POST"])] def __init__(self): self.reqparse = reqparse.RequestParser() super(Ping, self).__init__() def get(self): return self.post() def post(self): if "ping" not in current_app.config.get("ACTIVE_PROVIDERS"): return "Ping is not enabled in the config. 
See the ACTIVE_PROVIDERS section.", 404 default_state = 'clientId,{client_id},redirectUri,{redirectUri},return_to,{return_to}'.format( client_id=current_app.config.get('PING_CLIENT_ID'), redirectUri=current_app.config.get('PING_REDIRECT_URI'), return_to=current_app.config.get('WEB_PATH') ) self.reqparse.add_argument('code', type=str, required=True) self.reqparse.add_argument('state', type=str, required=False, default=default_state) args = self.reqparse.parse_args() client_id = args['state'].split(',')[1] redirect_uri = args['state'].split(',')[3] return_to = args['state'].split(',')[5] if not validate_redirect_url(return_to): return_to = current_app.config.get('WEB_PATH') # take the information we have received from the provider to create a new request params = { 'client_id': client_id, 'grant_type': 'authorization_code', 'scope': 'openid email profile address', 'redirect_uri': redirect_uri, 'code': args['code'] } # you can either discover these dynamically or simply configure them access_token_url = current_app.config.get('PING_ACCESS_TOKEN_URL') user_api_url = current_app.config.get('PING_USER_API_URL') # the secret and cliendId will be given to you when you signup for the provider basic = base64.b64encode(bytes('{0}:{1}'.format(client_id, current_app.config.get("PING_SECRET")))) headers = {'Authorization': 'Basic {0}'.format(basic.decode('utf-8'))} # exchange authorization code for access token. 
r = requests.post(access_token_url, headers=headers, params=params) id_token = r.json()['id_token'] access_token = r.json()['access_token'] # fetch token public key header_data = fetch_token_header_payload(id_token)[0] jwks_url = current_app.config.get('PING_JWKS_URL') # retrieve the key material as specified by the token header r = requests.get(jwks_url) for key in r.json()['keys']: if key['kid'] == header_data['kid']: secret = get_rsa_public_key(key['n'], key['e']) algo = header_data['alg'] break else: return dict(message='Key not found'), 403 # validate your token based on the key it was signed with try: current_app.logger.debug(id_token) current_app.logger.debug(secret) current_app.logger.debug(algo) jwt.decode(id_token, secret.decode('utf-8'), algorithms=[algo], audience=client_id) except jwt.DecodeError: return dict(message='Token is invalid'), 403 except jwt.ExpiredSignatureError: return dict(message='Token has expired'), 403 except jwt.InvalidTokenError: return dict(message='Token is invalid'), 403 user_params = dict(access_token=access_token, schema='profile') # retrieve information about the current user. 
r = requests.get(user_api_url, params=user_params) profile = r.json() user = User.query.filter(User.email==profile['email']).first() # if we get an sso user create them an account if not user: user = User( email=profile['email'], active=True, role='View' # profile_picture=profile.get('thumbnailPhotoUrl') ) db.session.add(user) db.session.commit() db.session.refresh(user) # Tell Flask-Principal the identity changed identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) login_user(user) return redirect(return_to, code=302) class Google(Resource): decorators = [rbac.allow(["anonymous"], ["GET", "POST"])] def __init__(self): self.reqparse = reqparse.RequestParser() super(Google, self).__init__() def get(self): return self.post() def post(self): if "google" not in current_app.config.get("ACTIVE_PROVIDERS"): return "Google is not enabled in the config. See the ACTIVE_PROVIDERS section.", 404 default_state = 'clientId,{client_id},redirectUri,{redirectUri},return_to,{return_to}'.format( client_id=current_app.config.get("GOOGLE_CLIENT_ID"), redirectUri=api.url_for(Google), return_to=current_app.config.get('WEB_PATH') ) self.reqparse.add_argument('code', type=str, required=True) self.reqparse.add_argument('state', type=str, required=False, default=default_state) args = self.reqparse.parse_args() client_id = args['state'].split(',')[1] redirect_uri = args['state'].split(',')[3] return_to = args['state'].split(',')[5] if not validate_redirect_url(return_to): return_to = current_app.config.get('WEB_PATH') access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' args = self.reqparse.parse_args() # Step 1. 
Exchange authorization code for access token payload = { 'client_id': client_id, 'grant_type': 'authorization_code', 'redirect_uri': redirect_uri, 'code': args['code'], 'client_secret': current_app.config.get('GOOGLE_SECRET') } r = requests.post(access_token_url, data=payload) token = r.json() # Step 1bis. Validate (some information of) the id token (if necessary) google_hosted_domain = current_app.config.get("GOOGLE_HOSTED_DOMAIN") if google_hosted_domain is not None: current_app.logger.debug('We need to verify that the token was issued for this hosted domain: %s ' % (google_hosted_domain)) # Get the JSON Web Token id_token = r.json()['id_token'] current_app.logger.debug('The id_token is: %s' % (id_token)) # Extract the payload (header_data, payload_data) = fetch_token_header_payload(id_token) current_app.logger.debug('id_token.header_data: %s' % (header_data)) current_app.logger.debug('id_token.payload_data: %s' % (payload_data)) token_hd = payload_data.get('hd') if token_hd != google_hosted_domain: current_app.logger.debug('Verification failed: %s != %s' % (token_hd, google_hosted_domain)) return dict(message='Token is invalid %s' % token), 403 current_app.logger.debug('Verification passed') # Step 2. 
Retrieve information about the current user headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} r = requests.get(people_api_url, headers=headers) profile = r.json() user = User.query.filter(User.email == profile['email']).first() # if we get an sso user create them an account if not user: user = User( email=profile['email'], active=True, role='View' # profile_picture=profile.get('thumbnailPhotoUrl') ) db.session.add(user) db.session.commit() db.session.refresh(user) # Tell Flask-Principal the identity changed identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) login_user(user) return redirect(return_to, code=302) class OneLogin(Resource): decorators = [rbac.allow(["anonymous"], ["GET", "POST"])] def __init__(self): self.reqparse = reqparse.RequestParser() self.req = OneLogin.prepare_from_flask_request(request) super(OneLogin, self).__init__() @staticmethod def prepare_from_flask_request(req): url_data = urlparse(req.url) return { 'http_host': req.host, 'server_port': url_data.port, 'script_name': req.path, 'get_data': req.args.copy(), 'post_data': req.form.copy(), 'https': ("on" if current_app.config.get("ONELOGIN_HTTPS") else "off") } def get(self): return self.post() def _consumer(self, auth): auth.process_response() errors = auth.get_errors() if not errors: if auth.is_authenticated(): return True else: return False else: current_app.logger.error('Error processing %s' % (', '.join(errors))) return False def post(self): if "onelogin" not in current_app.config.get("ACTIVE_PROVIDERS"): return "Onelogin is not enabled in the config. 
See the ACTIVE_PROVIDERS section.", 404 auth = OneLogin_Saml2_Auth(self.req, current_app.config.get("ONELOGIN_SETTINGS")) self.reqparse.add_argument('return_to', required=False, default=current_app.config.get('WEB_PATH')) self.reqparse.add_argument('acs', required=False) self.reqparse.add_argument('sls', required=False) args = self.reqparse.parse_args() return_to = args['return_to'] if args['acs'] != None: # valids the SAML response and checks if successfully authenticated if self._consumer(auth): email = auth.get_attribute(current_app.config.get("ONELOGIN_EMAIL_FIELD"))[0] user = User.query.filter(User.email == email).first() # if we get an sso user create them an account if not user: user = User( email=email, active=True, role=current_app.config.get('ONELOGIN_DEFAULT_ROLE') # profile_picture=profile.get('thumbnailPhotoUrl') ) db.session.add(user) db.session.commit() db.session.refresh(user) # Tell Flask-Principal the identity changed identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) login_user(user) self_url = OneLogin_Saml2_Utils.get_self_url(self.req) if 'RelayState' in request.form and self_url != request.form['RelayState']: return redirect(auth.redirect_to(request.form['RelayState']), code=302) else: return redirect(current_app.config.get('BASE_URL'), code=302) else: return dict(message='OneLogin authentication failed.'), 403 elif args['sls'] != None: return dict(message='OneLogin SLS not implemented yet.'), 405 else: return redirect(auth.login(return_to=return_to)) class Providers(Resource): decorators = [rbac.allow(["anonymous"], ["GET"])] def __init__(self): super(Providers, self).__init__() def get(self): active_providers = [] for provider in current_app.config.get("ACTIVE_PROVIDERS"): provider = provider.lower() if provider == "ping": active_providers.append({ 'name': current_app.config.get("PING_NAME"), 'url': current_app.config.get('PING_REDIRECT_URI'), 'redirectUri': current_app.config.get("PING_REDIRECT_URI"), 
'clientId': current_app.config.get("PING_CLIENT_ID"), 'responseType': 'code', 'scope': ['openid', 'profile', 'email'], 'scopeDelimiter': ' ', 'authorizationEndpoint': current_app.config.get("PING_AUTH_ENDPOINT"), 'requiredUrlParams': ['scope'], 'type': '2.0' }) elif provider == "google": google_provider = { 'name': 'google', 'clientId': current_app.config.get("GOOGLE_CLIENT_ID"), 'url': api.url_for(Google, _external=True, _scheme='https'), 'redirectUri': api.url_for(Google, _external=True, _scheme='https'), 'authorizationEndpoint': current_app.config.get("GOOGLE_AUTH_ENDPOINT"), 'scope': ['openid email'], 'responseType': 'code' } google_hosted_domain = current_app.config.get("GOOGLE_HOSTED_DOMAIN") if google_hosted_domain is not None: google_provider['hd'] = google_hosted_domain active_providers.append(google_provider) elif provider == "onelogin": active_providers.append({ 'name': 'OneLogin', 'authorizationEndpoint': api.url_for(OneLogin) }) else: raise Exception("Unknown authentication provider: {0}".format(provider)) return active_providers api.add_resource(Ping, '/auth/ping', endpoint='ping') api.add_resource(Google, '/auth/google', endpoint='google') api.add_resource(Providers, '/auth/providers', endpoint='providers') if onelogin_import_success: api.add_resource(OneLogin, '/auth/onelogin', endpoint='onelogin')
./CrossVul/dataset_final_sorted/CWE-601/py/bad_3250_1
crossvul-python_data_bad_3250_2
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey import app, db
from flask_wtf.csrf import generate_csrf
from security_monkey.auth.models import RBACRole
from security_monkey.decorators import crossdomain
from flask_restful import fields, marshal, Resource, reqparse
from flask_login import current_user

# Origins allowed to make cross-domain requests against the API. All are
# derived from the configured FQDN plus the various ports the UI may be
# served from (see the @crossdomain decorator on after() below).
ORIGINS = [
    'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('WEB_PORT')),
    # Adding this next one so you can also access the dart UI by prepending /static to the path.
    'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('API_PORT')),
    'https://{}:{}'.format(app.config.get('FQDN'), app.config.get('NGINX_PORT')),
    'https://{}:80'.format(app.config.get('FQDN'))
]


##### Marshal Datastructures #####
# Field maps consumed by flask_restful.marshal() to shape API responses.

# Used by RevisionGet, RevisionList, ItemList
REVISION_FIELDS = {
    'id': fields.Integer,
    'date_created': fields.String,
    'date_last_ephemeral_change': fields.String,
    'active': fields.Boolean,
    'item_id': fields.Integer
}

# Used by RevisionList, ItemGet, ItemList
ITEM_FIELDS = {
    'id': fields.Integer,
    'region': fields.String,
    'name': fields.String
}

# Used by ItemList, Justify
AUDIT_FIELDS = {
    'id': fields.Integer,
    'score': fields.Integer,
    'issue': fields.String,
    'notes': fields.String,
    'justified': fields.Boolean,
    'justification': fields.String,
    'justified_date': fields.String,
    'item_id': fields.Integer
}

## Single Use Marshal Objects ##

# SINGLE USE - RevisionGet
REVISION_COMMENT_FIELDS = {
    'id': fields.Integer,
    'revision_id': fields.Integer,
    'date_created': fields.String,
    'text': fields.String
}

# SINGLE USE - ItemGet
ITEM_COMMENT_FIELDS = {
    'id': fields.Integer,
    'date_created': fields.String,
    'text': fields.String,
    'item_id': fields.Integer
}

# SINGLE USE - UserSettings
USER_SETTINGS_FIELDS = {
    # 'id': fields.Integer,
    'daily_audit_email': fields.Boolean,
    'change_reports': fields.String
}

# SINGLE USE - AccountGet
ACCOUNT_FIELDS = {
    'id': fields.Integer,
    'name': fields.String,
    'identifier': fields.String,
    'notes': fields.String,
    'active': fields.Boolean,
    'third_party': fields.Boolean,
    'account_type': fields.String
}

USER_FIELDS = {
    'id': fields.Integer,
    'active': fields.Boolean,
    'email': fields.String,
    'role': fields.String,
    'confirmed_at': fields.String,
    'daily_audit_email': fields.Boolean,
    'change_reports': fields.String,
    'last_login_at': fields.String,
    'current_login_at': fields.String,
    'login_count': fields.Integer,
    'last_login_ip': fields.String,
    'current_login_ip': fields.String
}

ROLE_FIELDS = {
    'id': fields.Integer,
    'name': fields.String,
    'description': fields.String,
}

WHITELIST_FIELDS = {
    'id': fields.Integer,
    'name': fields.String,
    'notes': fields.String,
    'cidr': fields.String
}

IGNORELIST_FIELDS = {
    'id': fields.Integer,
    'prefix': fields.String,
    'notes': fields.String,
}

AUDITORSETTING_FIELDS = {
    'id': fields.Integer,
    'disabled': fields.Boolean,
    'issue_text': fields.String
}

ITEM_LINK_FIELDS = {
    'id': fields.Integer,
    'name': fields.String
}


class AuthenticatedService(Resource):
    """Base class for API resources that require an authenticated user.

    On construction it builds ``self.auth_dict`` describing the current
    session: either the authenticated user's email and expanded role list,
    or (when anonymous) a login URL the client should redirect to.
    """

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(AuthenticatedService, self).__init__()
        self.auth_dict = dict()
        # NOTE(review): is_authenticated is called as a method here; newer
        # Flask-Login versions expose it as a property — confirm the pinned
        # Flask-Login version matches this usage.
        if current_user.is_authenticated():
            roles_marshal = []
            # Marshal each stored role plus the user's primary role name.
            for role in current_user.roles:
                roles_marshal.append(marshal(role.__dict__, ROLE_FIELDS))
            roles_marshal.append({"name": current_user.role})
            # Include inherited (parent) roles from the RBAC hierarchy so
            # clients can check permissions without walking the tree.
            for role in RBACRole.roles[current_user.role].get_parents():
                roles_marshal.append({"name": role.name})
            self.auth_dict = {
                "authenticated": True,
                "user": current_user.email,
                "roles": roles_marshal
            }
        else:
            # Anonymous: point the client at the login page. The scheme/port
            # depend on whether nginx fronts the app.
            if app.config.get('FRONTED_BY_NGINX'):
                url = "https://{}:{}{}".format(app.config.get('FQDN'), app.config.get('NGINX_PORT'), '/login')
            else:
                url = "http://{}:{}{}".format(app.config.get('FQDN'), app.config.get('API_PORT'), '/login')
            self.auth_dict = {
                "authenticated": False,
                "user": None,
                "url": url
            }


@app.after_request
@crossdomain(allowed_origins=ORIGINS)
def after(response):
    """Attach a fresh CSRF token cookie and CORS headers to every response."""
    response.set_cookie('XSRF-COOKIE', generate_csrf())
    return response
./CrossVul/dataset_final_sorted/CWE-601/py/bad_3250_2
crossvul-python_data_good_3250_0
import itertools
from flask import request, abort, _app_ctx_stack, redirect
from flask_security.core import AnonymousUser
from security_monkey.datastore import User
try:
    from flask.ext.login import current_user
except ImportError:
    current_user = None
from .models import RBACRole, RBACUserMixin
from . import anonymous
from flask import Response
import json


class AccessControlList(object):
    """
    This class record rules for access controling.

    Rules are (role_name, method, view_func_name) triples; ``_exempt``
    holds view-function names that bypass permission checks entirely.
    """
    def __init__(self):
        # List of allowed (role, method, resource) triples.
        self._allowed = []
        # View-function names exempt from any checking.
        self._exempt = []
        # Becomes True once RBAC._setup_acl() has loaded the deferred rules.
        self.seted = False

    def allow(self, role, method, resource, with_children=True):
        """Add allowing rules.

        :param role: Role of this rule.
        :param method: Method to allow in rule, include GET, POST, PUT etc.
        :param resource: Resource also view function.
        :param with_children: Allow role's children in rule as well
                              if with_children is `True`
        """
        if with_children:
            for r in role.get_children():
                permission = (r.name, method, resource)
                if permission not in self._allowed:
                    self._allowed.append(permission)
        permission = (role.name, method, resource)
        if permission not in self._allowed:
            self._allowed.append(permission)

    def exempt(self, view_func):
        """Exempt a view function from being checked permission

        :param view_func: The view function exempt from checking.
        """
        # NOTE(review): `not view_func in` works but `view_func not in`
        # is the idiomatic spelling (PEP 8).
        if not view_func in self._exempt:
            self._exempt.append(view_func)

    def is_allowed(self, role, method, resource):
        """Check whether role is allowed to access resource

        :param role: Role to be checked.
        :param method: Method to be checked.
        :param resource: View function to be checked.
        """
        return (role, method, resource) in self._allowed

    def is_exempt(self, view_func):
        """Return whether view_func is exempted.

        :param view_func: View function to be checked.
        """
        return view_func in self._exempt


class _RBACState(object):
    """Records configuration for Flask-RBAC"""
    def __init__(self, rbac, app):
        self.rbac = rbac
        self.app = app


class RBAC(object):
    """
    This class implements role-based access control module in Flask.
    There are two way to initialize Flask-RBAC::

        app = Flask(__name__)
        rbac = RBAC(app)

    :param app: the Flask object
    """
    # Models used to resolve role names and to type-check the current user.
    _role_model = RBACRole
    _user_model = RBACUserMixin

    def __init__(self, app):
        self.acl = AccessControlList()
        # Rules registered via @rbac.allow before the ACL is built.
        self.before_acl = []
        self.app = app
        self.init_app(app)

    def init_app(self, app):
        # Add (RBAC, app) to flask extensions.
        # Add hook to authenticate permission before request.
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['rbac'] = _RBACState(self, app)

        # Static assets are always readable by anonymous users.
        self.acl.allow(anonymous, 'GET', app.view_functions['static'].__name__)
        app.before_first_request(self._setup_acl)
        app.before_request(self._authenticate)

    def has_permission(self, method, endpoint, user=None):
        """Return whether the current user can access the resource.

        Example::

            @app.route('/some_url', methods=['GET', 'POST'])
            @rbac.allow(['anonymous'], ['GET'])
            def a_view_func():
                return Response('Blah Blah...')

        If you are not logged.

        `rbac.has_permission('GET', 'a_view_func')` return True.
        `rbac.has_permission('POST', 'a_view_func')` return False.

        :param method: The method wait to check.
        :param endpoint: The application endpoint.
        :param user: user who you need to check. Current user by default.
        """
        app = self.get_app()
        _user = user or current_user
        roles = _user.get_roles()
        view_func = app.view_functions[endpoint]
        return self._check_permission(roles, method, view_func)

    def check_perm(self, role, method, callback=None):
        """Decorator: deny the wrapped view unless `role` may use `method`.

        :param role: Role object to check.
        :param method: HTTP method name.
        :param callback: Optional callable invoked on denial instead of
                         the default deny hook.
        """
        def decorator(view_func):
            if not self._check_permission([role], method, view_func):
                if callable(callback):
                    callback()
                else:
                    self._deny_hook()
            return view_func
        return decorator

    def allow(self, roles, methods, with_children=True):
        """Decorator: allow roles to access the view func with it.

        :param roles: List, each name of roles. Please note that,
                      `anonymous` is refered to anonymous.
                      If you add `anonymous` to the rule,
                      everyone can access the resource,
                      unless you deny other roles.
        :param methods: List, each name of methods.
                        methods is valid in ['GET', 'POST', 'PUT', 'DELETE']
        :param with_children: Whether allow children of roles as well.
                              True by default.
        """
        def decorator(view_func):
            _methods = [m.upper() for m in methods]
            # Queue the rules; they are materialized into the ACL by
            # _setup_acl on first request (role names -> role objects).
            for r, m, v in itertools.product(roles, _methods, [view_func.__name__]):
                self.before_acl.append((r, m, v, with_children))
            return view_func
        return decorator

    def exempt(self, view_func):
        """
        Decorator function
        Exempt a view function from being checked permission.
        """
        self.acl.exempt(view_func.__name__)
        return view_func

    def get_app(self, reference_app=None):
        """
        Helper to look up an app.
        """
        if reference_app is not None:
            return reference_app
        if self.app is not None:
            return self.app
        ctx = _app_ctx_stack.top
        if ctx is not None:
            return ctx.app
        raise RuntimeError('application not registered on rbac '
                           'instance and no application bound '
                           'to current context')

    def _authenticate(self):
        """before_request hook: validate the user type and enforce the ACL.

        Aborts 404 for unknown endpoints; returns the deny response when
        the user's roles do not permit the request.
        """
        app = self.get_app()
        assert app, "Please initialize your application into Flask-RBAC."
        assert self._role_model, "Please set role model before authenticate."
        assert self._user_model, "Please set user model before authenticate."

        user = current_user
        # current_user is a werkzeug LocalProxy; unwrap it for isinstance.
        if not isinstance(user._get_current_object(), self._user_model) and not isinstance(user._get_current_object(), AnonymousUser):
            # NOTE(review): self._user_model.__class__ is the metaclass here;
            # the model class itself was probably intended in this message.
            raise TypeError(
                "%s is not an instance of %s" %
                (user, self._user_model.__class__))

        endpoint = request.endpoint
        resource = app.view_functions.get(endpoint, None)

        if not resource:
            abort(404)

        method = request.method

        if not hasattr(user, 'get_roles'):
            roles = [anonymous]
        else:
            roles = user.get_roles()

        permit = self._check_permission(roles, method, resource)
        if not permit:
            return self._deny_hook(resource=resource)

    def _check_permission(self, roles, method, resource):
        """Return True if any of `roles` may call `method` on `resource`.

        Wildcard method '*' and wildcard resource None are also consulted;
        the anonymous role is always included in the check.
        """
        resource = resource.__name__
        if self.acl.is_exempt(resource):
            return True
        # Lazily build the ACL if the before_first_request hook hasn't run.
        if not self.acl.seted:
            self._setup_acl()
        _roles = set()
        _methods = {'*', method}
        _resources = {None, resource}
        _roles.add(anonymous)
        _roles.update(roles)
        for r, m, res in itertools.product(_roles, _methods, _resources):
            if self.acl.is_allowed(r.name, m, res):
                return True
        return False

    def _deny_hook(self, resource=None):
        """Build the JSON denial response: 403 if logged in, else 401.

        Returns a JSON body (not a redirect) containing either the user's
        identity or the login URL the client should navigate to.
        """
        app = self.get_app()
        # NOTE(review): is_authenticated is read as a property here but
        # called as a method elsewhere in this codebase — confirm the
        # Flask-Login version in use.
        if current_user.is_authenticated:
            status = 403
        else:
            status = 401
        #abort(status)
        if app.config.get('FRONTED_BY_NGINX'):
            url = "https://{}:{}{}".format(app.config.get('FQDN'), app.config.get('NGINX_PORT'), '/login')
        else:
            url = "http://{}:{}{}".format(app.config.get('FQDN'), app.config.get('API_PORT'), '/login')
        if current_user.is_authenticated:
            auth_dict = {
                "authenticated": True,
                "user": current_user.email,
                "roles": current_user.role,
            }
        else:
            auth_dict = {
                "authenticated": False,
                "user": None,
                "url": url
            }
        return Response(response=json.dumps({"auth": auth_dict}), status=status, mimetype="application/json")

    def _setup_acl(self):
        """Materialize queued @allow rules into the ACL (role names -> roles)."""
        for rn, method, resource, with_children in self.before_acl:
            role = self._role_model.get_by_name(rn)
            self.acl.allow(role, method, resource, with_children)
        self.acl.seted = True
./CrossVul/dataset_final_sorted/CWE-601/py/good_3250_0
crossvul-python_data_good_752_0
"""Tornado handlers for logging into the notebook.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import re import os try: from urllib.parse import urlparse # Py 3 except ImportError: from urlparse import urlparse # Py 2 import uuid from tornado.escape import url_escape from .security import passwd_check, set_password from ..base.handlers import IPythonHandler class LoginHandler(IPythonHandler): """The basic tornado login handler authenticates with a hashed password from the configuration. """ def _render(self, message=None): self.write(self.render_template('login.html', next=url_escape(self.get_argument('next', default=self.base_url)), message=message, )) def _redirect_safe(self, url, default=None): """Redirect if url is on our PATH Full-domain redirects are allowed if they pass our CORS origin checks. Otherwise use default (self.base_url if unspecified). """ if default is None: default = self.base_url # protect chrome users from mishandling unescaped backslashes. 
# \ is not valid in urls, but some browsers treat it as / # instead of %5C, causing `\\` to behave as `//` url = url.replace("\\", "%5C") parsed = urlparse(url) if parsed.netloc or not (parsed.path + '/').startswith(self.base_url): # require that next_url be absolute path within our path allow = False # OR pass our cross-origin check if parsed.netloc: # if full URL, run our cross-origin check: origin = '%s://%s' % (parsed.scheme, parsed.netloc) origin = origin.lower() if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) if not allow: # not allowed, use default self.log.warning("Not allowing login redirect to %r" % url) url = default self.redirect(url) def get(self): if self.current_user: next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) else: self._render() @property def hashed_password(self): return self.password_from_settings(self.settings) def passwd_check(self, a, b): return passwd_check(a, b) def post(self): typed_password = self.get_argument('password', default=u'') new_password = self.get_argument('new_password', default=u'') if self.get_login_available(self.settings): if self.passwd_check(self.hashed_password, typed_password) and not new_password: self.set_login_cookie(self, uuid.uuid4().hex) elif self.token and self.token == typed_password: self.set_login_cookie(self, uuid.uuid4().hex) if new_password and self.settings.get('allow_password_change'): config_dir = self.settings.get('config_dir') config_file = os.path.join(config_dir, 'jupyter_notebook_config.json') set_password(new_password, config_file=config_file) self.log.info("Wrote hashed password to %s" % config_file) else: self.set_status(401) self._render(message={'error': 'Invalid credentials'}) return next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) @classmethod def set_login_cookie(cls, handler, user_id=None): """Call this on handlers to 
set the login cookie for success""" cookie_options = handler.settings.get('cookie_options', {}) cookie_options.setdefault('httponly', True) # tornado <4.2 has a bug that considers secure==True as soon as # 'secure' kwarg is passed to set_secure_cookie if handler.settings.get('secure_cookie', handler.request.protocol == 'https'): cookie_options.setdefault('secure', True) cookie_options.setdefault('path', handler.base_url) handler.set_secure_cookie(handler.cookie_name, user_id, **cookie_options) return user_id auth_header_pat = re.compile('token\s+(.+)', re.IGNORECASE) @classmethod def get_token(cls, handler): """Get the user token from a request Default: - in URL parameters: ?token=<token> - in header: Authorization: token <token> """ user_token = handler.get_argument('token', '') if not user_token: # get it from Authorization header m = cls.auth_header_pat.match(handler.request.headers.get('Authorization', '')) if m: user_token = m.group(1) return user_token @classmethod def should_check_origin(cls, handler): """Should the Handler check for CORS origin validation? Origin check should be skipped for token-authenticated requests. Returns: - True, if Handler must check for valid CORS origin. - False, if Handler should skip origin check since requests are token-authenticated. """ return not cls.is_token_authenticated(handler) @classmethod def is_token_authenticated(cls, handler): """Returns True if handler has been token authenticated. Otherwise, False. Login with a token is used to signal certain things, such as: - permit access to REST API - xsrf protection - skip origin-checks for scripts """ if getattr(handler, '_user_id', None) is None: # ensure get_user has been called, so we know if we're token-authenticated handler.get_current_user() return getattr(handler, '_token_authenticated', False) @classmethod def get_user(cls, handler): """Called by handlers.get_current_user for identifying the current user. See tornado.web.RequestHandler.get_current_user for details. 
""" # Can't call this get_current_user because it will collide when # called on LoginHandler itself. if getattr(handler, '_user_id', None): return handler._user_id user_id = cls.get_user_token(handler) if user_id is None: get_secure_cookie_kwargs = handler.settings.get('get_secure_cookie_kwargs', {}) user_id = handler.get_secure_cookie(handler.cookie_name, **get_secure_cookie_kwargs ) else: cls.set_login_cookie(handler, user_id) # Record that the current request has been authenticated with a token. # Used in is_token_authenticated above. handler._token_authenticated = True if user_id is None: # If an invalid cookie was sent, clear it to prevent unnecessary # extra warnings. But don't do this on a request with *no* cookie, # because that can erroneously log you out (see gh-3365) if handler.get_cookie(handler.cookie_name) is not None: handler.log.warning("Clearing invalid/expired login cookie %s", handler.cookie_name) handler.clear_login_cookie() if not handler.login_available: # Completely insecure! No authentication at all. # No need to warn here, though; validate_security will have already done that. user_id = 'anonymous' # cache value for future retrievals on the same request handler._user_id = user_id return user_id @classmethod def get_user_token(cls, handler): """Identify the user based on a token in the URL or Authorization header Returns: - uuid if authenticated - None if not """ token = handler.token if not token: return # check login token from URL argument or Authorization header user_token = cls.get_token(handler) authenticated = False if user_token == token: # token-authenticated, set the login cookie handler.log.debug("Accepting token-authenticated connection from %s", handler.request.remote_ip) authenticated = True if authenticated: return uuid.uuid4().hex else: return None @classmethod def validate_security(cls, app, ssl_options=None): """Check the notebook application's security. 
Show messages, or abort if necessary, based on the security configuration. """ if not app.ip: warning = "WARNING: The notebook server is listening on all IP addresses" if ssl_options is None: app.log.warning(warning + " and not using encryption. This " "is not recommended.") if not app.password and not app.token: app.log.warning(warning + " and not using authentication. " "This is highly insecure and not recommended.") else: if not app.password and not app.token: app.log.warning( "All authentication is disabled." " Anyone who can connect to this server will be able to run code.") @classmethod def password_from_settings(cls, settings): """Return the hashed password from the tornado settings. If there is no configured password, an empty string will be returned. """ return settings.get('password', u'') @classmethod def get_login_available(cls, settings): """Whether this LoginHandler is needed - and therefore whether the login page should be displayed.""" return bool(cls.password_from_settings(settings) or settings.get('token'))
./CrossVul/dataset_final_sorted/CWE-601/py/good_752_0
crossvul-python_data_good_753_0
"""Tornado handlers for logging into the notebook.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import re import os try: from urllib.parse import urlparse # Py 3 except ImportError: from urlparse import urlparse # Py 2 import uuid from tornado.escape import url_escape from .security import passwd_check, set_password from ..base.handlers import IPythonHandler class LoginHandler(IPythonHandler): """The basic tornado login handler authenticates with a hashed password from the configuration. """ def _render(self, message=None): self.write(self.render_template('login.html', next=url_escape(self.get_argument('next', default=self.base_url)), message=message, )) def _redirect_safe(self, url, default=None): """Redirect if url is on our PATH Full-domain redirects are allowed if they pass our CORS origin checks. Otherwise use default (self.base_url if unspecified). """ if default is None: default = self.base_url parsed = urlparse(url) if parsed.netloc or not (parsed.path + '/').startswith(self.base_url): # require that next_url be absolute path within our path allow = False # OR pass our cross-origin check if parsed.netloc: # if full URL, run our cross-origin check: origin = '%s://%s' % (parsed.scheme, parsed.netloc) origin = origin.lower() if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) if not allow: # not allowed, use default self.log.warning("Not allowing login redirect to %r" % url) url = default self.redirect(url) def get(self): if self.current_user: next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) else: self._render() @property def hashed_password(self): return self.password_from_settings(self.settings) def passwd_check(self, a, b): return passwd_check(a, b) def post(self): typed_password = self.get_argument('password', default=u'') new_password = self.get_argument('new_password', 
default=u'') if self.get_login_available(self.settings): if self.passwd_check(self.hashed_password, typed_password) and not new_password: self.set_login_cookie(self, uuid.uuid4().hex) elif self.token and self.token == typed_password: self.set_login_cookie(self, uuid.uuid4().hex) if new_password and self.settings.get('allow_password_change'): config_dir = self.settings.get('config_dir') config_file = os.path.join(config_dir, 'jupyter_notebook_config.json') set_password(new_password, config_file=config_file) self.log.info("Wrote hashed password to %s" % config_file) else: self.set_status(401) self._render(message={'error': 'Invalid credentials'}) return next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) @classmethod def set_login_cookie(cls, handler, user_id=None): """Call this on handlers to set the login cookie for success""" cookie_options = handler.settings.get('cookie_options', {}) cookie_options.setdefault('httponly', True) # tornado <4.2 has a bug that considers secure==True as soon as # 'secure' kwarg is passed to set_secure_cookie if handler.settings.get('secure_cookie', handler.request.protocol == 'https'): cookie_options.setdefault('secure', True) cookie_options.setdefault('path', handler.base_url) handler.set_secure_cookie(handler.cookie_name, user_id, **cookie_options) return user_id auth_header_pat = re.compile('token\s+(.+)', re.IGNORECASE) @classmethod def get_token(cls, handler): """Get the user token from a request Default: - in URL parameters: ?token=<token> - in header: Authorization: token <token> """ user_token = handler.get_argument('token', '') if not user_token: # get it from Authorization header m = cls.auth_header_pat.match(handler.request.headers.get('Authorization', '')) if m: user_token = m.group(1) return user_token @classmethod def should_check_origin(cls, handler): """Should the Handler check for CORS origin validation? Origin check should be skipped for token-authenticated requests. 
Returns: - True, if Handler must check for valid CORS origin. - False, if Handler should skip origin check since requests are token-authenticated. """ return not cls.is_token_authenticated(handler) @classmethod def is_token_authenticated(cls, handler): """Returns True if handler has been token authenticated. Otherwise, False. Login with a token is used to signal certain things, such as: - permit access to REST API - xsrf protection - skip origin-checks for scripts """ if getattr(handler, '_user_id', None) is None: # ensure get_user has been called, so we know if we're token-authenticated handler.get_current_user() return getattr(handler, '_token_authenticated', False) @classmethod def get_user(cls, handler): """Called by handlers.get_current_user for identifying the current user. See tornado.web.RequestHandler.get_current_user for details. """ # Can't call this get_current_user because it will collide when # called on LoginHandler itself. if getattr(handler, '_user_id', None): return handler._user_id user_id = cls.get_user_token(handler) if user_id is None: get_secure_cookie_kwargs = handler.settings.get('get_secure_cookie_kwargs', {}) user_id = handler.get_secure_cookie(handler.cookie_name, **get_secure_cookie_kwargs ) else: cls.set_login_cookie(handler, user_id) # Record that the current request has been authenticated with a token. # Used in is_token_authenticated above. handler._token_authenticated = True if user_id is None: # If an invalid cookie was sent, clear it to prevent unnecessary # extra warnings. But don't do this on a request with *no* cookie, # because that can erroneously log you out (see gh-3365) if handler.get_cookie(handler.cookie_name) is not None: handler.log.warning("Clearing invalid/expired login cookie %s", handler.cookie_name) handler.clear_login_cookie() if not handler.login_available: # Completely insecure! No authentication at all. # No need to warn here, though; validate_security will have already done that. 
user_id = 'anonymous' # cache value for future retrievals on the same request handler._user_id = user_id return user_id @classmethod def get_user_token(cls, handler): """Identify the user based on a token in the URL or Authorization header Returns: - uuid if authenticated - None if not """ token = handler.token if not token: return # check login token from URL argument or Authorization header user_token = cls.get_token(handler) authenticated = False if user_token == token: # token-authenticated, set the login cookie handler.log.debug("Accepting token-authenticated connection from %s", handler.request.remote_ip) authenticated = True if authenticated: return uuid.uuid4().hex else: return None @classmethod def validate_security(cls, app, ssl_options=None): """Check the notebook application's security. Show messages, or abort if necessary, based on the security configuration. """ if not app.ip: warning = "WARNING: The notebook server is listening on all IP addresses" if ssl_options is None: app.log.warning(warning + " and not using encryption. This " "is not recommended.") if not app.password and not app.token: app.log.warning(warning + " and not using authentication. " "This is highly insecure and not recommended.") else: if not app.password and not app.token: app.log.warning( "All authentication is disabled." " Anyone who can connect to this server will be able to run code.") @classmethod def password_from_settings(cls, settings): """Return the hashed password from the tornado settings. If there is no configured password, an empty string will be returned. """ return settings.get('password', u'') @classmethod def get_login_available(cls, settings): """Whether this LoginHandler is needed - and therefore whether the login page should be displayed.""" return bool(cls.password_from_settings(settings) or settings.get('token'))
./CrossVul/dataset_final_sorted/CWE-601/py/good_753_0
crossvul-python_data_bad_1915_5
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019 Matrix.org Federation C.I.C # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple, Union, ) from prometheus_client import Counter, Gauge, Histogram from twisted.internet import defer from twisted.internet.abstract import isIPAddress from twisted.python import failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, FederationError, IncompatibleRoomVersionError, NotFoundError, SynapseError, UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.endpoint import parse_server_name from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, nested_logging_context, run_in_background, ) from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, ) from synapse.types import JsonDict, 
get_domain_from_id from synapse.util import glob_to_regex, json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache if TYPE_CHECKING: from synapse.server import HomeServer # when processing incoming transactions, we try to handle multiple rooms in # parallel, up to this limit. TRANSACTION_CONCURRENCY_LIMIT = 10 logger = logging.getLogger(__name__) received_pdus_counter = Counter("synapse_federation_server_received_pdus", "") received_edus_counter = Counter("synapse_federation_server_received_edus", "") received_queries_counter = Counter( "synapse_federation_server_received_queries", "", ["type"] ) pdu_process_time = Histogram( "synapse_federation_server_pdu_process_time", "Time taken to process an event", ) last_pdu_age_metric = Gauge( "synapse_federation_last_received_pdu_age", "The age (in seconds) of the last PDU successfully received from the given domain", labelnames=("server_name",), ) class FederationServer(FederationBase): def __init__(self, hs): super().__init__(hs) self.auth = hs.get_auth() self.handler = hs.get_federation_handler() self.state = hs.get_state_handler() self.device_handler = hs.get_device_handler() # Ensure the following handlers are loaded since they register callbacks # with FederationHandlerRegistry. hs.get_directory_handler() self._federation_ratelimiter = hs.get_federation_ratelimiter() self._server_linearizer = Linearizer("fed_server") self._transaction_linearizer = Linearizer("fed_txn_handler") # We cache results for transaction with the same ID self._transaction_resp_cache = ResponseCache( hs, "fed_txn_handler", timeout_ms=30000 ) # type: ResponseCache[Tuple[str, str]] self.transaction_actions = TransactionActions(self.store) self.registry = hs.get_federation_registry() # We cache responses to state queries, as they take a while and often # come in waves. 
self._state_resp_cache = ResponseCache( hs, "state_resp", timeout_ms=30000 ) # type: ResponseCache[Tuple[str, str]] self._state_ids_resp_cache = ResponseCache( hs, "state_ids_resp", timeout_ms=30000 ) # type: ResponseCache[Tuple[str, str]] self._federation_metrics_domains = ( hs.get_config().federation.federation_metrics_domains ) async def on_backfill_request( self, origin: str, room_id: str, versions: List[str], limit: int ) -> Tuple[int, Dict[str, Any]]: with (await self._server_linearizer.queue((origin, room_id))): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) pdus = await self.handler.on_backfill_request( origin, room_id, versions, limit ) res = self._transaction_from_pdus(pdus).get_dict() return 200, res async def on_incoming_transaction( self, origin: str, transaction_data: JsonDict ) -> Tuple[int, Dict[str, Any]]: # keep this as early as possible to make the calculated origin ts as # accurate as possible. request_time = self._clock.time_msec() transaction = Transaction(**transaction_data) transaction_id = transaction.transaction_id # type: ignore if not transaction_id: raise Exception("Transaction missing transaction_id") logger.debug("[%s] Got transaction", transaction_id) # We wrap in a ResponseCache so that we de-duplicate retried # transactions. return await self._transaction_resp_cache.wrap( (origin, transaction_id), self._on_incoming_transaction_inner, origin, transaction, request_time, ) async def _on_incoming_transaction_inner( self, origin: str, transaction: Transaction, request_time: int ) -> Tuple[int, Dict[str, Any]]: # Use a linearizer to ensure that transactions from a remote are # processed in order. with await self._transaction_linearizer.queue(origin): # We rate limit here *after* we've queued up the incoming requests, # so that we don't fill up the ratelimiter with blocked requests. 
# # This is important as the ratelimiter allows N concurrent requests # at a time, and only starts ratelimiting if there are more requests # than that being processed at a time. If we queued up requests in # the linearizer/response cache *after* the ratelimiting then those # queued up requests would count as part of the allowed limit of N # concurrent requests. with self._federation_ratelimiter.ratelimit(origin) as d: await d result = await self._handle_incoming_transaction( origin, transaction, request_time ) return result async def _handle_incoming_transaction( self, origin: str, transaction: Transaction, request_time: int ) -> Tuple[int, Dict[str, Any]]: """ Process an incoming transaction and return the HTTP response Args: origin: the server making the request transaction: incoming transaction request_time: timestamp that the HTTP request arrived at Returns: HTTP response code and body """ response = await self.transaction_actions.have_responded(origin, transaction) if response: logger.debug( "[%s] We've already responded to this request", transaction.transaction_id, # type: ignore ) return response logger.debug("[%s] Transaction is new", transaction.transaction_id) # type: ignore # Reject if PDU count > 50 or EDU count > 100 if len(transaction.pdus) > 50 or ( # type: ignore hasattr(transaction, "edus") and len(transaction.edus) > 100 # type: ignore ): logger.info("Transaction PDU or EDU count too large. Returning 400") response = {} await self.transaction_actions.set_response( origin, transaction, 400, response ) return 400, response # We process PDUs and EDUs in parallel. This is important as we don't # want to block things like to device messages from reaching clients # behind the potentially expensive handling of PDUs. 
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(self._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        # Record the response so a retried transaction gets the same answer.
        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response

    async def _handle_pdus_in_txn(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Dict[str, dict]:
        """Process the PDUs in a received transaction.

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            A map from event ID of a processed PDU to any errors we should
            report back to the sending server.
        """

        received_pdus_counter.inc(len(transaction.pdus))  # type: ignore

        origin_host, _ = parse_server_name(origin)

        pdus_by_room = {}  # type: Dict[str, List[EventBase]]

        # Tracks the newest origin_server_ts seen, for the age metric below.
        newest_pdu_ts = 0

        for p in transaction.pdus:  # type: ignore
            # FIXME (richardv): I don't think this works:
            #  https://github.com/matrix-org/synapse/issues/8429
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = await self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue
            except UnsupportedRoomVersionError as e:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info("Ignoring PDU: %s", e)
                continue

            event = event_from_pdu_json(p, room_version)
            pdus_by_room.setdefault(room_id, []).append(event)

            if event.origin_server_ts > newest_pdu_ts:
                newest_pdu_ts = event.origin_server_ts

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                # Banned origin: report the ACL error for every PDU in the room.
                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with pdu_process_time.time():
                    with nested_logging_context(event_id):
                        try:
                            await self._handle_received_pdu(origin, pdu)
                            pdu_results[event_id] = {}
                        except FederationError as e:
                            logger.warning("Error handling PDU %s: %s", event_id, e)
                            pdu_results[event_id] = {"error": str(e)}
                        except Exception as e:
                            f = failure.Failure()
                            pdu_results[event_id] = {"error": str(e)}
                            logger.error(
                                "Failed to handle PDU %s",
                                event_id,
                                exc_info=(f.type, f.value, f.getTracebackObject()),
                            )

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if newest_pdu_ts and origin in self._federation_metrics_domains:
            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)

        return pdu_results

    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
        """Process the EDUs in a received transaction.
        """

        async def _process_edu(edu_dict):
            received_edus_counter.inc()

            edu = Edu(
                origin=origin,
                destination=self.server_name,
                edu_type=edu_dict["edu_type"],
                content=edu_dict["content"],
            )
            await self.registry.on_edu(edu.edu_type, origin, edu.content)

        await concurrently_execute(
            _process_edu,
            getattr(transaction, "edus", []),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

    async def on_room_state_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        # Only servers with a user in the room may request its state.
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (await self._server_linearizer.queue((origin, room_id))):
            resp = dict(
                await self._state_resp_cache.wrap(
                    (room_id, event_id),
                    self._on_context_state_request_compute,
                    room_id,
                    event_id,
                )
            )

            room_version = await self.store.get_room_version_id(room_id)
            resp["room_version"] = room_version

        return 200, resp

    async def on_state_ids_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        if not event_id:
            raise NotImplementedError("Specify an event")

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        # Only servers with a user in the room may request state IDs.
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        resp = await self._state_ids_resp_cache.wrap(
            (room_id, event_id),
            self._on_state_ids_request_compute,
            room_id,
            event_id,
        )

        return 200, resp

    async def _on_state_ids_request_compute(self, room_id, event_id):
        # Compute the state event IDs at the given event plus their auth chain.
        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}

    async def _on_context_state_request_compute(
        self, room_id: str, event_id: str
    ) -> Dict[str, list]:
        # With an event_id: state at that event; without: current room state.
        if event_id:
            pdus = await self.handler.get_state_for_pdu(room_id, event_id)
        else:
            pdus = (await self.state.get_current_state(room_id)).values()

        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])

        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }

    async def on_pdu_request(
        self, origin: str, event_id: str
    ) -> Tuple[int, Union[JsonDict, str]]:
        pdu = await self.handler.get_persisted_pdu(origin, event_id)

        if pdu:
            return 200, self._transaction_from_pdus([pdu]).get_dict()
        else:
            return 404, ""

    async def on_query_request(
        self, query_type: str, args: Dict[str, str]
    ) -> Tuple[int, Dict[str, Any]]:
        # Dispatch to whichever handler registered for this query type.
        received_queries_counter.labels(query_type).inc()
        resp = await self.registry.on_query(query_type, args)
        return 200, resp

    async def on_make_join_request(
        self, origin: str, room_id: str, user_id: str, supported_versions: List[str]
    ) -> Dict[str, Any]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        # Refuse early if the joining server can't speak this room's version.
        room_version = await self.store.get_room_version_id(room_id)
        if room_version not in supported_versions:
            logger.warning(
                "Room version %s not in %s", room_version, supported_versions
            )
            raise IncompatibleRoomVersionError(room_version=room_version)

        pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    async def on_invite_request(
        self, origin: str, content: JsonDict, room_version_id: str
    ) -> Dict[str, Any]:
        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
        if not room_version:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        # Verify signatures/hashes before passing the invite to the handler.
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
        time_now = self._clock.time_msec()
        return {"event": ret_pdu.get_pdu_json(time_now)}

    async def on_send_join_request(
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
        logger.debug("on_send_join_request: content: %s", content)

        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

        pdu = await self._check_sigs_and_hash(room_version, pdu)

        res_pdus = await self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        return {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }

    async def on_make_leave_request(
        self, origin: str, room_id: str, user_id: str
    ) -> Dict[str, Any]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)
        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

        room_version = await self.store.get_room_version_id(room_id)

        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict:
        logger.debug("on_send_leave_request: content: %s", content)

        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

        # Verify signatures/hashes before accepting the leave event.
        pdu = await self._check_sigs_and_hash(room_version, pdu)

        await self.handler.on_send_leave_request(origin, pdu)
        return {}

    async def on_event_auth(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        # Serialise per (origin, room) and enforce the room's server ACL.
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)

            time_now = self._clock.time_msec()
            auth_pdus = await self.handler.on_event_auth(event_id)
            res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
        return 200, res

    @log_function
    async def on_query_client_keys(
        self, origin: str, content: Dict[str, str]
    ) -> Tuple[int, Dict[str, Any]]:
        # Delegates to the generic query dispatch under the "client_keys" type.
        return await self.on_query_request("client_keys", content)

    async def on_query_user_devices(
        self, origin: str, user_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        keys = await self.device_handler.on_federation_query_user_devices(user_id)
        return 200, keys
    @trace
    async def on_claim_client_keys(
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
        # Flatten the request into (user_id, device_id, algorithm) triples.
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
        results = await self.store.claim_e2e_one_time_keys(query)

        json_result = {}  # type: Dict[str, Dict[str, dict]]
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_str in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json_decoder.decode(json_str)
                    }

        logger.info(
            "Claimed one-time-keys: %s",
            ",".join(
                (
                    "%s for %s:%s" % (key_id, user_id, device_id)
                    for user_id, user_keys in json_result.items()
                    for device_id, device_keys in user_keys.items()
                    for key_id, _ in device_keys.items()
                )
            ),
        )

        return {"one_time_keys": json_result}

    async def on_get_missing_events(
        self,
        origin: str,
        room_id: str,
        earliest_events: List[str],
        latest_events: List[str],
        limit: int,
    ) -> Dict[str, list]:
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)

            logger.debug(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d",
                earliest_events,
                latest_events,
                limit,
            )

            missing_events = await self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit
            )

            # Only log full event bodies for small result sets.
            if len(missing_events) < 5:
                logger.debug(
                    "Returning %d events: %r", len(missing_events), missing_events
                )
            else:
                logger.debug("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}

    @log_function
    async def on_openid_userinfo(self, token: str) -> Optional[str]:
        ts_now_ms = self._clock.time_msec()
        return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
        """ Process a PDU received in a federation /send/ transaction.

        If the event is invalid, then this method throws a FederationError.
        (The error will then be logged and sent back to the sender (which
        probably won't do anything with it), and other events in the
        transaction will be processed as normal).

        It is likely that we'll then receive other events which refer to
        this rejected_event in their prev_events, etc.  When that happens, we'll
        attempt to fetch the rejected event again, which will presumably fail,
        so those second-generation events will also get rejected.

        Eventually, we get to the point where there are more than 10 events
        between any new events and the original rejected event. Since we
        only try to backfill 10 events deep on received pdu, we then accept the
        new event, possibly introducing a discontinuity in the DAG, with new
        forward extremities, so normal service is approximately returned,
        until we try to backfill across the discontinuity.

        Args:
            origin: server which sent the pdu
            pdu: received pdu

        Raises: FederationError if the signatures / hash do not match, or
            if the event was unacceptable for any other reason (eg, too large,
            too many prev_events, couldn't find the prev_events)
        """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.sender):
            # We continue to accept join events from any server; this is
            # necessary for the federation join dance to work correctly.
            # (When we join over federation, the "helper" server is
            # responsible for sending out the join event, rather than the
            # origin. See bug #1893. This is also true for some third party
            # invites).
            if not (
                pdu.type == "m.room.member"
                and pdu.content
                and pdu.content.get("membership", None)
                in (Membership.JOIN, Membership.INVITE)
            ):
                logger.info(
                    "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
                )
                return
            else:
                logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

        # We've already checked that we know the room version by this point
        room_version = await self.store.get_room_version(pdu.room_id)

        # Check signature.
        try:
            pdu = await self._check_sigs_and_hash(room_version, pdu)
        except SynapseError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

        await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    async def exchange_third_party_invite(
        self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
    ):
        # Delegates third-party invite exchange to the federation handler.
        ret = await self.handler.exchange_third_party_invite(
            sender_user_id, target_user_id, room_id, signed
        )
        return ret

    async def on_exchange_third_party_invite_request(self, event_dict: Dict):
        ret = await self.handler.on_exchange_third_party_invite_request(event_dict)
        return ret

    async def check_server_matches_acl(self, server_name: str, room_id: str):
        """Check if the given server is allowed by the server ACLs in the room

        Args:
            server_name: name of server, *without any port part*
            room_id: ID of the room to check

        Raises:
            AuthError if the server does not match the ACL
        """
        state_ids = await self.store.get_current_state_ids(room_id)
        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

        # No ACL event in the room: all servers are allowed.
        if not acl_event_id:
            return

        acl_event = await self.store.get_event(acl_event_id)
        if server_matches_acl_event(server_name, acl_event):
            return

        raise AuthError(code=403, msg="Server is banned from room")


def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
    """Check if the given server is allowed by the ACL event

    Args:
        server_name: name of server, without any port part
        acl_event: m.room.server_acl event

    Returns:
        True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        if server_name[0] == "[":
            return False

        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False

    # next,  check the deny list
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False

    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True

    # everything else should be rejected.
    # logger.info("%s fell through", server_name)
    return False


def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool:
    # ACL entries must be strings; anything else is ignored (treated as
    # non-matching) rather than raising.
    if not isinstance(acl_entry, str):
        logger.warning(
            "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
        )
        return False
    regex = glob_to_regex(acl_entry)
    return bool(regex.match(server_name))


class FederationHandlerRegistry:
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """

    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
        self.http_client = hs.get_simple_http_client()
        self.clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()

        # These are safe to load in monolith mode, but will explode if we try
        # and use them. However we have guards before we use them to ensure that
        # we don't route to ourselves, and in monolith mode that will always be
        # the case.
        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)

        # Registered handlers, keyed by EDU type / query type respectively.
        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]

        # Map from type to instance name that we should route EDU handling to.
        self._edu_type_to_instance = {}  # type: Dict[str, str]

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

        Args:
            edu_type: The type of the incoming EDU to register handler for
            handler: A callable invoked on incoming EDU of the given type.
                The arguments are the origin server name and the EDU contents.
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        logger.info("Registering federation EDU handler for %r", edu_type)

        self.edu_handlers[edu_type] = handler

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], defer.Deferred]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

        Args:
            query_type: Category name of the query, which should match
                the string used by make_query.
            handler: Invoked to handle incoming queries of this type.
                The return will be yielded on and the result used as
                the response to the query request.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" % (query_type,))

        logger.info("Registering federation query handler for %r", query_type)

        self.query_handlers[query_type] = handler

    def register_instance_for_edu(self, edu_type: str, instance_name: str):
        """Register that the EDU handler is on a different instance than master.
        """
        self._edu_type_to_instance[edu_type] = instance_name

    async def on_edu(self, edu_type: str, origin: str, content: dict):
        # Drop presence EDUs entirely when presence is disabled in config.
        if not self.config.use_presence and edu_type == "m.presence":
            return

        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
            with start_active_span_from_edu(content, "handle_edu"):
                try:
                    await handler(origin, content)
                except SynapseError as e:
                    logger.info("Failed to handle edu %r: %r", edu_type, e)
                except Exception:
                    logger.exception("Failed to handle edu %r", edu_type)
            return

        # Check if we can route it somewhere else that isn't us
        route_to = self._edu_type_to_instance.get(edu_type, "master")
        if route_to != self._instance_name:
            try:
                await self._send_edu(
                    instance_name=route_to,
                    edu_type=edu_type,
                    origin=origin,
                    content=content,
                )
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
            return

        # Oh well, let's just log and move on.
        logger.warning("No handler registered for EDU type %s", edu_type)

    async def on_query(self, query_type: str, args: dict):
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)

        # Check if we can route it somewhere else that isn't us
        if self._instance_name == "master":
            return await self._get_query_client(query_type=query_type, args=args)

        # Uh oh, no handler! Let's raise an exception so the request returns an
        # error.
        logger.warning("No handler registered for query type %s", query_type)
        raise NotFoundError("No handler for Query type '%s'" % (query_type,))
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_5
crossvul-python_data_good_1915_4
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import urllib from collections import defaultdict import attr from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, is_signing_algorithm_supported, ) from signedjson.sign import ( SignatureVerifyException, encode_canonical_json, signature_ids, verify_signed_json, ) from unpaddedbase64 import decode_base64 from twisted.internet import defer from synapse.api.errors import ( Codes, HttpResponseException, RequestSendFailed, SynapseError, ) from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, preserve_fn, run_in_background, ) from synapse.storage.keys import FetchKeyResult from synapse.util import unwrapFirstError from synapse.util.async_helpers import yieldable_gather_results from synapse.util.metrics import Measure from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) @attr.s(slots=True, cmp=False) class VerifyJsonRequest: """ A request to verify a JSON object. Attributes: server_name(str): The name of the server to verify against. key_ids(set[str]): The set of key_ids to that could be used to verify the JSON object json_object(dict): The JSON object to verify. minimum_valid_until_ts (int): time at which we require the signing key to be valid. 
            (0 implies we don't care)

        key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
            A deferred (server_name, key_id, verify_key) tuple that resolves when
            a verify key has been fetched. The deferreds' callbacks are run with no
            logcontext.

            If we are unable to find a key which satisfies the request, the deferred
            errbacks with an M_UNAUTHORIZED SynapseError.
    """

    server_name = attr.ib()
    json_object = attr.ib()
    minimum_valid_until_ts = attr.ib()
    request_name = attr.ib()
    # Derived in __attrs_post_init__ from the object's signatures.
    key_ids = attr.ib(init=False)
    key_ready = attr.ib(default=attr.Factory(defer.Deferred))

    def __attrs_post_init__(self):
        self.key_ids = signature_ids(self.json_object, self.server_name)


class KeyLookupError(ValueError):
    pass


class Keyring:
    def __init__(self, hs, key_fetchers=None):
        self.clock = hs.get_clock()

        # Fetchers are tried in order: local store, then trusted notary
        # ("perspectives") servers, then the origin server directly.
        if key_fetchers is None:
            key_fetchers = (
                StoreKeyFetcher(hs),
                PerspectivesKeyFetcher(hs),
                ServerKeyFetcher(hs),
            )
        self._key_fetchers = key_fetchers

        # map from server name to Deferred. Has an entry for each server with
        # an ongoing key download; the Deferred completes once the download
        # completes.
        #
        # These are regular, logcontext-agnostic Deferreds.
        self.key_downloads = {}

    def verify_json_for_server(
        self, server_name, json_object, validity_time, request_name
    ):
        """Verify that a JSON object has been signed by a given server

        Args:
            server_name (str): name of the server which must have signed this
                object

            json_object (dict): object to be checked

            validity_time (int): timestamp at which we require the signing key to
                be valid. (0 implies we don't care)

            request_name (str): an identifier for this json object (eg, an event id)
                for logging.
        Returns:
            Deferred[None]: completes if the object was correctly signed, otherwise
                errbacks with an error
        """
        req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
        requests = (req,)
        return make_deferred_yieldable(self._verify_objects(requests)[0])

    def verify_json_objects_for_server(self, server_and_json):
        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

        Args:
            server_and_json (iterable[Tuple[str, dict, int, str]):
                Iterable of (server_name, json_object, validity_time, request_name)
                tuples.

                validity_time is a timestamp at which the signing key must be
                valid.

                request_name is an identifier for this json object (eg, an event
                id) for logging.

        Returns:
            List<Deferred[None]>: for each input triplet, a deferred indicating success
                or failure to verify each json object's signature for the given
                server_name. The deferreds run their callbacks in the sentinel
                logcontext.
        """
        return self._verify_objects(
            VerifyJsonRequest(server_name, json_object, validity_time, request_name)
            for server_name, json_object, validity_time, request_name in server_and_json
        )

    def _verify_objects(self, verify_requests):
        """Does the work of verify_json_[objects_]for_server


        Args:
            verify_requests (iterable[VerifyJsonRequest]):
                Iterable of verification requests.

        Returns:
            List<Deferred[None]>: for each input item, a deferred indicating success
                or failure to verify each json object's signature for the given
                server_name. The deferreds run their callbacks in the sentinel
                logcontext.
        """
        # a list of VerifyJsonRequests which are awaiting a key lookup
        key_lookups = []
        handle = preserve_fn(_handle_key_deferred)

        def process(verify_request):
            """Process an entry in the request list

            Adds a key request to key_lookups, and returns a deferred which will
            complete or fail (in the sentinel context) when verification completes.
            """
            if not verify_request.key_ids:
                # The object carries no signature from this server at all.
                return defer.fail(
                    SynapseError(
                        400,
                        "Not signed by %s" % (verify_request.server_name,),
                        Codes.UNAUTHORIZED,
                    )
                )

            logger.debug(
                "Verifying %s for %s with key_ids %s, min_validity %i",
                verify_request.request_name,
                verify_request.server_name,
                verify_request.key_ids,
                verify_request.minimum_valid_until_ts,
            )

            # add the key request to the queue, but don't start it off yet.
            key_lookups.append(verify_request)

            # now run _handle_key_deferred, which will wait for the key request
            # to complete and then do the verification.
            #
            # We want _handle_key_request to log to the right context, so we
            # wrap it with preserve_fn (aka run_in_background)
            return handle(verify_request)

        results = [process(r) for r in verify_requests]

        if key_lookups:
            run_in_background(self._start_key_lookups, key_lookups)

        return results

    async def _start_key_lookups(self, verify_requests):
        """Sets off the key fetches for each verify request

        Once each fetch completes, verify_request.key_ready will be resolved.

        Args:
            verify_requests (List[VerifyJsonRequest]):
        """
        try:
            # map from server name to a set of outstanding request ids
            server_to_request_ids = {}

            for verify_request in verify_requests:
                server_name = verify_request.server_name
                request_id = id(verify_request)
                server_to_request_ids.setdefault(server_name, set()).add(request_id)

            # Wait for any previous lookups to complete before proceeding.
            await self.wait_for_previous_lookups(server_to_request_ids.keys())

            # take out a lock on each of the servers by sticking a Deferred in
            # key_downloads
            for server_name in server_to_request_ids.keys():
                self.key_downloads[server_name] = defer.Deferred()
                logger.debug("Got key lookup lock on %s", server_name)

            # When we've finished fetching all the keys for a given server_name,
            # drop the lock by resolving the deferred in key_downloads.
def drop_server_lock(server_name): d = self.key_downloads.pop(server_name) d.callback(None) def lookup_done(res, verify_request): server_name = verify_request.server_name server_requests = server_to_request_ids[server_name] server_requests.remove(id(verify_request)) # if there are no more requests for this server, we can drop the lock. if not server_requests: logger.debug("Releasing key lookup lock on %s", server_name) drop_server_lock(server_name) return res for verify_request in verify_requests: verify_request.key_ready.addBoth(lookup_done, verify_request) # Actually start fetching keys. self._get_server_verify_keys(verify_requests) except Exception: logger.exception("Error starting key lookups") async def wait_for_previous_lookups(self, server_names) -> None: """Waits for any previous key lookups for the given servers to finish. Args: server_names (Iterable[str]): list of servers which we want to look up Returns: Resolves once all key lookups for the given servers have completed. Follows the synapse rules of logcontext preservation. """ loop_count = 1 while True: wait_on = [ (server_name, self.key_downloads[server_name]) for server_name in server_names if server_name in self.key_downloads ] if not wait_on: break logger.info( "Waiting for existing lookups for %s to complete [loop %i]", [w[0] for w in wait_on], loop_count, ) with PreserveLoggingContext(): await defer.DeferredList((w[1] for w in wait_on)) loop_count += 1 def _get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request For each verify_request, verify_request.key_ready is called back with params (server_name, key_id, VerifyKey) if a key is found, or errbacked with a SynapseError if none of the keys are found. 
Args: verify_requests (list[VerifyJsonRequest]): list of verify requests """ remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called} async def do_iterations(): try: with Measure(self.clock, "get_server_verify_keys"): for f in self._key_fetchers: if not remaining_requests: return await self._attempt_key_fetches_with_fetcher( f, remaining_requests ) # look for any requests which weren't satisfied while remaining_requests: verify_request = remaining_requests.pop() rq_str = ( "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)" % ( verify_request.server_name, verify_request.key_ids, verify_request.minimum_valid_until_ts, ) ) # If we run the errback immediately, it may cancel our # loggingcontext while we are still in it, so instead we # schedule it for the next time round the reactor. # # (this also ensures that we don't get a stack overflow if we # has a massive queue of lookups waiting for this server). self.clock.call_later( 0, verify_request.key_ready.errback, SynapseError( 401, "Failed to find any key to satisfy %s" % (rq_str,), Codes.UNAUTHORIZED, ), ) except Exception as err: # we don't really expect to get here, because any errors should already # have been caught and logged. But if we do, let's log the error and make # sure that all of the deferreds are resolved. logger.error("Unexpected error in _get_server_verify_keys: %s", err) with PreserveLoggingContext(): for verify_request in remaining_requests: if not verify_request.key_ready.called: verify_request.key_ready.errback(err) run_in_background(do_iterations) async def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): """Use a key fetcher to attempt to satisfy some key requests Args: fetcher (KeyFetcher): fetcher to use to fetch the keys remaining_requests (set[VerifyJsonRequest]): outstanding key requests. Any successfully-completed requests will be removed from the list. """ # dict[str, dict[str, int]]: keys to fetch. 
# server_name -> key_id -> min_valid_ts missing_keys = defaultdict(dict) for verify_request in remaining_requests: # any completed requests should already have been removed assert not verify_request.key_ready.called keys_for_server = missing_keys[verify_request.server_name] for key_id in verify_request.key_ids: # If we have several requests for the same key, then we only need to # request that key once, but we should do so with the greatest # min_valid_until_ts of the requests, so that we can satisfy all of # the requests. keys_for_server[key_id] = max( keys_for_server.get(key_id, -1), verify_request.minimum_valid_until_ts, ) results = await fetcher.get_keys(missing_keys) completed = [] for verify_request in remaining_requests: server_name = verify_request.server_name # see if any of the keys we got this time are sufficient to # complete this VerifyJsonRequest. result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: fetch_key_result = result_keys.get(key_id) if not fetch_key_result: # we didn't get a result for this key continue if ( fetch_key_result.valid_until_ts < verify_request.minimum_valid_until_ts ): # key was not valid at this point continue # we have a valid key for this request. If we run the callback # immediately, it may cancel our loggingcontext while we are still in # it, so instead we schedule it for the next time round the reactor. # # (this also ensures that we don't get a stack overflow if we had # a massive queue of lookups waiting for this server). logger.debug( "Found key %s:%s for %s", server_name, key_id, verify_request.request_name, ) self.clock.call_later( 0, verify_request.key_ready.callback, (server_name, key_id, fetch_key_result.verify_key), ) completed.append(verify_request) break remaining_requests.difference_update(completed) class KeyFetcher: async def get_keys(self, keys_to_fetch): """ Args: keys_to_fetch (dict[str, dict[str, int]]): the keys to be fetched. 
server_name -> key_id -> min_valid_ts Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: map from server_name -> key_id -> FetchKeyResult """ raise NotImplementedError class StoreKeyFetcher(KeyFetcher): """KeyFetcher impl which fetches keys from our data store""" def __init__(self, hs): self.store = hs.get_datastore() async def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" keys_to_fetch = ( (server_name, key_id) for server_name, keys_for_server in keys_to_fetch.items() for key_id in keys_for_server.keys() ) res = await self.store.get_server_verify_keys(keys_to_fetch) keys = {} for (server_name, key_id), key in res.items(): keys.setdefault(server_name, {})[key_id] = key return keys class BaseV2KeyFetcher: def __init__(self, hs): self.store = hs.get_datastore() self.config = hs.get_config() async def process_v2_response(self, from_server, response_json, time_added_ms): """Parse a 'Server Keys' structure from the result of a /key request This is used to parse either the entirety of the response from GET /_matrix/key/v2/server, or a single entry from the list returned by POST /_matrix/key/v2/query. Checks that each signature in the response that claims to come from the origin server is valid, and that there is at least one such signature. Stores the json in server_keys_json so that it can be used for future responses to /_matrix/key/v2/query. Args: from_server (str): the name of the server producing this result: either the origin server for a /_matrix/key/v2/server request, or the notary for a /_matrix/key/v2/query. response_json (dict): the json-decoded Server Keys response object time_added_ms (int): the timestamp to record in server_keys_json Returns: Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ ts_valid_until_ms = response_json["valid_until_ts"] # start by extracting the keys from the response, since they may be required # to validate the signature on the response. 
verify_keys = {} for key_id, key_data in response_json["verify_keys"].items(): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) verify_keys[key_id] = FetchKeyResult( verify_key=verify_key, valid_until_ts=ts_valid_until_ms ) server_name = response_json["server_name"] verified = False for key_id in response_json["signatures"].get(server_name, {}): key = verify_keys.get(key_id) if not key: # the key may not be present in verify_keys if: # * we got the key from the notary server, and: # * the key belongs to the notary server, and: # * the notary server is using a different key to sign notary # responses. continue verify_signed_json(response_json, server_name, key.verify_key) verified = True break if not verified: raise KeyLookupError( "Key response for %s is not signed by the origin server" % (server_name,) ) for key_id, key_data in response_json["old_verify_keys"].items(): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) verify_keys[key_id] = FetchKeyResult( verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) key_json_bytes = encode_canonical_json(response_json) await make_deferred_yieldable( defer.gatherResults( [ run_in_background( self.store.store_server_keys_json, server_name=server_name, key_id=key_id, from_server=from_server, ts_now_ms=time_added_ms, ts_expires_ms=ts_valid_until_ms, key_json_bytes=key_json_bytes, ) for key_id in verify_keys ], consumeErrors=True, ).addErrback(unwrapFirstError) ) return verify_keys class PerspectivesKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the "perspectives" servers""" def __init__(self, hs): super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_federation_http_client() self.key_servers = self.config.key_servers async def get_keys(self, 
keys_to_fetch): """see KeyFetcher.get_keys""" async def get_key(key_server): try: result = await self.get_server_verify_key_v2_indirect( keys_to_fetch, key_server ) return result except KeyLookupError as e: logger.warning( "Key lookup failed from %r: %s", key_server.server_name, e ) except Exception as e: logger.exception( "Unable to get key from %r: %s %s", key_server.server_name, type(e).__name__, str(e), ) return {} results = await make_deferred_yieldable( defer.gatherResults( [run_in_background(get_key, server) for server in self.key_servers], consumeErrors=True, ).addErrback(unwrapFirstError) ) union_of_keys = {} for result in results: for server_name, keys in result.items(): union_of_keys.setdefault(server_name, {}).update(keys) return union_of_keys async def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server): """ Args: keys_to_fetch (dict[str, dict[str, int]]): the keys to be fetched. server_name -> key_id -> min_valid_ts key_server (synapse.config.key.TrustedKeyServer): notary server to query for the keys Returns: dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]: map from server_name -> key_id -> FetchKeyResult Raises: KeyLookupError if there was an error processing the entire response from the server """ perspective_name = key_server.server_name logger.info( "Requesting keys %s from notary server %s", keys_to_fetch.items(), perspective_name, ) try: query_response = await self.client.post_json( destination=perspective_name, path="/_matrix/key/v2/query", data={ "server_keys": { server_name: { key_id: {"minimum_valid_until_ts": min_valid_ts} for key_id, min_valid_ts in server_keys.items() } for server_name, server_keys in keys_to_fetch.items() } }, ) except (NotRetryingDestination, RequestSendFailed) as e: # these both have str() representations which we can't really improve upon raise KeyLookupError(str(e)) except HttpResponseException as e: raise KeyLookupError("Remote server returned an error: %s" % (e,)) keys = {} added_keys = [] 
time_now_ms = self.clock.time_msec() for response in query_response["server_keys"]: # do this first, so that we can give useful errors thereafter server_name = response.get("server_name") if not isinstance(server_name, str): raise KeyLookupError( "Malformed response from key notary server %s: invalid server_name" % (perspective_name,) ) try: self._validate_perspectives_response(key_server, response) processed_response = await self.process_v2_response( perspective_name, response, time_added_ms=time_now_ms ) except KeyLookupError as e: logger.warning( "Error processing response from key notary server %s for origin " "server %s: %s", perspective_name, server_name, e, ) # we continue to process the rest of the response continue added_keys.extend( (server_name, key_id, key) for key_id, key in processed_response.items() ) keys.setdefault(server_name, {}).update(processed_response) await self.store.store_server_verify_keys( perspective_name, time_now_ms, added_keys ) return keys def _validate_perspectives_response(self, key_server, response): """Optionally check the signature on the result of a /key/query request Args: key_server (synapse.config.key.TrustedKeyServer): the notary server that produced this result response (dict): the json-decoded Server Keys response object """ perspective_name = key_server.server_name perspective_keys = key_server.verify_keys if perspective_keys is None: # signature checking is disabled on this server return if ( "signatures" not in response or perspective_name not in response["signatures"] ): raise KeyLookupError("Response not signed by the notary server") verified = False for key_id in response["signatures"][perspective_name]: if key_id in perspective_keys: verify_signed_json(response, perspective_name, perspective_keys[key_id]) verified = True if not verified: raise KeyLookupError( "Response not signed with a known key: signed with: %r, known keys: %r" % ( list(response["signatures"][perspective_name].keys()), 
list(perspective_keys.keys()), ) ) class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" def __init__(self, hs): super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_federation_http_client() async def get_keys(self, keys_to_fetch): """ Args: keys_to_fetch (dict[str, iterable[str]]): the keys to be fetched. server_name -> key_ids Returns: dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]: map from server_name -> key_id -> FetchKeyResult """ results = {} async def get_key(key_to_fetch_item): server_name, key_ids = key_to_fetch_item try: keys = await self.get_server_verify_key_v2_direct(server_name, key_ids) results[server_name] = keys except KeyLookupError as e: logger.warning( "Error looking up keys %s from %s: %s", key_ids, server_name, e ) except Exception: logger.exception("Error getting keys %s from %s", key_ids, server_name) await yieldable_gather_results(get_key, keys_to_fetch.items()) return results async def get_server_verify_key_v2_direct(self, server_name, key_ids): """ Args: server_name (str): key_ids (iterable[str]): Returns: dict[str, FetchKeyResult]: map from key ID to lookup result Raises: KeyLookupError if there was a problem making the lookup """ keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: # we may have found this key as a side-effect of asking for another. if requested_key_id in keys: continue time_now_ms = self.clock.time_msec() try: response = await self.client.get_json( destination=server_name, path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id), ignore_backoff=True, # we only give the remote server 10s to respond. It should be an # easy request to handle, so if it doesn't reply within 10s, it's # probably not going to. # # Furthermore, when we are acting as a notary server, we cannot # wait all day for all of the origin servers, as the requesting # server will otherwise time out before we can respond. 
# # (Note that get_json may make 4 attempts, so this can still take # almost 45 seconds to fetch the headers, plus up to another 60s to # read the response). timeout=10000, ) except (NotRetryingDestination, RequestSendFailed) as e: # these both have str() representations which we can't really improve # upon raise KeyLookupError(str(e)) except HttpResponseException as e: raise KeyLookupError("Remote server returned an error: %s" % (e,)) if response["server_name"] != server_name: raise KeyLookupError( "Expected a response for server %r not %r" % (server_name, response["server_name"]) ) response_keys = await self.process_v2_response( from_server=server_name, response_json=response, time_added_ms=time_now_ms, ) await self.store.store_server_verify_keys( server_name, time_now_ms, ((server_name, key_id, key) for key_id, key in response_keys.items()), ) keys.update(response_keys) return keys async def _handle_key_deferred(verify_request) -> None: """Waits for the key to become available, and then performs a verification Args: verify_request (VerifyJsonRequest): Raises: SynapseError if there was a problem performing the verification """ server_name = verify_request.server_name with PreserveLoggingContext(): _, key_id, verify_key = await verify_request.key_ready json_object = verify_request.json_object try: verify_signed_json(json_object, server_name, verify_key) except SignatureVerifyException as e: logger.debug( "Error verifying signature for %s:%s:%s with key %s: %s", server_name, verify_key.alg, verify_key.version, encode_verify_key_base64(verify_key), str(e), ) raise SynapseError( 401, "Invalid signature for server %s with key %s:%s: %s" % (server_name, verify_key.alg, verify_key.version, str(e)), Codes.UNAUTHORIZED, )
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_4
crossvul-python_data_bad_1915_7
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains handlers for federation events.""" import itertools import logging from collections.abc import Container from http import HTTPStatus from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union import attr from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 from twisted.internet import defer from synapse import event_auth from synapse.api.constants import ( EventTypes, Membership, RejectedReason, RoomEncryptionAlgorithms, ) from synapse.api.errors import ( AuthError, CodeMessageException, Codes, FederationDeniedError, FederationError, HttpResponseException, NotFoundError, RequestSendFailed, SynapseError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import auth_types_for_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.handlers._base import BaseHandler from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, nested_logging_context, preserve_fn, run_in_background, ) from 
synapse.logging.utils import log_function from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationFederationSendEventsRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, ) from synapse.state import StateResolutionStore from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( JsonDict, MutableStateMap, PersistedEventPosition, RoomStreamToken, StateMap, UserID, get_domain_from_id, ) from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) @attr.s(slots=True) class _NewEventInfo: """Holds information about a received event, ready for passing to _handle_new_events Attributes: event: the received event state: the state at that event auth_events: the auth_event map for that event """ event = attr.ib(type=EventBase) state = attr.ib(type=Optional[Sequence[EventBase]], default=None) auth_events = attr.ib(type=Optional[MutableStateMap[EventBase]], default=None) class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: a) handling received Pdus before handing them on as Events to the rest of the homeserver (including auth and state conflict resolutions) b) converting events that were produced by local clients that may need to be sent to remote homeservers. c) doing the necessary dances to invite remote users and join remote rooms. 
""" def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs self.store = hs.get_datastore() self.storage = hs.get_storage() self.state_store = self.storage.state self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() self._state_resolution_handler = hs.get_state_resolution_handler() self.server_name = hs.hostname self.keyring = hs.get_keyring() self.action_generator = hs.get_action_generator() self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self._message_handler = hs.get_message_handler() self._server_notices_mxid = hs.config.server_notices_mxid self.config = hs.config self.http_client = hs.get_simple_http_client() self._instance_name = hs.get_instance_name() self._replication = hs.get_replication_data_handler() self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs) self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client( hs ) if hs.config.worker_app: self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client( hs ) self._maybe_store_room_on_outlier_membership = ReplicationStoreRoomOnOutlierMembershipRestServlet.make_client( hs ) else: self._device_list_updater = hs.get_device_handler().device_list_updater self._maybe_store_room_on_outlier_membership = ( self.store.maybe_store_room_on_outlier_membership ) # When joining a room we need to queue any events for that room up. # For each room, a list of (pdu, origin) tuples. 
self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]] self._room_pdu_linearizer = Linearizer("fed_room_pdu") self.third_party_event_rules = hs.get_third_party_event_rules() self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events Args: origin (str): server which initiated the /send/ transaction. Will be used to fetch missing events or state. pdu (FrozenEvent): received PDU sent_to_us_directly (bool): True if this event was pushed to us; False if we pulled it as the result of a missing prev_event. """ room_id = pdu.room_id event_id = pdu.event_id logger.info("handling received PDU: %s", pdu) # We reprocess pdus when we have seen them only as outliers existing = await self.store.get_event( event_id, allow_none=True, allow_rejected=True ) # FIXME: Currently we fetch an event again when we already have it # if it has been marked as an outlier. already_seen = existing and ( not existing.internal_metadata.is_outlier() or pdu.internal_metadata.is_outlier() ) if already_seen: logger.debug("[%s %s]: Already seen pdu", room_id, event_id) return # do some initial sanity-checking of the event. In particular, make # sure it doesn't have hundreds of prev_events or auth_events, which # could cause a huge state resolution or cascade of event fetches. try: self._sanity_check_event(pdu) except SynapseError as err: logger.warning( "[%s %s] Received event failed sanity checks", room_id, event_id ) raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id) # If we are currently in the process of joining this room, then we # queue up events for later processing. 
if room_id in self.room_queues: logger.info( "[%s %s] Queuing PDU from %s for now: join in progress", room_id, event_id, origin, ) self.room_queues[room_id].append((pdu, origin)) return # If we're not in the room just ditch the event entirely. This is # probably an old server that has come back and thinks we're still in # the room (or we've been rejoined to the room by a state reset). # # Note that if we were never in the room then we would have already # dropped the event, since we wouldn't know the room version. is_in_room = await self.auth.check_host_in_room(room_id, self.server_name) if not is_in_room: logger.info( "[%s %s] Ignoring PDU from %s as we're not in the room", room_id, event_id, origin, ) return None state = None # Get missing pdus if necessary. if not pdu.internal_metadata.is_outlier(): # We only backfill backwards to the min depth. min_depth = await self.get_min_depth_for_context(pdu.room_id) logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth) prevs = set(pdu.prev_event_ids()) seen = await self.store.have_events_in_timeline(prevs) if min_depth is not None and pdu.depth < min_depth: # This is so that we don't notify the user about this # message, to work around the fact that some events will # reference really really old events we really don't want to # send to the clients. pdu.internal_metadata.outlier = True elif min_depth is not None and pdu.depth > min_depth: missing_prevs = prevs - seen if sent_to_us_directly and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. 
logger.info( "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s", room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) with (await self._room_pdu_linearizer.queue(pdu.room_id)): logger.info( "[%s %s] Acquired room lock to fetch %d missing prev_events", room_id, event_id, len(missing_prevs), ) try: await self._get_missing_events_for_pdu( origin, pdu, prevs, min_depth ) except Exception as e: raise Exception( "Error fetching missing prev_events for %s: %s" % (event_id, e) ) from e # Update the set of things we've seen after trying to # fetch the missing stuff seen = await self.store.have_events_in_timeline(prevs) if not prevs - seen: logger.info( "[%s %s] Found all missing prev_events", room_id, event_id, ) if prevs - seen: # We've still not been able to get all of the prev_events for this event. # # In this case, we need to fall back to asking another server in the # federation for the state at this event. That's ok provided we then # resolve the state against other bits of the DAG before using it (which # will ensure that you can't just take over a room by sending an event, # withholding its prev_events, and declaring yourself to be an admin in # the subsequent state request). # # Now, if we're pulling this event as a missing prev_event, then clearly # this event is not going to become the only forward-extremity and we are # guaranteed to resolve its state against our existing forward # extremities, so that should be fine. # # On the other hand, if this event was pushed to us, it is possible for # it to become the only forward-extremity in the room, and we would then # trust its state to be the state for the whole room. This is very bad. # Further, if the event was pushed to us, there is no excuse for us not to # have all the prev_events. We therefore reject any such events. # # XXX this really feels like it could/should be merged with the above, # but there is an interaction with min_depth that I'm not really # following. 
if sent_to_us_directly: logger.warning( "[%s %s] Rejecting: failed to fetch %d prev events: %s", room_id, event_id, len(prevs - seen), shortstr(prevs - seen), ) raise FederationError( "ERROR", 403, ( "Your server isn't divulging details about prev_events " "referenced in this event." ), affected=pdu.event_id, ) logger.info( "Event %s is missing prev_events: calculating state for a " "backwards extremity", event_id, ) # Calculate the state after each of the previous events, and # resolve them to find the correct state at the current event. event_map = {event_id: pdu} try: # Get the state of the events we know about ours = await self.state_store.get_state_groups_ids(room_id, seen) # state_maps is a list of mappings from (type, state_key) to event_id state_maps = list(ours.values()) # type: List[StateMap[str]] # we don't need this any more, let's delete it. del ours # Ask the remote server for the states we don't # know about for p in prevs - seen: logger.info( "Requesting state at missing prev_event %s", event_id, ) with nested_logging_context(p): # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. (remote_state, _,) = await self._get_state_for_room( origin, room_id, p, include_event_in_state=True ) remote_state_map = { (x.type, x.state_key): x.event_id for x in remote_state } state_maps.append(remote_state_map) for x in remote_state: event_map[x.event_id] = x room_version = await self.store.get_room_version_id(room_id) state_map = await self._state_resolution_handler.resolve_events_with_store( room_id, room_version, state_maps, event_map, state_res_store=StateResolutionStore(self.store), ) # We need to give _process_received_pdu the actual state events # rather than event ids, so generate that now. # First though we need to fetch all the events that are in # state_map, so we can build up the state below. 
evs = await self.store.get_events( list(state_map.values()), get_prev_content=False, redact_behaviour=EventRedactBehaviour.AS_IS, ) event_map.update(evs) state = [event_map[e] for e in state_map.values()] except Exception: logger.warning( "[%s %s] Error attempting to resolve state at missing " "prev_events", room_id, event_id, exc_info=True, ) raise FederationError( "ERROR", 403, "We can't get valid state history.", affected=event_id, ) await self._process_received_pdu(origin, pdu, state=state) async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): """ Args: origin (str): Origin of the pdu. Will be called to get the missing events pdu: received pdu prevs (set(str)): List of event ids which we are missing min_depth (int): Minimum depth of events to return. """ room_id = pdu.room_id event_id = pdu.event_id seen = await self.store.have_events_in_timeline(prevs) if not prevs - seen: return latest_list = await self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us latest = set(latest_list) latest |= seen logger.info( "[%s %s]: Requesting missing events between %s and %s", room_id, event_id, shortstr(latest), event_id, ) # XXX: we set timeout to 10s to help workaround # https://github.com/matrix-org/synapse/issues/1733. # The reason is to avoid holding the linearizer lock # whilst processing inbound /send transactions, causing # FDs to stack up and block other inbound transactions # which empirically can currently take up to 30 minutes. # # N.B. this explicitly disables retry attempts. # # N.B. this also increases our chances of falling back to # fetching fresh state for the room if the missing event # can't be found, which slightly reduces our security. # it may also increase our DAG extremity count for the room, # causing additional state resolution? See #1760. # However, fetching state doesn't hold the linearizer lock # apparently. 
# # see https://github.com/matrix-org/synapse/pull/1744 # # ---- # # Update richvdh 2018/09/18: There are a number of problems with timing this # request out aggressively on the client side: # # - it plays badly with the server-side rate-limiter, which starts tarpitting you # if you send too many requests at once, so you end up with the server carefully # working through the backlog of your requests, which you have already timed # out. # # - for this request in particular, we now (as of # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the # server can't produce a plausible-looking set of prev_events - so we becone # much more likely to reject the event. # # - contrary to what it says above, we do *not* fall back to fetching fresh state # for the room if get_missing_events times out. Rather, we give up processing # the PDU whose prevs we are missing, which then makes it much more likely that # we'll end up back here for the *next* PDU in the list, which exacerbates the # problem. # # - the aggressive 10s timeout was introduced to deal with incoming federation # requests taking 8 hours to process. It's not entirely clear why that was going # on; certainly there were other issues causing traffic storms which are now # resolved, and I think in any case we may be more sensible about our locking # now. We're *certainly* more sensible about our logging. # # All that said: Let's try increasing the timeout to 60s and see what happens. try: missing_events = await self.federation_client.get_missing_events( origin, room_id, earliest_events_ids=list(latest), latest_events=[pdu], limit=10, min_depth=min_depth, timeout=60000, ) except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e: # We failed to get the missing events, but since we need to handle # the case of `get_missing_events` not returning the necessary # events anyway, it is safe to simply log the error and continue. 
            logger.warning(
                "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
            )
            return

        logger.info(
            "[%s %s]: Got %d prev_events: %s",
            room_id,
            event_id,
            len(missing_events),
            shortstr(missing_events),
        )

        # We want to sort these by depth so we process them and
        # tell clients about them in order.
        missing_events.sort(key=lambda x: x.depth)

        # Feed each recovered event back through the normal receive path.
        # `sent_to_us_directly=False` means a failure to resolve *its*
        # prev_events will not raise back to the remote sender.
        for ev in missing_events:
            logger.info(
                "[%s %s] Handling received prev_event %s",
                room_id,
                event_id,
                ev.event_id,
            )
            with nested_logging_context(ev.event_id):
                try:
                    await self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
                except FederationError as e:
                    if e.code == 403:
                        # A prev_event that fails the history check is logged
                        # and skipped rather than aborting the whole batch.
                        logger.warning(
                            "[%s %s] Received prev_event %s failed history check.",
                            room_id,
                            event_id,
                            ev.event_id,
                        )
                    else:
                        raise

    async def _get_state_for_room(
        self,
        destination: str,
        room_id: str,
        event_id: str,
        include_event_in_state: bool = False,
    ) -> Tuple[List[EventBase], List[EventBase]]:
        """Requests all of the room state at a given event from a remote homeserver.

        Args:
            destination: The remote homeserver to query for the state.
            room_id: The id of the room we're interested in.
            event_id: The id of the event we want the state at.
            include_event_in_state: if true, the event itself will be included in the
                returned state event list.

        Returns:
            A list of events in the state, possibly including the event itself,
            and a list of events in the auth chain for the given event.
        """
        # Ask the remote server for the *ids* of the state and auth events;
        # we then fetch the bodies ourselves (from our store where possible).
        (
            state_event_ids,
            auth_event_ids,
        ) = await self.federation_client.get_room_state_ids(
            destination, room_id, event_id=event_id
        )

        desired_events = set(state_event_ids + auth_event_ids)

        if include_event_in_state:
            desired_events.add(event_id)

        event_map = await self._get_events_from_store_or_dest(
            destination, room_id, desired_events
        )

        failed_to_fetch = desired_events - event_map.keys()
        if failed_to_fetch:
            # Best-effort: events we could not fetch are logged and simply
            # omitted from the returned state/auth lists below.
            logger.warning(
                "Failed to fetch missing state/auth events for %s %s",
                event_id,
                failed_to_fetch,
            )

        remote_state = [
            event_map[e_id] for e_id in state_event_ids if e_id in event_map
        ]

        if include_event_in_state:
            remote_event = event_map.get(event_id)
            if not remote_event:
                raise Exception("Unable to get missing prev_event %s" % (event_id,))
            # Only include the event itself if it is a non-rejected state event.
            if remote_event.is_state() and remote_event.rejected_reason is None:
                remote_state.append(remote_event)

        auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map]
        # Sort shallowest-first so auth events can be processed before the
        # events that depend on them.
        auth_chain.sort(key=lambda e: e.depth)

        return remote_state, auth_chain

    async def _get_events_from_store_or_dest(
        self, destination: str, room_id: str, event_ids: Iterable[str]
    ) -> Dict[str, EventBase]:
        """Fetch events from a remote destination, checking if we already have them.

        Persists any events we don't already have as outliers.

        If we fail to fetch any of the events, a warning will be logged, and the
        event will be omitted from the result. Likewise, any events which turn out
        not to be in the given room.

        This function *does not* automatically get missing auth events of the
        newly fetched events. Callers must include the full auth chain of
        of the missing events in the `event_ids` argument, to ensure that any
        missing auth events are correctly fetched.
        Returns:
            map from event_id to event
        """
        # `allow_rejected=True`: we want rejected events too, so we can tell
        # "we have it but rejected it" apart from "we don't have it at all".
        fetched_events = await self.store.get_events(event_ids, allow_rejected=True)

        missing_events = set(event_ids) - fetched_events.keys()

        if missing_events:
            logger.debug(
                "Fetching unknown state/auth events %s for room %s",
                missing_events,
                room_id,
            )

            await self._get_events_and_persist(
                destination=destination, room_id=room_id, events=missing_events
            )

            # we need to make sure we re-load from the database to get the rejected
            # state correct.
            fetched_events.update(
                (await self.store.get_events(missing_events, allow_rejected=True))
            )

        # check for events which were in the wrong room.
        #
        # this can happen if a remote server claims that the state or
        # auth_events at an event in room A are actually events in room B
        bad_events = [
            (event_id, event.room_id)
            for event_id, event in fetched_events.items()
            if event.room_id != room_id
        ]

        for bad_event_id, bad_room_id in bad_events:
            # This is a bogus situation, but since we may only discover it a long time
            # after it happened, we try our best to carry on, by just omitting the
            # bad events from the returned auth/state set.
            logger.warning(
                "Remote server %s claims event %s in room %s is an auth/state "
                "event in room %s",
                destination,
                bad_event_id,
                bad_room_id,
                room_id,
            )

            del fetched_events[bad_event_id]

        return fetched_events

    async def _process_received_pdu(
        self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]],
    ):
        """ Called when we have a new pdu. We need to do auth checks and put it
        through the StateHandler.
        Args:
            origin: server sending the event

            event: event to be persisted

            state: Normally None, but if we are handling a gap in the graph
                (ie, we are missing one or more prev_events), the resolved state at the
                event
        """
        room_id = event.room_id
        event_id = event.event_id

        logger.debug("[%s %s] Processing event: %s", room_id, event_id, event)

        try:
            await self._handle_new_event(origin, event, state=state)
        except AuthError as e:
            # Surface auth failures to the sending server as a FederationError.
            raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)

        # For encrypted messages we check that we know about the sending device,
        # if we don't then we mark the device cache for that user as stale.
        if event.type == EventTypes.Encrypted:
            device_id = event.content.get("device_id")
            sender_key = event.content.get("sender_key")

            cached_devices = await self.store.get_cached_devices_for_user(event.sender)

            resync = False  # Whether we should resync device lists.

            device = None
            if device_id is not None:
                device = cached_devices.get(device_id)
                if device is None:
                    logger.info(
                        "Received event from remote device not in our cache: %s %s",
                        event.sender,
                        device_id,
                    )
                    resync = True

            # We also check if the `sender_key` matches what we expect.
            if sender_key is not None:
                # Figure out what sender key we're expecting. If we know the
                # device and recognize the algorithm then we can work out the
                # exact key to expect. Otherwise check it matches any key we
                # have for that device.

                current_keys = []  # type: Container[str]

                if device:
                    keys = device.get("keys", {}).get("keys", {})

                    if (
                        event.content.get("algorithm")
                        == RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
                    ):
                        # For this algorithm we expect a curve25519 key.
                        key_name = "curve25519:%s" % (device_id,)
                        current_keys = [keys.get(key_name)]
                    else:
                        # We don't understand the algorithm, so we just
                        # check it matches a key for the device.
                        current_keys = keys.values()
                elif device_id:
                    # We don't have any keys for the device ID.
                    pass
                else:
                    # The event didn't include a device ID, so we just look for
                    # keys across all devices.
current_keys = [ key for device in cached_devices.values() for key in device.get("keys", {}).get("keys", {}).values() ] # We now check that the sender key matches (one of) the expected # keys. if sender_key not in current_keys: logger.info( "Received event from remote device with unexpected sender key: %s %s: %s", event.sender, device_id or "<no device_id>", sender_key, ) resync = True if resync: run_as_background_process( "resync_device_due_to_pdu", self._resync_device, event.sender ) async def _resync_device(self, sender: str) -> None: """We have detected that the device list for the given user may be out of sync, so we try and resync them. """ try: await self.store.mark_remote_user_device_cache_as_stale(sender) # Immediately attempt a resync in the background if self.config.worker_app: await self._user_device_resync(user_id=sender) else: await self._device_list_updater.user_device_resync(sender) except Exception: logger.exception("Failed to resync device for %s", sender) @log_function async def backfill(self, dest, room_id, limit, extremities): """ Trigger a backfill request to `dest` for the given `room_id` This will attempt to get more events from the remote. If the other side has no new events to offer, this will return an empty list. As the events are received, we check their signatures, and also do some sanity-checking on them. If any of the backfilled events are invalid, this method throws a SynapseError. TODO: make this more useful to distinguish failures of the remote server from invalid events (there is probably no point in trying to re-fetch invalid events from every other HS in the room.) 
        """
        if dest == self.server_name:
            raise SynapseError(400, "Can't backfill from self.")

        events = await self.federation_client.backfill(
            dest, room_id, limit=limit, extremities=extremities
        )

        if not events:
            return []

        # ideally we'd sanity check the events here for excess prev_events etc,
        # but it's hard to reject events at this point without completely
        # breaking backfill in the same way that it is currently broken by
        # events whose signature we cannot verify (#3121).
        #
        # So for now we accept the events anyway. #3124 tracks this.
        #
        # for ev in events:
        #     self._sanity_check_event(ev)

        # Don't bother processing events we already have.
        seen_events = await self.store.have_events_in_timeline(
            {e.event_id for e in events}
        )

        events = [e for e in events if e.event_id not in seen_events]

        if not events:
            return []

        event_map = {e.event_id: e for e in events}

        event_ids = {e.event_id for e in events}

        # build a list of events whose prev_events weren't in the batch.
        # (XXX: this will include events whose prev_events we already have; that doesn't
        # sound right?)
        edges = [ev.event_id for ev in events if set(ev.prev_event_ids()) - event_ids]

        logger.info("backfill: Got %d events with %d edges", len(events), len(edges))

        # For each edge get the current state.

        auth_events = {}
        state_events = {}
        events_to_state = {}
        for e_id in edges:
            state, auth = await self._get_state_for_room(
                destination=dest,
                room_id=room_id,
                event_id=e_id,
                include_event_in_state=False,
            )
            # NOTE: state events are added to the auth map too, since auth
            # events referenced below may be satisfied by state events.
            auth_events.update({a.event_id: a for a in auth})
            auth_events.update({s.event_id: s for s in state})
            state_events.update({s.event_id: s for s in state})
            events_to_state[e_id] = state

        # Collect every auth event id referenced by any event we are about to
        # persist, and satisfy as many as possible from the fetched batch.
        required_auth = {
            a_id
            for event in events
            + list(state_events.values())
            + list(auth_events.values())
            for a_id in event.auth_event_ids()
        }
        auth_events.update(
            {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map}
        )

        ev_infos = []

        # Step 1: persist the events in the chunk we fetched state for (i.e.
# the backwards extremities), with custom auth events and state for e_id in events_to_state: # For paranoia we ensure that these events are marked as # non-outliers ev = event_map[e_id] assert not ev.internal_metadata.is_outlier() ev_infos.append( _NewEventInfo( event=ev, state=events_to_state[e_id], auth_events={ ( auth_events[a_id].type, auth_events[a_id].state_key, ): auth_events[a_id] for a_id in ev.auth_event_ids() if a_id in auth_events }, ) ) if ev_infos: await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) for event in events: if event in events_to_state: continue # For paranoia we ensure that these events are marked as # non-outliers assert not event.internal_metadata.is_outlier() # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. await self._handle_new_event(dest, event, backfilled=True) return events async def maybe_backfill( self, room_id: str, current_depth: int, limit: int ) -> bool: """Checks the database to see if we should backfill before paginating, and if so do. Args: room_id current_depth: The depth from which we're paginating from. This is used to decide if we should backfill and what extremities to use. limit: The number of events that the pagination request will return. This is used as part of the heuristic to decide if we should back paginate. """ extremities = await self.store.get_oldest_events_with_depth_in_room(room_id) if not extremities: logger.debug("Not backfilling as no extremeties found.") return False # We only want to paginate if we can actually see the events we'll get, # as otherwise we'll just spend a lot of resources to get redacted # events. # # We do this by filtering all the backwards extremities and seeing if # any remain. 
Given we don't have the extremity events themselves, we # need to actually check the events that reference them. # # *Note*: the spec wants us to keep backfilling until we reach the start # of the room in case we are allowed to see some of the history. However # in practice that causes more issues than its worth, as a) its # relatively rare for there to be any visible history and b) even when # there is its often sufficiently long ago that clients would stop # attempting to paginate before backfill reached the visible history. # # TODO: If we do do a backfill then we should filter the backwards # extremities to only include those that point to visible portions of # history. # # TODO: Correctly handle the case where we are allowed to see the # forward event but not the backward extremity, e.g. in the case of # initial join of the server where we are allowed to see the join # event but not anything before it. This would require looking at the # state *before* the event, ignoring the special casing certain event # types have. forward_events = await self.store.get_successor_events(list(extremities)) extremities_events = await self.store.get_events( forward_events, redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, ) # We set `check_history_visibility_only` as we might otherwise get false # positives from users having been erased. filtered_extremities = await filter_events_for_server( self.storage, self.server_name, list(extremities_events.values()), redact=False, check_history_visibility_only=True, ) if not filtered_extremities: return False # Check if we reached a point where we should start backfilling. sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1])) max_depth = sorted_extremeties_tuple[0][1] # If we're approaching an extremity we trigger a backfill, otherwise we # no-op. 
# # We chose twice the limit here as then clients paginating backwards # will send pagination requests that trigger backfill at least twice # using the most recent extremity before it gets removed (see below). We # chose more than one times the limit in case of failure, but choosing a # much larger factor will result in triggering a backfill request much # earlier than necessary. if current_depth - 2 * limit > max_depth: logger.debug( "Not backfilling as we don't need to. %d < %d - 2 * %d", max_depth, current_depth, limit, ) return False logger.debug( "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s", room_id, current_depth, max_depth, sorted_extremeties_tuple, ) # We ignore extremities that have a greater depth than our current depth # as: # 1. we don't really care about getting events that have happened # before our current position; and # 2. we have likely previously tried and failed to backfill from that # extremity, so to avoid getting "stuck" requesting the same # backfill repeatedly we drop those extremities. filtered_sorted_extremeties_tuple = [ t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth ] # However, we need to check that the filtered extremities are non-empty. # If they are empty then either we can a) bail or b) still attempt to # backill. We opt to try backfilling anyway just in case we do get # relevant events. if filtered_sorted_extremeties_tuple: sorted_extremeties_tuple = filtered_sorted_extremeties_tuple # We don't want to specify too many extremities as it causes the backfill # request URI to be too long. extremities = dict(sorted_extremeties_tuple[:5]) # Now we need to decide which hosts to hit first. # First we try hosts that are already in the room # TODO: HEURISTIC ALERT. curr_state = await self.state_handler.get_current_state(room_id) def get_domains_from_state(state): """Get joined domains from state Args: state (dict[tuple, FrozenEvent]): State map from type/state key to event. 
            Returns:
                list[tuple[str, int]]: Returns a list of servers with the
                lowest depth of their joins. Sorted by lowest depth first.
            """
            joined_users = [
                (state_key, int(event.depth))
                for (e_type, state_key), event in state.items()
                if e_type == EventTypes.Member and event.membership == Membership.JOIN
            ]

            joined_domains = {}  # type: Dict[str, int]
            for u, d in joined_users:
                try:
                    dom = get_domain_from_id(u)
                    old_d = joined_domains.get(dom)
                    if old_d:
                        # Keep the earliest (lowest depth) join per domain.
                        joined_domains[dom] = min(d, old_d)
                    else:
                        joined_domains[dom] = d
                except Exception:
                    # Malformed user IDs are skipped rather than aborting.
                    pass

            return sorted(joined_domains.items(), key=lambda d: d[1])

        curr_domains = get_domains_from_state(curr_state)

        # Never try to backfill from ourselves.
        likely_domains = [
            domain for domain, depth in curr_domains if domain != self.server_name
        ]

        async def try_backfill(domains):
            # Try each candidate domain in turn; return True on the first
            # success, False if every domain fails.
            # TODO: Should we try multiple of these at a time?
            for dom in domains:
                try:
                    await self.backfill(
                        dom, room_id, limit=100, extremities=extremities
                    )
                    # If this succeeded then we probably already have the
                    # appropriate stuff.
                    # TODO: We can probably do something more intelligent here.
                    return True
                except SynapseError as e:
                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except HttpResponseException as e:
                    if 400 <= e.code < 500:
                        # 4xx means the request itself was bad: re-raise
                        # rather than trying the next domain.
                        raise e.to_synapse_error()

                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except CodeMessageException as e:
                    if 400 <= e.code < 500:
                        raise

                    logger.info("Failed to backfill from %s because %s", dom, e)
                    continue
                except NotRetryingDestination as e:
                    logger.info(str(e))
                    continue
                except RequestSendFailed as e:
                    logger.info("Failed to get backfill from %s because %s", dom, e)
                    continue
                except FederationDeniedError as e:
                    logger.info(e)
                    continue
                except Exception as e:
                    logger.exception("Failed to backfill from %s because %s", dom, e)
                    continue
            return False

        success = await try_backfill(likely_domains)
        if success:
            return True

        # Huh, well *those* domains didn't work out. Lets try some domains
        # from the time.
tried_domains = set(likely_domains) tried_domains.add(self.server_name) event_ids = list(extremities.keys()) logger.debug("calling resolve_state_groups in _maybe_backfill") resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) states = await make_deferred_yieldable( defer.gatherResults( [resolve(room_id, [e]) for e in event_ids], consumeErrors=True ) ) # dict[str, dict[tuple, str]], a map from event_id to state map of # event_ids. states = dict(zip(event_ids, [s.state for s in states])) state_map = await self.store.get_events( [e_id for ids in states.values() for e_id in ids.values()], get_prev_content=False, ) states = { key: { k: state_map[e_id] for k, e_id in state_dict.items() if e_id in state_map } for key, state_dict in states.items() } for e_id, _ in sorted_extremeties_tuple: likely_domains = get_domains_from_state(states[e_id]) success = await try_backfill( [dom for dom, _ in likely_domains if dom not in tried_domains] ) if success: return True tried_domains.update(dom for dom, _ in likely_domains) return False async def _get_events_and_persist( self, destination: str, room_id: str, events: Iterable[str] ): """Fetch the given events from a server, and persist them as outliers. This function *does not* recursively get missing auth events of the newly fetched events. Callers must include in the `events` argument any missing events from the auth chain. Logs a warning if we can't find the given event. 
        """
        room_version = await self.store.get_room_version(room_id)

        event_map = {}  # type: Dict[str, EventBase]

        async def get_event(event_id: str):
            # Fetch one event from the remote; failures are logged and the
            # event is simply left out of `event_map`.
            with nested_logging_context(event_id):
                try:
                    event = await self.federation_client.get_pdu(
                        [destination], event_id, room_version, outlier=True,
                    )
                    if event is None:
                        logger.warning(
                            "Server %s didn't return event %s", destination, event_id,
                        )
                        return

                    event_map[event.event_id] = event

                except Exception as e:
                    logger.warning(
                        "Error fetching missing state/auth event %s: %s %s",
                        event_id,
                        type(e),
                        e,
                    )

        # Fetch with a concurrency limit of 5 simultaneous requests.
        await concurrently_execute(get_event, events, 5)

        # Make a map of auth events for each event. We do this after fetching
        # all the events as some of the events' auth events will be in the list
        # of requested events.

        auth_events = [
            aid
            for event in event_map.values()
            for aid in event.auth_event_ids()
            if aid not in event_map
        ]
        persisted_events = await self.store.get_events(
            auth_events, allow_rejected=True,
        )

        event_infos = []
        for event in event_map.values():
            auth = {}
            for auth_event_id in event.auth_event_ids():
                # Prefer the already-persisted copy; fall back to the batch
                # we just fetched.
                ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
                if ae:
                    auth[(ae.type, ae.state_key)] = ae
                else:
                    logger.info("Missing auth event %s", auth_event_id)

            event_infos.append(_NewEventInfo(event, None, auth))

        await self._handle_new_events(
            destination, room_id, event_infos,
        )

    def _sanity_check_event(self, ev):
        """
        Do some early sanity checks of a received event

        In particular, checks it doesn't have an excessive number of
        prev_events or auth_events, which could cause a huge state resolution
        or cascade of event fetches.
        Args:
            ev (synapse.events.EventBase): event to be checked

        Returns:
            None

        Raises:
            SynapseError if the event does not pass muster
        """
        # Limits are arbitrary caps to bound the cost of state resolution /
        # event fetching for a single event.
        if len(ev.prev_event_ids()) > 20:
            logger.warning(
                "Rejecting event %s which has %i prev_events",
                ev.event_id,
                len(ev.prev_event_ids()),
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")

        if len(ev.auth_event_ids()) > 10:
            logger.warning(
                "Rejecting event %s which has %i auth_events",
                ev.event_id,
                len(ev.auth_event_ids()),
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")

    async def send_invite(self, target_host, event):
        """ Sends the invite to the remote server for signing.

        Invites must be signed by the invitee's server before distribution.

        Args:
            target_host (str): server to send the invite to.
            event: the invite event to be signed.

        Returns:
            the signed invite event returned by the remote server.
        """
        pdu = await self.federation_client.send_invite(
            destination=target_host,
            room_id=event.room_id,
            event_id=event.event_id,
            pdu=event,
        )

        return pdu

    async def on_event_auth(self, event_id: str) -> List[EventBase]:
        """Return the full auth chain (including the event's own auth events)
        for the given event id."""
        event = await self.store.get_event(event_id)
        auth = await self.store.get_auth_chain(
            list(event.auth_event_ids()), include_given=True
        )
        return list(auth)

    async def do_invite_join(
        self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict
    ) -> Tuple[str, int]:
        """ Attempts to join the `joinee` to the room `room_id` via the
        servers contained in `target_hosts`.

        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the
        state at that event and the auth_chains.

        We suspend processing of any received events from this room until we
        have finished processing the join.

        Args:
            target_hosts: List of servers to attempt to join the room with.

            room_id: The ID of the room to join.

            joinee: The User ID of the joining user.

            content: The event content to use for the join event.
        """
        # TODO: We should be able to call this on workers, but the upgrading of
        # room stuff after join currently doesn't work on workers.
assert self.config.worker.worker_app is None logger.debug("Joining %s to %s", joinee, room_id) origin, event, room_version_obj = await self._make_and_verify_event( target_hosts, room_id, joinee, "join", content, params={"ver": KNOWN_ROOM_VERSIONS}, ) # This shouldn't happen, because the RoomMemberHandler has a # linearizer lock which only allows one operation per user per room # at a time - so this is just paranoia. assert room_id not in self.room_queues self.room_queues[room_id] = [] await self._clean_room_for_join(room_id) handled_events = set() try: # Try the host we successfully got a response to /make_join/ # request first. host_list = list(target_hosts) try: host_list.remove(origin) host_list.insert(0, origin) except ValueError: pass ret = await self.federation_client.send_join( host_list, event, room_version_obj ) origin = ret["origin"] state = ret["state"] auth_chain = ret["auth_chain"] auth_chain.sort(key=lambda e: e.depth) handled_events.update([s.event_id for s in state]) handled_events.update([a.event_id for a in auth_chain]) handled_events.add(event.event_id) logger.debug("do_invite_join auth_chain: %s", auth_chain) logger.debug("do_invite_join state: %s", state) logger.debug("do_invite_join event: %s", event) # if this is the first time we've joined this room, it's time to add # a row to `rooms` with the correct room version. If there's already a # row there, we should override it, since it may have been populated # based on an invite request which lied about the room version. # # federation_client.send_join has already checked that the room # version in the received create event is the same as room_version_obj, # so we can rely on it now. 
# await self.store.upsert_room_on_join( room_id=room_id, room_version=room_version_obj, ) max_stream_id = await self._persist_auth_tree( origin, room_id, auth_chain, state, event, room_version_obj ) # We wait here until this instance has seen the events come down # replication (if we're using replication) as the below uses caches. await self._replication.wait_for_stream_position( self.config.worker.events_shard_config.get_instance(room_id), "events", max_stream_id, ) # Check whether this room is the result of an upgrade of a room we already know # about. If so, migrate over user information predecessor = await self.store.get_room_predecessor(room_id) if not predecessor or not isinstance(predecessor.get("room_id"), str): return event.event_id, max_stream_id old_room_id = predecessor["room_id"] logger.debug( "Found predecessor for %s during remote join: %s", room_id, old_room_id ) # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() await member_handler.transfer_room_state_on_room_upgrade( old_room_id, room_id ) logger.debug("Finished joining %s to %s", joinee, room_id) return event.event_id, max_stream_id finally: room_queue = self.room_queues[room_id] del self.room_queues[room_id] # we don't need to wait for the queued events to be processed - # it's just a best-effort thing at this point. We do want to do # them roughly in order, though, otherwise we'll end up making # lots of requests for missing prev_events which we do actually # have. Hence we fire off the background task, but don't wait for it. run_in_background(self._handle_queued_pdus, room_queue) async def _handle_queued_pdus(self, room_queue): """Process PDUs which got queued up while we were busy send_joining. 
Args: room_queue (list[FrozenEvent, str]): list of PDUs to be processed and the servers that sent them """ for p, origin in room_queue: try: logger.info( "Processing queued PDU %s which was received " "while we were joining %s", p.event_id, p.room_id, ) with nested_logging_context(p.event_id): await self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warning( "Error handling queued PDU %s from %s: %s", p.event_id, origin, e ) async def on_make_join_request( self, origin: str, room_id: str, user_id: str ) -> EventBase: """ We've received a /make_join/ request, so we create a partial join event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: origin: The (verified) server name of the requesting server. room_id: Room to create join event in user_id: The user to create the join for """ if get_domain_from_id(user_id) != origin: logger.info( "Got /make_join request for user %r from different origin %s, ignoring", user_id, origin, ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) # checking the room version will check that we've actually heard of the room # (and return a 404 otherwise) room_version = await self.store.get_room_version_id(room_id) # now check that we are *still* in the room is_in_room = await self.auth.check_host_in_room(room_id, self.server_name) if not is_in_room: logger.info( "Got /make_join request for room %s we are no longer in", room_id, ) raise NotFoundError("Not an active room on this server") event_content = {"membership": Membership.JOIN} builder = self.event_builder_factory.new( room_version, { "type": EventTypes.Member, "content": event_content, "room_id": room_id, "sender": user_id, "state_key": user_id, }, ) try: event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) except SynapseError as e: logger.warning("Failed to create join to %s because %s", room_id, e) raise # The remote 
hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) return event async def on_send_join_request(self, origin, pdu): """ We have received a join event for a room. Fully process it and respond with the current state and auth chains. """ event = pdu logger.debug( "on_send_join_request from %s: Got event: %s, signatures: %s", origin, event.event_id, event.signatures, ) if get_domain_from_id(event.sender) != origin: logger.info( "Got /send_join request for user %r from different origin %s", event.sender, origin, ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) event.internal_metadata.outlier = False # Send this event on behalf of the origin server. # # The reasons we have the destination server rather than the origin # server send it are slightly mysterious: the origin server should have # all the necessary state once it gets the response to the send_join, # so it could send the event itself if it wanted to. It may be that # doing it this way reduces failure modes, or avoids certain attacks # where a new server selectively tells a subset of the federation that # it has joined. # # The fact is that, as of the current writing, Synapse doesn't send out # the join event over federation after joining, and changing it now # would introduce the danger of backwards-compatibility problems. 
event.internal_metadata.send_on_behalf_of = origin context = await self._handle_new_event(origin, event) logger.debug( "on_send_join_request: After _handle_new_event: %s, sigs: %s", event.event_id, event.signatures, ) prev_state_ids = await context.get_prev_state_ids() state_ids = list(prev_state_ids.values()) auth_chain = await self.store.get_auth_chain(state_ids) state = await self.store.get_events(list(prev_state_ids.values())) return {"state": list(state.values()), "auth_chain": auth_chain} async def on_invite_request( self, origin: str, event: EventBase, room_version: RoomVersion ): """ We've got an invite event. Process and persist it. Sign it. Respond with the now signed event. """ if event.state_key is None: raise SynapseError(400, "The invite event did not have a state key") is_blocked = await self.store.is_room_blocked(event.room_id) if is_blocked: raise SynapseError(403, "This room has been blocked on this server") if self.hs.config.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") if not self.spam_checker.user_may_invite( event.sender, event.state_key, event.room_id ): raise SynapseError( 403, "This user is not permitted to send invites to this server/user" ) membership = event.content.get("membership") if event.type != EventTypes.Member or membership != Membership.INVITE: raise SynapseError(400, "The event was not an m.room.member invite event") sender_domain = get_domain_from_id(event.sender) if sender_domain != origin: raise SynapseError( 400, "The invite event was not from the server sending it" ) if not self.is_mine_id(event.state_key): raise SynapseError(400, "The invite event must be for this server") # block any attempts to invite the server notices mxid if event.state_key == self._server_notices_mxid: raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") # keep a record of the room version, if we don't yet know it. 
# (this may get overwritten if we later get a different room version in a # join dance). await self._maybe_store_room_on_outlier_membership( room_id=event.room_id, room_version=room_version ) event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True event.signatures.update( compute_event_signature( room_version, event.get_pdu_json(), self.hs.hostname, self.hs.signing_key, ) ) context = await self.state_handler.compute_event_context(event) await self.persist_events_and_notify(event.room_id, [(event, context)]) return event async def do_remotely_reject_invite( self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict ) -> Tuple[EventBase, int]: origin, event, room_version = await self._make_and_verify_event( target_hosts, room_id, user_id, "leave", content=content ) # Mark as outlier as we don't have any state for this event; we're not # even in the room. event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True # Try the host that we successfully called /make_leave/ on first for # the /send_leave/ request. host_list = list(target_hosts) try: host_list.remove(origin) host_list.insert(0, origin) except ValueError: pass await self.federation_client.send_leave(host_list, event) context = await self.state_handler.compute_event_context(event) stream_id = await self.persist_events_and_notify( event.room_id, [(event, context)] ) return event, stream_id async def _make_and_verify_event( self, target_hosts: Iterable[str], room_id: str, user_id: str, membership: str, content: JsonDict = {}, params: Optional[Dict[str, Union[str, Iterable[str]]]] = None, ) -> Tuple[str, EventBase, RoomVersion]: ( origin, event, room_version, ) = await self.federation_client.make_membership_event( target_hosts, room_id, user_id, membership, content, params=params ) logger.debug("Got response to make_%s: %s", membership, event) # We should assert some things. 
# FIXME: Do this in a nicer way assert event.type == EventTypes.Member assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id return origin, event, room_version async def on_make_leave_request( self, origin: str, room_id: str, user_id: str ) -> EventBase: """ We've received a /make_leave/ request, so we create a partial leave event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: origin: The (verified) server name of the requesting server. room_id: Room to create leave event in user_id: The user to create the leave for """ if get_domain_from_id(user_id) != origin: logger.info( "Got /make_leave request for user %r from different origin %s, ignoring", user_id, origin, ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, { "type": EventTypes.Member, "content": {"membership": Membership.LEAVE}, "room_id": room_id, "sender": user_id, "state_key": user_id, }, ) event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) raise e return event async def on_send_leave_request(self, origin, pdu): """ We have received a leave event for a room. 
        Fully process it."""
        event = pdu

        logger.debug(
            "on_send_leave_request: Got event: %s, signatures: %s",
            event.event_id,
            event.signatures,
        )

        # The sender must belong to the server making the request.
        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got /send_leave request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        # NOTE(review): as with on_send_join_request, there is no check here
        # that the event is an m.room.member event with membership == "leave"
        # — confirm validation happens upstream.
        event.internal_metadata.outlier = False

        await self._handle_new_event(origin, event)

        logger.debug(
            "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )

        # Explicit empty response body for the federation endpoint.
        return None

    async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
        """Returns the state at the event. i.e. not including said event."""
        event = await self.store.get_event(event_id, check_room_id=room_id)

        # We asked for a single event, so there is at most one state group.
        state_groups = await self.state_store.get_state_groups(room_id, [event_id])

        if state_groups:
            _, state = list(state_groups.items()).pop()
            results = {(e.type, e.state_key): e for e in state}

            if event.is_state():
                # Get previous state: the group above *includes* this state
                # event, so substitute the event it replaced (or drop it).
                if "replaces_state" in event.unsigned:
                    prev_id = event.unsigned["replaces_state"]
                    if prev_id != event.event_id:
                        prev_event = await self.store.get_event(prev_id)
                        results[(event.type, event.state_key)] = prev_event

                else:
                    del results[(event.type, event.state_key)]

            res = list(results.values())
            return res
        else:
            return []

    async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
        """Returns the state at the event. i.e. not including said event.
        """
        event = await self.store.get_event(event_id, check_room_id=room_id)

        state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id])

        if state_groups:
            _, state = list(state_groups.items()).pop()
            results = state

            if event.is_state():
                # Get previous state (id-only variant of get_state_for_pdu).
                if "replaces_state" in event.unsigned:
                    prev_id = event.unsigned["replaces_state"]
                    if prev_id != event.event_id:
                        results[(event.type, event.state_key)] = prev_id
                else:
                    results.pop((event.type, event.state_key), None)

            return list(results.values())
        else:
            return []

    @log_function
    async def on_backfill_request(
        self, origin: str, room_id: str, pdu_list: List[str], limit: int
    ) -> List[EventBase]:
        """Handle a federation /backfill request: return up to ``limit``
        events preceding ``pdu_list``, filtered to what ``origin`` may see.

        Raises:
            AuthError: 403 if origin is not in the room.
        """
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # Synapse asks for 100 events per backfill request. Do not allow more.
        limit = min(limit, 100)

        events = await self.store.get_backfill_events(room_id, pdu_list, limit)

        # Strip out any events the requesting server is not allowed to see.
        events = await filter_events_for_server(self.storage, origin, events)

        return events

    @log_function
    async def get_persisted_pdu(
        self, origin: str, event_id: str
    ) -> Optional[EventBase]:
        """Get an event from the database for the given server.

        Args:
            origin: hostname of server which is requesting the event; we
                will check that the server is allowed to see it.
            event_id: id of the event being requested

        Returns:
            None if we know nothing about the event; otherwise the
            (possibly-redacted) event.

        Raises:
            AuthError if the server is not currently in the room
        """
        event = await self.store.get_event(
            event_id, allow_none=True, allow_rejected=True
        )

        if event:
            in_room = await self.auth.check_host_in_room(event.room_id, origin)
            if not in_room:
                raise AuthError(403, "Host not in room.")

            # May redact the event depending on origin's visibility.
            events = await filter_events_for_server(self.storage, origin, [event])
            event = events[0]
            return event
        else:
            return None

    async def get_min_depth_for_context(self, context):
        """Return the minimum depth we have persisted for the given room id."""
        return await self.store.get_min_depth(context)

    async def _handle_new_event(
        self, origin, event, state=None, auth_events=None, backfilled=False
    ):
        """Run a single received event through auth (_prep_event) and
        persist/notify it, returning its EventContext.

        On any persistence failure, clears the event's staged push actions
        (best-effort, in the background) before re-raising.
        """
        context = await self._prep_event(
            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
        )

        try:
            # Only generate push actions for live, accepted, non-outlier events.
            if (
                not event.internal_metadata.is_outlier()
                and not backfilled
                and not context.rejected
            ):
                await self.action_generator.handle_push_actions_for_event(
                    event, context
                )

            await self.persist_events_and_notify(
                event.room_id, [(event, context)], backfilled=backfilled
            )
        except Exception:
            run_in_background(
                self.store.remove_push_actions_from_staging, event.event_id
            )
            raise

        return context

    async def _handle_new_events(
        self,
        origin: str,
        room_id: str,
        event_infos: Iterable[_NewEventInfo],
        backfilled: bool = False,
    ) -> None:
        """Creates the appropriate contexts and persists events. The events
        should not depend on one another, e.g. this should be used to persist
        a bunch of outliers, but not a chunk of individual events that depend
        on each other for state calculations.

        Notifies about the events where appropriate.
""" async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): res = await self._prep_event( origin, event, state=ev_info.state, auth_events=ev_info.auth_events, backfilled=backfilled, ) return res contexts = await make_deferred_yieldable( defer.gatherResults( [run_in_background(prep, ev_info) for ev_info in event_infos], consumeErrors=True, ) ) await self.persist_events_and_notify( room_id, [ (ev_info.event, context) for ev_info, context in zip(event_infos, contexts) ], backfilled=backfilled, ) async def _persist_auth_tree( self, origin: str, room_id: str, auth_events: List[EventBase], state: List[EventBase], event: EventBase, room_version: RoomVersion, ) -> int: """Checks the auth chain is valid (and passes auth checks) for the state and event. Then persists the auth chain and state atomically. Persists the event separately. Notifies about the persisted events where appropriate. Will attempt to fetch missing auth events. Args: origin: Where the events came from room_id, auth_events state event room_version: The room version we expect this room to have, and will raise if it doesn't match the version in the create event. """ events_to_context = {} for e in itertools.chain(auth_events, state): e.internal_metadata.outlier = True ctx = await self.state_handler.compute_event_context(e) events_to_context[e.event_id] = ctx event_map = { e.event_id: e for e in itertools.chain(auth_events, state, [event]) } create_event = None for e in auth_events: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. 
raise SynapseError(400, "No create event in state") room_version_id = create_event.content.get( "room_version", RoomVersions.V1.identifier ) if room_version.identifier != room_version_id: raise SynapseError(400, "Room version mismatch") missing_auth_events = set() for e in itertools.chain(auth_events, state, [event]): for e_id in e.auth_event_ids(): if e_id not in event_map: missing_auth_events.add(e_id) for e_id in missing_auth_events: m_ev = await self.federation_client.get_pdu( [origin], e_id, room_version=room_version, outlier=True, timeout=10000, ) if m_ev and m_ev.event_id == e_id: event_map[e_id] = m_ev else: logger.info("Failed to find auth event %r", e_id) for e in itertools.chain(auth_events, state, [event]): auth_for_e = { (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id] for e_id in e.auth_event_ids() if e_id in event_map } if create_event: auth_for_e[(EventTypes.Create, "")] = create_event try: event_auth.check(room_version, e, auth_events=auth_for_e) except SynapseError as err: # we may get SynapseErrors here as well as AuthErrors. For # instance, there are a couple of (ancient) events in some # rooms whose senders do not have the correct sigil; these # cause SynapseErrors in auth.check. We don't want to give up # the attempt to federate altogether in such cases. 
logger.warning("Rejecting %s because %s", e.event_id, err.msg) if e == event: raise events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR await self.persist_events_and_notify( room_id, [ (e, events_to_context[e.event_id]) for e in itertools.chain(auth_events, state) ], ) new_event_context = await self.state_handler.compute_event_context( event, old_state=state ) return await self.persist_events_and_notify( room_id, [(event, new_event_context)] ) async def _prep_event( self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]], auth_events: Optional[MutableStateMap[EventBase]], backfilled: bool, ) -> EventContext: context = await self.state_handler.compute_event_context(event, old_state=state) if not auth_events: prev_state_ids = await context.get_prev_state_ids() auth_events_ids = self.auth.compute_auth_events( event, prev_state_ids, for_verification=True ) auth_events_x = await self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} # This is a hack to fix some old rooms where the initial join event # didn't reference the create event in its auth events. if event.type == EventTypes.Member and not event.auth_event_ids(): if len(event.prev_event_ids()) == 1 and event.depth < 5: c = await self.store.get_event( event.prev_event_ids()[0], allow_none=True ) if c and c.type == EventTypes.Create: auth_events[(c.type, c.state_key)] = c context = await self.do_auth(origin, event, context, auth_events=auth_events) if not context.rejected: await self._check_for_soft_fail(event, state, backfilled) if event.type == EventTypes.GuestAccess and not context.rejected: await self.maybe_kick_guest_users(event) return context async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool ) -> None: """Checks if we should soft fail the event; if so, marks the event as such. 
        Args:
            event
            state: The state at the event if we don't have all the event's
                prev events
            backfilled: Whether the event is from backfill
        """
        # For new (non-backfilled and non-outlier) events we check if the event
        # passes auth based on the current state. If it doesn't then we
        # "soft-fail" the event.
        if backfilled or event.internal_metadata.is_outlier():
            return

        extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
        extrem_ids = set(extrem_ids_list)
        prev_event_ids = set(event.prev_event_ids())

        if extrem_ids == prev_event_ids:
            # If they're the same then the current state is the same as the
            # state at the event, so no point rechecking auth for soft fail.
            return

        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

        # Calculate the "current state".
        if state is not None:
            # If we're explicitly given the state then we won't have all the
            # prev events, and so we have a gap in the graph. In this case
            # we want to be a little careful as we might have been down for
            # a while and have an incorrect view of the current state,
            # however we still want to do checks as gaps are easy to
            # maliciously manufacture.
            #
            # So we use a "current state" that is actually a state
            # resolution across the current forward extremities and the
            # given state at the event. This should correctly handle cases
            # like bans, especially with state res v2.
            state_sets_d = await self.state_store.get_state_groups(
                event.room_id, extrem_ids
            )
            state_sets = list(state_sets_d.values())  # type: List[Iterable[EventBase]]
            state_sets.append(state)
            current_states = await self.state_handler.resolve_events(
                room_version, state_sets, event
            )
            current_state_ids = {
                k: e.event_id for k, e in current_states.items()
            }  # type: StateMap[str]
        else:
            current_state_ids = await self.state_handler.get_current_state_ids(
                event.room_id, latest_event_ids=extrem_ids
            )

        logger.debug(
            "Doing soft-fail check for %s: state %s",
            event.event_id,
            current_state_ids,
        )

        # Now check if event pass auth against said current state
        auth_types = auth_types_for_event(event)
        current_state_ids_list = [
            e for k, e in current_state_ids.items() if k in auth_types
        ]

        auth_events_map = await self.store.get_events(current_state_ids_list)
        current_auth_events = {
            (e.type, e.state_key): e for e in auth_events_map.values()
        }

        try:
            event_auth.check(room_version_obj, event, auth_events=current_auth_events)
        except AuthError as e:
            # Soft-fail rather than reject: the event is still persisted,
            # just flagged via internal_metadata.soft_failed.
            logger.warning("Soft-failing %r because %s", event, e)
            event.internal_metadata.soft_failed = True

    async def on_query_auth(
        self, origin, event_id, room_id, remote_auth_chain, rejects, missing
    ):
        """Handle a federation /query_auth request: ingest the remote's auth
        chain for ``event_id`` and return the difference from our own
        (see construct_auth_difference).

        Raises:
            AuthError: 403 if origin is not in the room.
        """
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        event = await self.store.get_event(event_id, check_room_id=room_id)

        # Just go through and process each event in `remote_auth_chain`. We
        # don't want to fall into the trap of `missing` being wrong.
        for e in remote_auth_chain:
            try:
                await self._handle_new_event(origin, e)
            except AuthError:
                # Auth failures for individual chain events are non-fatal here.
                pass

        # Now get the current auth_chain for the event.
        local_auth_chain = await self.store.get_auth_chain(
            list(event.auth_event_ids()), include_given=True
        )

        # TODO: Check if we would now reject event_id. If so we need to tell
        # everyone.
        ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain)

        logger.debug("on_query_auth returning: %s", ret)

        return ret

    async def on_get_missing_events(
        self, origin, room_id, earliest_events, latest_events, limit
    ):
        """Handle a federation /get_missing_events request: return up to
        ``limit`` (capped at 20) events between earliest_events and
        latest_events, filtered to what ``origin`` may see.

        Raises:
            AuthError: 403 if origin is not in the room.
        """
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # Only allow up to 20 events to be retrieved per request.
        limit = min(limit, 20)

        missing_events = await self.store.get_missing_events(
            room_id=room_id,
            earliest_events=earliest_events,
            latest_events=latest_events,
            limit=limit,
        )

        missing_events = await filter_events_for_server(
            self.storage, origin, missing_events
        )

        return missing_events

    async def do_auth(
        self,
        origin: str,
        event: EventBase,
        context: EventContext,
        auth_events: MutableStateMap[EventBase],
    ) -> EventContext:
        """Check the event against its auth events, marking the context as
        rejected on failure.

        Args:
            origin:
            event:
            context:
            auth_events:
                Map from (event_type, state_key) to event

                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.

                Also NB that this function adds entries to it.

        Returns:
            updated context object
        """
        room_version = await self.store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

        try:
            context = await self._update_auth_events_and_context_for_auth(
                origin, event, context, auth_events
            )
        except Exception:
            # We don't really mind if the above fails, so lets not fail
            # processing if it does. However, it really shouldn't fail so
            # let's still log as an exception since we'll still want to fix
            # any bugs.
            logger.exception(
                "Failed to double check auth events for %s with remote. "
                "Ignoring failure and continuing processing of event.",
                event.event_id,
            )

        try:
            event_auth.check(room_version_obj, event, auth_events=auth_events)
        except AuthError as e:
            # A failing auth check rejects the event rather than raising.
            logger.warning("Failed auth resolution for %r because %s", event, e)
            context.rejected = RejectedReason.AUTH_ERROR

        return context

    async def _update_auth_events_and_context_for_auth(
        self,
        origin: str,
        event: EventBase,
        context: EventContext,
        auth_events: MutableStateMap[EventBase],
    ) -> EventContext:
        """Helper for do_auth. See there for docs.

        Checks whether a given event has the expected auth events. If it
        doesn't then we talk to the remote server to compare state to see if
        we can come to a consensus (e.g. if one server missed some valid
        state).

        This attempts to resolve any potential divergence of state between
        servers, but is not essential and so failures should not block further
        processing of the event.

        Args:
            origin:
            event:
            context:

            auth_events:
                Map from (event_type, state_key) to event

                Normally, our calculated auth_events based on the state of the room
                at the event's position in the DAG, though occasionally (eg if the
                event is an outlier), may be the auth events claimed by the remote
                server.

                Also NB that this function adds entries to it.

        Returns:
            updated context
        """
        event_auth_events = set(event.auth_event_ids())

        # missing_auth is the set of the event's auth_events which we don't yet have
        # in auth_events.
        missing_auth = event_auth_events.difference(
            e.event_id for e in auth_events.values()
        )

        # if we have missing events, we need to fetch those events from somewhere.
        #
        # we start by checking if they are in the store, and then try calling /event_auth/.
        if missing_auth:
            have_events = await self.store.have_seen_events(missing_auth)
            logger.debug("Events %s are in the store", have_events)
            missing_auth.difference_update(have_events)

        if missing_auth:
            # If we don't have all the auth events, we need to get them.
            logger.info("auth_events contains unknown events: %s", missing_auth)
            try:
                try:
                    remote_auth_chain = await self.federation_client.get_event_auth(
                        origin, event.room_id, event.event_id
                    )
                except RequestSendFailed as e1:
                    # The other side isn't around or doesn't implement the
                    # endpoint, so lets just bail out.
                    logger.info("Failed to get event auth from remote: %s", e1)
                    return context

                seen_remotes = await self.store.have_seen_events(
                    [e.event_id for e in remote_auth_chain]
                )

                for e in remote_auth_chain:
                    if e.event_id in seen_remotes:
                        continue

                    if e.event_id == event.event_id:
                        continue

                    try:
                        # Build the claimed auth map for this chain event from
                        # the chain itself and process it as an outlier.
                        auth_ids = e.auth_event_ids()
                        auth = {
                            (e.type, e.state_key): e
                            for e in remote_auth_chain
                            if e.event_id in auth_ids or e.type == EventTypes.Create
                        }
                        e.internal_metadata.outlier = True

                        logger.debug(
                            "do_auth %s missing_auth: %s", event.event_id, e.event_id
                        )
                        await self._handle_new_event(origin, e, auth_events=auth)

                        if e.event_id in event_auth_events:
                            auth_events[(e.type, e.state_key)] = e
                    except AuthError:
                        pass

            except Exception:
                # Best-effort: failures fetching the remote chain are logged
                # and ignored (see do_auth's caller-side comment).
                logger.exception("Failed to get auth chain")

        if event.internal_metadata.is_outlier():
            # XXX: given that, for an outlier, we'll be working with the
            # event's *claimed* auth events rather than those we calculated:
            # (a) is there any point in this test, since different_auth below will
            # obviously be empty
            # (b) alternatively, why don't we do it earlier?
            logger.info("Skipping auth_event fetch for outlier")
            return context

        different_auth = event_auth_events.difference(
            e.event_id for e in auth_events.values()
        )

        if not different_auth:
            return context

        logger.info(
            "auth_events refers to events which are not in our calculated auth "
            "chain: %s",
            different_auth,
        )

        # XXX: currently this checks for redactions but I'm not convinced that is
        # necessary?
        different_events = await self.store.get_events_as_list(different_auth)

        for d in different_events:
            if d.room_id != event.room_id:
                logger.warning(
                    "Event %s refers to auth_event %s which is in a different room",
                    event.event_id,
                    d.event_id,
                )

                # don't attempt to resolve the claimed auth events against our own
                # in this case: just use our own auth events.
                #
                # XXX: should we reject the event in this case? It feels like we should,
                # but then shouldn't we also do so if we've failed to fetch any of the
                # auth events?
                return context

        # now we state-resolve between our own idea of the auth events, and the remote's
        # idea of them.

        local_state = auth_events.values()
        remote_auth_events = dict(auth_events)
        remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
        remote_state = remote_auth_events.values()

        room_version = await self.store.get_room_version_id(event.room_id)
        new_state = await self.state_handler.resolve_events(
            room_version, (local_state, remote_state), event
        )

        logger.info(
            "After state res: updating auth_events with new state %s",
            {
                (d.type, d.state_key): d.event_id
                for d in new_state.values()
                if auth_events.get((d.type, d.state_key)) != d
            },
        )

        # Mutates the caller's map, as documented above.
        auth_events.update(new_state)

        context = await self._update_context_for_auth_events(
            event, context, auth_events
        )

        return context

    async def _update_context_for_auth_events(
        self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
    ) -> EventContext:
        """Update the state_ids in an event context after auth event resolution,
        storing the changes as a new state group.

        Args:
            event: The event we're handling the context for

            context: initial event context

            auth_events: Events to update in the event context.

        Returns:
            new event context
        """
        # exclude the state key of the new event from the current_state in the context.
        if event.is_state():
            event_key = (event.type, event.state_key)  # type: Optional[Tuple[str, str]]
        else:
            event_key = None
        state_updates = {
            k: a.event_id for k, a in auth_events.items() if k != event_key
        }

        current_state_ids = await context.get_current_state_ids()
        current_state_ids = dict(current_state_ids)  # type: ignore

        current_state_ids.update(state_updates)

        prev_state_ids = await context.get_prev_state_ids()
        prev_state_ids = dict(prev_state_ids)

        # prev_state gets all resolved auth events (including the event's own
        # key, unlike state_updates above).
        prev_state_ids.update({k: a.event_id for k, a in auth_events.items()})

        # create a new state group as a delta from the existing one.
        prev_group = context.state_group
        state_group = await self.state_store.store_state_group(
            event.event_id,
            event.room_id,
            prev_group=prev_group,
            delta_ids=state_updates,
            current_state_ids=current_state_ids,
        )

        return EventContext.with_state(
            state_group=state_group,
            state_group_before_event=context.state_group_before_event,
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
            prev_group=prev_group,
            delta_ids=state_updates,
        )

    async def construct_auth_difference(
        self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
    ) -> Dict:
        """Given a local and remote auth chain, find the differences. This
        assumes that we have already processed all events in remote_auth

        Params:
            local_auth (list)
            remote_auth (list)

        Returns:
            dict
        """

        logger.debug("construct_auth_difference Start!")

        # TODO: Make sure we are OK with local_auth or remote_auth having more
        # auth events in them than strictly necessary.

        def sort_fun(ev):
            # Order by depth, then event_id as a tie-break.
            return ev.depth, ev.event_id

        logger.debug("construct_auth_difference after sort_fun!")

        # We find the differences by starting at the "bottom" of each list
        # and iterating up on both lists. The lists are ordered by depth and
        # then event_id, we iterate up both lists until we find the event ids
        # don't match. Then we look at depth/event_id to see which side is
        # missing that event, and iterate only up that list. Repeat.

        remote_list = list(remote_auth)
        remote_list.sort(key=sort_fun)

        local_list = list(local_auth)
        local_list.sort(key=sort_fun)

        local_iter = iter(local_list)
        remote_iter = iter(remote_list)

        logger.debug("construct_auth_difference before get_next!")

        def get_next(it, opt=None):
            # Return the next item, or `opt` when exhausted.
            # NOTE(review): catches Exception rather than just StopIteration,
            # which would mask unrelated errors — confirm intent.
            try:
                return next(it)
            except Exception:
                return opt

        current_local = get_next(local_iter)
        current_remote = get_next(remote_iter)

        logger.debug("construct_auth_difference before while")

        missing_remotes = []
        missing_locals = []
        while current_local or current_remote:
            if current_remote is None:
                missing_locals.append(current_local)
                current_local = get_next(local_iter)
                continue

            if current_local is None:
                missing_remotes.append(current_remote)
                current_remote = get_next(remote_iter)
                continue

            if current_local.event_id == current_remote.event_id:
                current_local = get_next(local_iter)
                current_remote = get_next(remote_iter)
                continue

            if current_local.depth < current_remote.depth:
                missing_locals.append(current_local)
                current_local = get_next(local_iter)
                continue

            if current_local.depth > current_remote.depth:
                missing_remotes.append(current_remote)
                current_remote = get_next(remote_iter)
                continue

            # They have the same depth, so we fall back to the event_id order
            # NOTE(review): there is no `continue` after this first branch, so
            # if it advances current_local past the end (to None), the next
            # `if` dereferences None.event_id — confirm whether this can occur
            # in practice or needs a guard.
            if current_local.event_id < current_remote.event_id:
                missing_locals.append(current_local)
                current_local = get_next(local_iter)

            if current_local.event_id > current_remote.event_id:
                missing_remotes.append(current_remote)
                current_remote = get_next(remote_iter)
                continue

        logger.debug("construct_auth_difference after while")

        # missing locals should be sent to the server
        # We should find why we are missing remotes, as they will have been
        # rejected.

        # Remove events from missing_remotes if they are referencing a missing
        # remote. We only care about the "root" rejected ones.
        missing_remote_ids = [e.event_id for e in missing_remotes]
        base_remote_rejected = list(missing_remotes)
        for e in missing_remotes:
            for e_id in e.auth_event_ids():
                if e_id in missing_remote_ids:
                    try:
                        base_remote_rejected.remove(e)
                    except ValueError:
                        pass

        reason_map = {}

        for e in base_remote_rejected:
            reason = await self.store.get_rejection_reason(e.event_id)
            if reason is None:
                # TODO: e is not in the current state, so we should
                # construct some proof of that.
                continue

            reason_map[e.event_id] = reason

        logger.debug("construct_auth_difference returning")

        return {
            "auth_chain": local_auth,
            "rejects": {
                e.event_id: {"reason": reason_map[e.event_id], "proof": None}
                for e in base_remote_rejected
            },
            "missing": [e.event_id for e in missing_locals],
        }

    @log_function
    async def exchange_third_party_invite(
        self, sender_user_id, target_user_id, room_id, signed
    ):
        """Turn a 3pid invite (m.room.third_party_invite) into a normal
        m.room.member invite, either locally (if we are in the room) or by
        forwarding to the room's server.
        """
        third_party_invite = {"signed": signed}

        event_dict = {
            "type": EventTypes.Member,
            "content": {
                "membership": Membership.INVITE,
                "third_party_invite": third_party_invite,
            },
            "room_id": room_id,
            "sender": sender_user_id,
            "state_key": target_user_id,
        }

        if await self.auth.check_host_in_room(room_id, self.hs.hostname):
            # We are in the room: build, validate and send the invite ourselves.
            room_version = await self.store.get_room_version_id(room_id)
            builder = self.event_builder_factory.new(room_version, event_dict)

            EventValidator().validate_builder(builder)
            event, context = await self.event_creation_handler.create_new_client_event(
                builder=builder
            )

            event, context = await self.add_display_name_to_third_party_invite(
                room_version, event_dict, event, context
            )

            EventValidator().validate_new(event, self.config)

            # We need to tell the transaction queue to send this out, even
            # though the sender isn't a local user.
            event.internal_metadata.send_on_behalf_of = self.hs.hostname

            try:
                await self.auth.check_from_context(room_version, event, context)
            except AuthError as e:
                logger.warning("Denying new third party invite %r because %s", event, e)
                raise e

            await self._check_signature(event, context)

            # We retrieve the room member handler here as to not cause a cyclic dependency
            member_handler = self.hs.get_room_member_handler()
            await member_handler.send_membership_event(None, event, context)
        else:
            # We are not in the room: forward the exchange to the room's server.
            destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)}
            await self.federation_client.forward_third_party_invite(
                destinations, room_id, event_dict
            )

    async def on_exchange_third_party_invite_request(
        self, event_dict: JsonDict
    ) -> None:
        """Handle an exchange_third_party_invite request from a remote server

        The remote server will call this when it wants to turn a 3pid invite
        into a normal m.room.member invite.

        Args:
            event_dict: Dictionary containing the event body.

        """
        # NOTE(review): no check here that the requesting server matches the
        # event's sender or room — confirm the federation servlet layer
        # performs origin validation before reaching this handler.
        assert_params_in_dict(event_dict, ["room_id"])
        room_version = await self.store.get_room_version_id(event_dict["room_id"])

        # NB: event_dict has a particular specced format we might need to fudge
        # if we change event formats too much.
        builder = self.event_builder_factory.new(room_version, event_dict)

        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )
        event, context = await self.add_display_name_to_third_party_invite(
            room_version, event_dict, event, context
        )

        try:
            await self.auth.check_from_context(room_version, event, context)
        except AuthError as e:
            logger.warning("Denying third party invite %r because %s", event, e)
            raise e

        # The signature check ties the invite to the original
        # m.room.third_party_invite event.
        await self._check_signature(event, context)

        # We need to tell the transaction queue to send this out, even
        # though the sender isn't a local user.
        event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)

        # We retrieve the room member handler here as to not cause a cyclic dependency
        member_handler = self.hs.get_room_member_handler()
        await member_handler.send_membership_event(None, event, context)

    async def add_display_name_to_third_party_invite(
        self, room_version, event_dict, event, context
    ):
        """Copy the display_name from the original m.room.third_party_invite
        state event into the invite's content, then rebuild the event.

        Returns:
            (event, context) rebuilt from the (possibly updated) event_dict.
        """
        key = (
            EventTypes.ThirdPartyInvite,
            event.content["third_party_invite"]["signed"]["token"],
        )
        original_invite = None
        prev_state_ids = await context.get_prev_state_ids()
        original_invite_id = prev_state_ids.get(key)
        if original_invite_id:
            original_invite = await self.store.get_event(
                original_invite_id, allow_none=True
            )
        if original_invite:
            # If the m.room.third_party_invite event's content is empty, it means the
            # invite has been revoked. In this case, we don't have to raise an error here
            # because the auth check will fail on the invite (because it's not able to
            # fetch public keys from the m.room.third_party_invite event's content, which
            # is empty).
            display_name = original_invite.content.get("display_name")
            event_dict["content"]["third_party_invite"]["display_name"] = display_name
        else:
            logger.info(
                "Could not find invite event for third_party_invite: %r", event_dict
            )
            # We don't discard here as this is not the appropriate place to do
            # auth checks. If we need the invite and don't have it then the
            # auth check code will explode appropriately.

        builder = self.event_builder_factory.new(room_version, event_dict)
        EventValidator().validate_builder(builder)
        event, context = await self.event_creation_handler.create_new_client_event(
            builder=builder
        )
        EventValidator().validate_new(event, self.config)
        return (event, context)

    async def _check_signature(self, event, context):
        """
        Checks that the signature in the event is consistent with its invite.

        Args:
            event (Event): The m.room.member event to check
            context (EventContext):

        Raises:
            AuthError: if signature didn't match any keys, or key has been
                revoked,
            SynapseError: if a transient error meant a key couldn't be checked
                for revocation.
        """
        signed = event.content["third_party_invite"]["signed"]
        token = signed["token"]

        # Locate the original m.room.third_party_invite by its token.
        prev_state_ids = await context.get_prev_state_ids()
        invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))

        invite_event = None
        if invite_event_id:
            invite_event = await self.store.get_event(invite_event_id, allow_none=True)

        if not invite_event:
            raise AuthError(403, "Could not find invite")

        logger.debug("Checking auth on event %r", event.content)

        last_exception = None  # type: Optional[Exception]

        # for each public key in the 3pid invite event
        for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
            try:
                # for each sig on the third_party_invite block of the actual invite
                for server, signature_block in signed["signatures"].items():
                    for key_name, encoded_signature in signature_block.items():
                        # Only ed25519 signatures are understood.
                        if not key_name.startswith("ed25519:"):
                            continue

                        logger.debug(
                            "Attempting to verify sig with key %s from %r "
                            "against pubkey %r",
                            key_name,
                            server,
                            public_key_object,
                        )

                        try:
                            public_key = public_key_object["public_key"]
                            verify_key = decode_verify_key_bytes(
                                key_name, decode_base64(public_key)
                            )
                            verify_signed_json(signed, server, verify_key)
                            logger.debug(
                                "Successfully verified sig with key %s from %r "
                                "against pubkey %r",
                                key_name,
                                server,
                                public_key_object,
                            )
                        except Exception:
                            logger.info(
                                "Failed to verify sig with key %s from %r "
                                "against pubkey %r",
                                key_name,
                                server,
                                public_key_object,
                            )
                            raise
                        try:
                            # If the key advertises a revocation URL, check it.
                            if "key_validity_url" in public_key_object:
                                await self._check_key_revocation(
                                    public_key, public_key_object["key_validity_url"]
                                )
                        except Exception:
                            logger.info(
                                "Failed to query key_validity_url %s",
                                public_key_object["key_validity_url"],
                            )
                            raise

                        # One successful verification suffices.
                        return
            except Exception as e:
                # Remember the failure and try the next public key.
                last_exception = e

        if last_exception is None:
            # we can only get here if get_public_keys() returned an empty list
            # TODO: make this better
            raise RuntimeError("no public key in invite event")

        raise last_exception

    async def _check_key_revocation(self, public_key, url):
        """
        Checks whether public_key has been revoked.

        Args:
            public_key (str): base-64 encoded public key.
            url (str): Key revocation URL.

        Raises:
            AuthError: if they key has been revoked.
            SynapseError: if a transient error meant a key couldn't be checked
                for revocation.
        """
        try:
            response = await self.http_client.get_json(url, {"public_key": public_key})
        except Exception:
            raise SynapseError(502, "Third party certificate could not be checked")
        if "valid" not in response or not response["valid"]:
            raise AuthError(403, "Third party certificate was invalid")

    async def persist_events_and_notify(
        self,
        room_id: str,
        event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
        backfilled: bool = False,
    ) -> int:
        """Persists events and tells the notifier/pushers about them, if
        necessary.

        Args:
            room_id: The room ID of events being persisted.
            event_and_contexts: Sequence of events with their associated
                context that should be persisted. All events must belong to
                the same room.
            backfilled: Whether these events are a result of
                backfilling or not
        """
        # If another worker owns event persistence for this room, forward the
        # request there via the replication client.
        instance = self.config.worker.events_shard_config.get_instance(room_id)
        if instance != self._instance_name:
            result = await self._send_events(
                instance_name=instance,
                store=self.store,
                room_id=room_id,
                event_and_contexts=event_and_contexts,
                backfilled=backfilled,
            )
            return result["max_stream_id"]
        else:
            assert self.storage.persistence

            # Note that this returns the events that were persisted, which may not be
            # the same as were passed in if some were deduplicated due to transaction IDs.
            events, max_stream_token = await self.storage.persistence.persist_events(
                event_and_contexts, backfilled=backfilled
            )

            if self._ephemeral_messages_enabled:
                for event in events:
                    # If there's an expiry timestamp on the event, schedule its expiry.
self._message_handler.maybe_schedule_expiry(event) if not backfilled: # Never notify for backfilled events for event in events: await self._notify_persisted_event(event, max_stream_token) return max_stream_token.stream async def _notify_persisted_event( self, event: EventBase, max_stream_token: RoomStreamToken ) -> None: """Checks to see if notifier/pushers should be notified about the event or not. Args: event: max_stream_id: The max_stream_id returned by persist_events """ extra_users = [] if event.type == EventTypes.Member: target_user_id = event.state_key # We notify for memberships if its an invite for one of our # users if event.internal_metadata.is_outlier(): if event.membership != Membership.INVITE: if not self.is_mine_id(target_user_id): return target_user = UserID.from_string(target_user_id) extra_users.append(target_user) elif event.internal_metadata.is_outlier(): return # the event has been persisted so it should have a stream ordering. assert event.internal_metadata.stream_ordering event_pos = PersistedEventPosition( self._instance_name, event.internal_metadata.stream_ordering ) self.notifier.on_new_room_event( event, event_pos, max_stream_token, extra_users=extra_users ) async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the server to join the room. Args: room_id """ if self.config.worker_app: await self._clean_room_for_join_client(room_id) else: await self.store.clean_room_for_join(room_id) async def get_room_complexity( self, remote_room_hosts: List[str], room_id: str ) -> Optional[dict]: """ Fetch the complexity of a remote room over federation. Args: remote_room_hosts (list[str]): The remote servers to ask. room_id (str): The room ID to ask about. Returns: Dict contains the complexity metric versions, while None means we could not fetch the complexity. 
""" for host in remote_room_hosts: res = await self.federation_client.get_room_complexity(host, room_id) # We got a result, return it. if res: return res # We fell off the bottom, couldn't get the complexity from anyone. Oh # well. return None
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_7
crossvul-python_data_bad_4351_0
"""Base Tornado handlers for the Jupyter server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import datetime import functools import ipaddress import json import mimetypes import os import re import sys import traceback import types import warnings from http.client import responses from http.cookies import Morsel from urllib.parse import urlparse from jinja2 import TemplateNotFound from tornado import web, gen, escape, httputil from tornado.log import app_log import prometheus_client from jupyter_server._sysinfo import get_sys_info from traitlets.config import Application from ipython_genutils.path import filefind from ipython_genutils.py3compat import string_types import jupyter_server from jupyter_server._tz import utcnow from jupyter_server.i18n import combine_translations from jupyter_server.utils import is_hidden, url_path_join, url_is_absolute, url_escape from jupyter_server.services.security import csp_report_uri #----------------------------------------------------------------------------- # Top-level handlers #----------------------------------------------------------------------------- non_alphanum = re.compile(r'[^A-Za-z0-9]') _sys_info_cache = None def json_sys_info(): global _sys_info_cache if _sys_info_cache is None: _sys_info_cache = json.dumps(get_sys_info()) return _sys_info_cache def log(): if Application.initialized(): return Application.instance().log else: return app_log class AuthenticatedHandler(web.RequestHandler): """A RequestHandler with an authenticated user.""" @property def content_security_policy(self): """The default Content-Security-Policy header Can be overridden by defining Content-Security-Policy in settings['headers'] """ if 'Content-Security-Policy' in self.settings.get('headers', {}): # user-specified, don't override return self.settings['headers']['Content-Security-Policy'] return '; '.join([ "frame-ancestors 'self'", # Make sure the report-uri is relative to the 
base_url "report-uri " + self.settings.get('csp_report_uri', url_path_join(self.base_url, csp_report_uri)), ]) def set_default_headers(self): headers = {} headers.update(self.settings.get('headers', {})) headers["Content-Security-Policy"] = self.content_security_policy # Allow for overriding headers for header_name, value in headers.items(): try: self.set_header(header_name, value) except Exception as e: # tornado raise Exception (not a subclass) # if method is unsupported (websocket and Access-Control-Allow-Origin # for example, so just ignore) self.log.debug(e) def force_clear_cookie(self, name, path="/", domain=None): """Deletes the cookie with the given name. Tornado's cookie handling currently (Jan 2018) stores cookies in a dict keyed by name, so it can only modify one cookie with a given name per response. The browser can store multiple cookies with the same name but different domains and/or paths. This method lets us clear multiple cookies with the same name. Due to limitations of the cookie protocol, you must pass the same path and domain to clear a cookie as were used when that cookie was set (but there is no way to find out on the server side which values were used for a given cookie). """ name = escape.native_str(name) expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) morsel = Morsel() morsel.set(name, '', '""') morsel['expires'] = httputil.format_timestamp(expires) morsel['path'] = path if domain: morsel['domain'] = domain self.add_header("Set-Cookie", morsel.OutputString()) def clear_login_cookie(self): cookie_options = self.settings.get('cookie_options', {}) path = cookie_options.setdefault('path', self.base_url) self.clear_cookie(self.cookie_name, path=path) if path and path != '/': # also clear cookie on / to ensure old cookies are cleared # after the change in path behavior. # N.B. This bypasses the normal cookie handling, which can't update # two cookies with the same name. See the method above. 
self.force_clear_cookie(self.cookie_name) def get_current_user(self): if self.login_handler is None: return 'anonymous' return self.login_handler.get_user(self) def skip_check_origin(self): """Ask my login_handler if I should skip the origin_check For example: in the default LoginHandler, if a request is token-authenticated, origin checking should be skipped. """ if self.request.method == 'OPTIONS': # no origin-check on options requests, which are used to check origins! return True if self.login_handler is None or not hasattr(self.login_handler, 'should_check_origin'): return False return not self.login_handler.should_check_origin(self) @property def token_authenticated(self): """Have I been authenticated with a token?""" if self.login_handler is None or not hasattr(self.login_handler, 'is_token_authenticated'): return False return self.login_handler.is_token_authenticated(self) @property def cookie_name(self): default_cookie_name = non_alphanum.sub('-', 'username-{}'.format( self.request.host )) return self.settings.get('cookie_name', default_cookie_name) @property def logged_in(self): """Is a user currently logged in?""" user = self.get_current_user() return (user and not user == 'anonymous') @property def login_handler(self): """Return the login handler for this application, if any.""" return self.settings.get('login_handler_class', None) @property def token(self): """Return the login token for this application, if any.""" return self.settings.get('token', None) @property def login_available(self): """May a user proceed to log in? This returns True if login capability is available, irrespective of whether the user is already logged in or not. """ if self.login_handler is None: return False return bool(self.login_handler.get_login_available(self.settings)) class JupyterHandler(AuthenticatedHandler): """Jupyter-specific extensions to authenticated handling Mostly property shortcuts to Jupyter-specific settings. 
""" @property def config(self): return self.settings.get('config', None) @property def log(self): """use the Jupyter log by default, falling back on tornado's logger""" return log() @property def jinja_template_vars(self): """User-supplied values to supply to jinja templates.""" return self.settings.get('jinja_template_vars', {}) #--------------------------------------------------------------- # URLs #--------------------------------------------------------------- @property def version_hash(self): """The version hash to use for cache hints for static files""" return self.settings.get('version_hash', '') @property def mathjax_url(self): url = self.settings.get('mathjax_url', '') if not url or url_is_absolute(url): return url return url_path_join(self.base_url, url) @property def mathjax_config(self): return self.settings.get('mathjax_config', 'TeX-AMS-MML_HTMLorMML-full,Safe') @property def base_url(self): return self.settings.get('base_url', '/') @property def default_url(self): return self.settings.get('default_url', '') @property def ws_url(self): return self.settings.get('websocket_url', '') @property def contents_js_source(self): self.log.debug("Using contents: %s", self.settings.get('contents_js_source', 'services/contents')) return self.settings.get('contents_js_source', 'services/contents') #--------------------------------------------------------------- # Manager objects #--------------------------------------------------------------- @property def kernel_manager(self): return self.settings['kernel_manager'] @property def contents_manager(self): return self.settings['contents_manager'] @property def session_manager(self): return self.settings['session_manager'] @property def terminal_manager(self): return self.settings['terminal_manager'] @property def kernel_spec_manager(self): return self.settings['kernel_spec_manager'] @property def config_manager(self): return self.settings['config_manager'] 
#--------------------------------------------------------------- # CORS #--------------------------------------------------------------- @property def allow_origin(self): """Normal Access-Control-Allow-Origin""" return self.settings.get('allow_origin', '') @property def allow_origin_pat(self): """Regular expression version of allow_origin""" return self.settings.get('allow_origin_pat', None) @property def allow_credentials(self): """Whether to set Access-Control-Allow-Credentials""" return self.settings.get('allow_credentials', False) def set_default_headers(self): """Add CORS headers, if defined""" super(JupyterHandler, self).set_default_headers() if self.allow_origin: self.set_header("Access-Control-Allow-Origin", self.allow_origin) elif self.allow_origin_pat: origin = self.get_origin() if origin and self.allow_origin_pat.match(origin): self.set_header("Access-Control-Allow-Origin", origin) elif ( self.token_authenticated and "Access-Control-Allow-Origin" not in self.settings.get('headers', {}) ): # allow token-authenticated requests cross-origin by default. # only apply this exception if allow-origin has not been specified. self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) if self.allow_credentials: self.set_header("Access-Control-Allow-Credentials", 'true') def set_attachment_header(self, filename): """Set Content-Disposition: attachment header As a method to ensure handling of filename encoding """ escaped_filename = url_escape(filename) self.set_header('Content-Disposition', 'attachment;' " filename*=utf-8''{utf8}" .format( utf8=escaped_filename, ) ) def get_origin(self): # Handle WebSocket Origin naming convention differences # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". 
if "Origin" in self.request.headers: origin = self.request.headers.get("Origin") else: origin = self.request.headers.get("Sec-Websocket-Origin", None) return origin # origin_to_satisfy_tornado is present because tornado requires # check_origin to take an origin argument, but we don't use it def check_origin(self, origin_to_satisfy_tornado=""): """Check Origin for cross-site API requests, including websockets Copied from WebSocket with changes: - allow unspecified host/origin (e.g. scripts) - allow token-authenticated requests """ if self.allow_origin == '*' or self.skip_check_origin(): return True host = self.request.headers.get("Host") origin = self.request.headers.get("Origin") # If no header is provided, let the request through. # Origin can be None for: # - same-origin (IE, Firefox) # - Cross-site POST form (IE, Firefox) # - Scripts # The cross-site POST (XSRF) case is handled by tornado's xsrf_token if origin is None or host is None: return True origin = origin.lower() origin_host = urlparse(origin).netloc # OK if origin matches host if origin_host == host: return True # Check CORS headers if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) else: # No CORS headers deny the request allow = False if not allow: self.log.warning("Blocking Cross Origin API request for %s. Origin: %s, Host: %s", self.request.path, origin, host, ) return allow def check_xsrf_cookie(self): """Bypass xsrf cookie checks when token-authenticated""" if self.token_authenticated or self.settings.get('disable_check_xsrf', False): # Token-authenticated requests do not need additional XSRF-check # Servers without authentication are vulnerable to XSRF return return super(JupyterHandler, self).check_xsrf_cookie() def check_host(self): """Check the host header if remote access disallowed. Returns True if the request should continue, False otherwise. 
""" if self.settings.get('allow_remote_access', False): return True # Remove port (e.g. ':8888') from host host = re.match(r'^(.*?)(:\d+)?$', self.request.host).group(1) # Browsers format IPv6 addresses like [::1]; we need to remove the [] if host.startswith('[') and host.endswith(']'): host = host[1:-1] try: addr = ipaddress.ip_address(host) except ValueError: # Not an IP address: check against hostnames allow = host in self.settings.get('local_hostnames', ['localhost']) else: allow = addr.is_loopback if not allow: self.log.warning( ("Blocking request with non-local 'Host' %s (%s). " "If the server should be accessible at that name, " "set ServerApp.allow_remote_access to disable the check."), host, self.request.host ) return allow def prepare(self): if not self.check_host(): raise web.HTTPError(403) return super(JupyterHandler, self).prepare() #--------------------------------------------------------------- # template rendering #--------------------------------------------------------------- def get_template(self, name): """Return the jinja template object for a given name""" return self.settings['jinja2_env'].get_template(name) def render_template(self, name, **ns): ns.update(self.template_namespace) template = self.get_template(name) return template.render(**ns) @property def template_namespace(self): return dict( base_url=self.base_url, default_url=self.default_url, ws_url=self.ws_url, logged_in=self.logged_in, allow_password_change=self.settings.get('allow_password_change'), login_available=self.login_available, token_available=bool(self.token), static_url=self.static_url, sys_info=json_sys_info(), contents_js_source=self.contents_js_source, version_hash=self.version_hash, xsrf_form_html=self.xsrf_form_html, token=self.token, xsrf_token=self.xsrf_token.decode('utf8'), nbjs_translations=json.dumps(combine_translations( self.request.headers.get('Accept-Language', ''))), **self.jinja_template_vars ) def get_json_body(self): """Return the body of the request as 
JSON data.""" if not self.request.body: return None # Do we need to call body.decode('utf-8') here? body = self.request.body.strip().decode(u'utf-8') try: model = json.loads(body) except Exception as e: self.log.debug("Bad JSON: %r", body) self.log.error("Couldn't parse JSON", exc_info=True) raise web.HTTPError(400, u'Invalid JSON in body of request') from e return model def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') exception = '(unknown)' if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: html = self.render_template('error.html', **ns) self.write(html) class APIHandler(JupyterHandler): """Base class for API handlers""" def prepare(self): if not self.check_origin(): raise web.HTTPError(404) return super(APIHandler, self).prepare() def write_error(self, status_code, **kwargs): """APIHandler errors are JSON, not human pages""" self.set_header('Content-Type', 'application/json') message = responses.get(status_code, 'Unknown HTTP Error') reply = { 'message': message, } exc_info = kwargs.get('exc_info') if exc_info: e = exc_info[1] if isinstance(e, HTTPError): reply['message'] = e.log_message or message reply['reason'] = e.reason else: reply['message'] = 'Unhandled error' reply['reason'] = None reply['traceback'] = ''.join(traceback.format_exception(*exc_info)) self.log.warning(reply['message']) self.finish(json.dumps(reply)) 
def get_current_user(self): """Raise 403 on API handlers instead of redirecting to human login page""" # preserve _user_cache so we don't raise more than once if hasattr(self, '_user_cache'): return self._user_cache self._user_cache = user = super(APIHandler, self).get_current_user() return user def get_login_url(self): # if get_login_url is invoked in an API handler, # that means @web.authenticated is trying to trigger a redirect. # instead of redirecting, raise 403 instead. if not self.current_user: raise web.HTTPError(403) return super(APIHandler, self).get_login_url() @property def content_security_policy(self): csp = '; '.join([ super(APIHandler, self).content_security_policy, "default-src 'none'", ]) return csp # set _track_activity = False on API handlers that shouldn't track activity _track_activity = True def update_api_activity(self): """Update last_activity of API requests""" # record activity of authenticated requests if ( self._track_activity and getattr(self, '_user_cache', None) and self.get_argument('no_track_activity', None) is None ): self.settings['api_last_activity'] = utcnow() def finish(self, *args, **kwargs): self.update_api_activity() self.set_header('Content-Type', 'application/json') return super(APIHandler, self).finish(*args, **kwargs) def options(self, *args, **kwargs): if 'Access-Control-Allow-Headers' in self.settings.get('headers', {}): self.set_header('Access-Control-Allow-Headers', self.settings['headers']['Access-Control-Allow-Headers']) else: self.set_header('Access-Control-Allow-Headers', 'accept, content-type, authorization, x-xsrftoken') self.set_header('Access-Control-Allow-Methods', 'GET, PUT, POST, PATCH, DELETE, OPTIONS') # if authorization header is requested, # that means the request is token-authenticated. # avoid browser-side rejection of the preflight request. # only allow this exception if allow_origin has not been specified # and Jupyter server authentication is enabled. 
# If the token is not valid, the 'real' request will still be rejected. requested_headers = self.request.headers.get('Access-Control-Request-Headers', '').split(',') if requested_headers and any( h.strip().lower() == 'authorization' for h in requested_headers ) and ( # FIXME: it would be even better to check specifically for token-auth, # but there is currently no API for this. self.login_available ) and ( self.allow_origin or self.allow_origin_pat or 'Access-Control-Allow-Origin' in self.settings.get('headers', {}) ): self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) class Template404(JupyterHandler): """Render our 404 template""" def prepare(self): raise web.HTTPError(404) class AuthenticatedFileHandler(JupyterHandler, web.StaticFileHandler): """static files should only be accessible when logged in""" @property def content_security_policy(self): # In case we're serving HTML/SVG, confine any Javascript to a unique # origin so it can't interact with the Jupyter server. 
return super(AuthenticatedFileHandler, self).content_security_policy + \ "; sandbox allow-scripts" @web.authenticated def get(self, path): if os.path.splitext(path)[1] == '.ipynb' or self.get_argument("download", False): name = path.rsplit('/', 1)[-1] self.set_attachment_header(name) return web.StaticFileHandler.get(self, path) def get_content_type(self): path = self.absolute_path.strip('/') if '/' in path: _, name = path.rsplit('/', 1) else: name = path if name.endswith('.ipynb'): return 'application/x-ipynb+json' else: cur_mime = mimetypes.guess_type(name)[0] if cur_mime == 'text/plain': return 'text/plain; charset=UTF-8' else: return super(AuthenticatedFileHandler, self).get_content_type() def set_headers(self): super(AuthenticatedFileHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments: self.add_header("Cache-Control", "no-cache") def compute_etag(self): return None def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. Requires tornado 3.1 Adding to tornado's own handling, forbids the serving of hidden files. """ abs_path = super(AuthenticatedFileHandler, self).validate_absolute_path(root, absolute_path) abs_root = os.path.abspath(root) if is_hidden(abs_path, abs_root) and not self.contents_manager.allow_hidden: self.log.info("Refusing to serve hidden file, via 404 Error, use flag 'ContentsManager.allow_hidden' to enable") raise web.HTTPError(404) return abs_path def json_errors(method): """Decorate methods with this to return GitHub style JSON errors. This should be used on any JSON API on any handler method that can raise HTTPErrors. This will grab the latest HTTPError exception using sys.exc_info and then: 1. Set the HTTP status code based on the HTTPError 2. Create and return a JSON body with a message field describing the error in a human readable form. """ warnings.warn('@json_errors is deprecated in notebook 5.2.0. 
Subclass APIHandler instead.', DeprecationWarning, stacklevel=2, ) @functools.wraps(method) def wrapper(self, *args, **kwargs): self.write_error = types.MethodType(APIHandler.write_error, self) return method(self, *args, **kwargs) return wrapper #----------------------------------------------------------------------------- # File handler #----------------------------------------------------------------------------- # to minimize subclass changes: HTTPError = web.HTTPError class FileFindHandler(JupyterHandler, web.StaticFileHandler): """subclass of StaticFileHandler for serving files from a search path""" # cache search results, don't search for files more than once _static_paths = {} def set_headers(self): super(FileFindHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments or \ any(self.request.path.startswith(path) for path in self.no_cache_paths): self.set_header("Cache-Control", "no-cache") def initialize(self, path, default_filename=None, no_cache_paths=None): self.no_cache_paths = no_cache_paths or [] if isinstance(path, string_types): path = [path] self.root = tuple( os.path.abspath(os.path.expanduser(p)) + os.sep for p in path ) self.default_filename = default_filename def compute_etag(self): return None @classmethod def get_absolute_path(cls, roots, path): """locate a file to serve on our static file search path""" with cls._lock: if path in cls._static_paths: return cls._static_paths[path] try: abspath = os.path.abspath(filefind(path, roots)) except IOError: # IOError means not found return '' cls._static_paths[path] = abspath log().debug("Path %s served from %s"%(path, abspath)) return abspath def validate_absolute_path(self, root, absolute_path): """check if the file should be served (raises 404, 403, etc.)""" if absolute_path == '': raise web.HTTPError(404) for root in self.root: if (absolute_path + os.sep).startswith(root): break return super(FileFindHandler, 
self).validate_absolute_path(root, absolute_path) class APIVersionHandler(APIHandler): def get(self): # not authenticated, so give as few info as possible self.finish(json.dumps({"version": jupyter_server.__version__})) class TrailingSlashHandler(web.RequestHandler): """Simple redirect handler that strips trailing slashes This should be the first, highest priority handler. """ def get(self): uri = self.request.path.rstrip("/") if uri: self.redirect('?'.join((uri, self.request.query))) post = put = get class MainHandler(JupyterHandler): """Simple handler for base_url.""" def get(self): html = self.render_template("main.html") self.write(html) post = put = get class FilesRedirectHandler(JupyterHandler): """Handler for redirecting relative URLs to the /files/ handler""" @staticmethod def redirect_to_files(self, path): """make redirect logic a reusable static method so it can be called from other handlers. """ cm = self.contents_manager if cm.dir_exists(path): # it's a *directory*, redirect to /tree url = url_path_join(self.base_url, 'tree', url_escape(path)) else: orig_path = path # otherwise, redirect to /files parts = path.split('/') if not cm.file_exists(path=path) and 'files' in parts: # redirect without files/ iff it would 404 # this preserves pre-2.0-style 'files/' links self.log.warning("Deprecated files/ URL: %s", orig_path) parts.remove('files') path = '/'.join(parts) if not cm.file_exists(path=path): raise web.HTTPError(404) url = url_path_join(self.base_url, 'files', url_escape(path)) self.log.debug("Redirecting %s to %s", self.request.path, url) self.redirect(url) def get(self, path=''): return self.redirect_to_files(self, path) class RedirectWithParams(web.RequestHandler): """Sam as web.RedirectHandler, but preserves URL parameters""" def initialize(self, url, permanent=True): self._url = url self._permanent = permanent def get(self): sep = '&' if '?' in self._url else '?' 
url = sep.join([self._url, self.request.query]) self.redirect(url, permanent=self._permanent) class PrometheusMetricsHandler(JupyterHandler): """ Return prometheus metrics for this Jupyter server """ @web.authenticated def get(self): self.set_header('Content-Type', prometheus_client.CONTENT_TYPE_LATEST) self.write(prometheus_client.generate_latest(prometheus_client.REGISTRY)) #----------------------------------------------------------------------------- # URL pattern fragments for re-use #----------------------------------------------------------------------------- # path matches any number of `/foo[/bar...]` or just `/` or '' path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))" #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- default_handlers = [ (r".*/", TrailingSlashHandler), (r"api", APIVersionHandler), (r'/(robots\.txt|favicon\.ico)', web.StaticFileHandler), (r'/metrics', PrometheusMetricsHandler) ]
./CrossVul/dataset_final_sorted/CWE-601/py/bad_4351_0
crossvul-python_data_good_3250_3
# Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask_login import current_user, logout_user from flask_restful import Resource # End the Flask-Logins session from security_monkey import rbac class Logout(Resource): decorators = [rbac.exempt] def get(self): if not current_user.is_authenticated: return "Must be logged in to log out", 200 logout_user() return "Logged Out", 200
./CrossVul/dataset_final_sorted/CWE-601/py/good_3250_3
crossvul-python_data_good_3250_4
# Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import setup setup( name='security_monkey', version='0.8.0', long_description=__doc__, packages=['security_monkey'], include_package_data=True, zip_safe=False, install_requires=[ 'APScheduler==2.1.2', 'Flask==0.10.1', 'Flask-Mail==0.9.0', 'Flask-Migrate==1.3.1', 'Flask-Principal==0.4.0', 'Flask-RESTful==0.3.3', 'Flask-SQLAlchemy==1.0', 'Flask-Script==0.6.3', # 'Flask-Security==1.7.4', 'Flask-Security-Fork==1.8.2', 'Jinja2==2.8', 'SQLAlchemy==0.9.2', 'boto>=2.41.0', 'ipaddr==2.1.11', 'itsdangerous==0.23', 'psycopg2==2.6.2', 'bcrypt==3.1.2', 'Sphinx==1.2.2', 'gunicorn==18.0', 'cryptography==1.7.1', 'boto3>=1.4.2', 'botocore>=1.4.81', 'dpath==1.3.2', 'pyyaml==3.11', 'jira==0.32', 'cloudaux>=1.0.6', 'joblib>=0.9.4', 'pyjwt>=1.01', ], extras_require = { 'onelogin': ['python-saml>=2.2.0'], 'tests': [ 'nose==1.3.0', 'mock==1.0.1', 'moto==0.4.30', 'freezegun>=0.3.7' ] } )
./CrossVul/dataset_final_sorted/CWE-601/py/good_3250_4
crossvul-python_data_good_4332_1
"""Base Tornado handlers for the notebook server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import datetime import functools import ipaddress import json import mimetypes import os import re import sys import traceback import types import warnings from http.client import responses from http.cookies import Morsel from urllib.parse import urlparse from jinja2 import TemplateNotFound from tornado import web, gen, escape, httputil from tornado.log import app_log import prometheus_client from notebook._sysinfo import get_sys_info from traitlets.config import Application from ipython_genutils.path import filefind from ipython_genutils.py3compat import string_types import notebook from notebook._tz import utcnow from notebook.i18n import combine_translations from notebook.utils import is_hidden, url_path_join, url_is_absolute, url_escape, urldecode_unix_socket_path from notebook.services.security import csp_report_uri #----------------------------------------------------------------------------- # Top-level handlers #----------------------------------------------------------------------------- non_alphanum = re.compile(r'[^A-Za-z0-9]') _sys_info_cache = None def json_sys_info(): global _sys_info_cache if _sys_info_cache is None: _sys_info_cache = json.dumps(get_sys_info()) return _sys_info_cache def log(): if Application.initialized(): return Application.instance().log else: return app_log class AuthenticatedHandler(web.RequestHandler): """A RequestHandler with an authenticated user.""" @property def content_security_policy(self): """The default Content-Security-Policy header Can be overridden by defining Content-Security-Policy in settings['headers'] """ if 'Content-Security-Policy' in self.settings.get('headers', {}): # user-specified, don't override return self.settings['headers']['Content-Security-Policy'] return '; '.join([ "frame-ancestors 'self'", # Make sure the report-uri is relative to the base_url 
"report-uri " + self.settings.get('csp_report_uri', url_path_join(self.base_url, csp_report_uri)), ]) def set_default_headers(self): headers = {} headers["X-Content-Type-Options"] = "nosniff" headers.update(self.settings.get('headers', {})) headers["Content-Security-Policy"] = self.content_security_policy # Allow for overriding headers for header_name, value in headers.items(): try: self.set_header(header_name, value) except Exception as e: # tornado raise Exception (not a subclass) # if method is unsupported (websocket and Access-Control-Allow-Origin # for example, so just ignore) self.log.debug(e) def force_clear_cookie(self, name, path="/", domain=None): """Deletes the cookie with the given name. Tornado's cookie handling currently (Jan 2018) stores cookies in a dict keyed by name, so it can only modify one cookie with a given name per response. The browser can store multiple cookies with the same name but different domains and/or paths. This method lets us clear multiple cookies with the same name. Due to limitations of the cookie protocol, you must pass the same path and domain to clear a cookie as were used when that cookie was set (but there is no way to find out on the server side which values were used for a given cookie). """ name = escape.native_str(name) expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) morsel = Morsel() morsel.set(name, '', '""') morsel['expires'] = httputil.format_timestamp(expires) morsel['path'] = path if domain: morsel['domain'] = domain self.add_header("Set-Cookie", morsel.OutputString()) def clear_login_cookie(self): cookie_options = self.settings.get('cookie_options', {}) path = cookie_options.setdefault('path', self.base_url) self.clear_cookie(self.cookie_name, path=path) if path and path != '/': # also clear cookie on / to ensure old cookies are cleared # after the change in path behavior (changed in notebook 5.2.2). # N.B. 
This bypasses the normal cookie handling, which can't update # two cookies with the same name. See the method above. self.force_clear_cookie(self.cookie_name) def get_current_user(self): if self.login_handler is None: return 'anonymous' return self.login_handler.get_user(self) def skip_check_origin(self): """Ask my login_handler if I should skip the origin_check For example: in the default LoginHandler, if a request is token-authenticated, origin checking should be skipped. """ if self.request.method == 'OPTIONS': # no origin-check on options requests, which are used to check origins! return True if self.login_handler is None or not hasattr(self.login_handler, 'should_check_origin'): return False return not self.login_handler.should_check_origin(self) @property def token_authenticated(self): """Have I been authenticated with a token?""" if self.login_handler is None or not hasattr(self.login_handler, 'is_token_authenticated'): return False return self.login_handler.is_token_authenticated(self) @property def cookie_name(self): default_cookie_name = non_alphanum.sub('-', 'username-{}'.format( self.request.host )) return self.settings.get('cookie_name', default_cookie_name) @property def logged_in(self): """Is a user currently logged in?""" user = self.get_current_user() return (user and not user == 'anonymous') @property def login_handler(self): """Return the login handler for this application, if any.""" return self.settings.get('login_handler_class', None) @property def token(self): """Return the login token for this application, if any.""" return self.settings.get('token', None) @property def login_available(self): """May a user proceed to log in? This returns True if login capability is available, irrespective of whether the user is already logged in or not. 
""" if self.login_handler is None: return False return bool(self.login_handler.get_login_available(self.settings)) class IPythonHandler(AuthenticatedHandler): """IPython-specific extensions to authenticated handling Mostly property shortcuts to IPython-specific settings. """ @property def ignore_minified_js(self): """Wether to user bundle in template. (*.min files) Mainly use for development and avoid file recompilation """ return self.settings.get('ignore_minified_js', False) @property def config(self): return self.settings.get('config', None) @property def log(self): """use the IPython log by default, falling back on tornado's logger""" return log() @property def jinja_template_vars(self): """User-supplied values to supply to jinja templates.""" return self.settings.get('jinja_template_vars', {}) #--------------------------------------------------------------- # URLs #--------------------------------------------------------------- @property def version_hash(self): """The version hash to use for cache hints for static files""" return self.settings.get('version_hash', '') @property def mathjax_url(self): url = self.settings.get('mathjax_url', '') if not url or url_is_absolute(url): return url return url_path_join(self.base_url, url) @property def mathjax_config(self): return self.settings.get('mathjax_config', 'TeX-AMS-MML_HTMLorMML-full,Safe') @property def base_url(self): return self.settings.get('base_url', '/') @property def default_url(self): return self.settings.get('default_url', '') @property def ws_url(self): return self.settings.get('websocket_url', '') @property def contents_js_source(self): self.log.debug("Using contents: %s", self.settings.get('contents_js_source', 'services/contents')) return self.settings.get('contents_js_source', 'services/contents') #--------------------------------------------------------------- # Manager objects #--------------------------------------------------------------- @property def kernel_manager(self): return 
self.settings['kernel_manager'] @property def contents_manager(self): return self.settings['contents_manager'] @property def session_manager(self): return self.settings['session_manager'] @property def terminal_manager(self): return self.settings['terminal_manager'] @property def kernel_spec_manager(self): return self.settings['kernel_spec_manager'] @property def config_manager(self): return self.settings['config_manager'] #--------------------------------------------------------------- # CORS #--------------------------------------------------------------- @property def allow_origin(self): """Normal Access-Control-Allow-Origin""" return self.settings.get('allow_origin', '') @property def allow_origin_pat(self): """Regular expression version of allow_origin""" return self.settings.get('allow_origin_pat', None) @property def allow_credentials(self): """Whether to set Access-Control-Allow-Credentials""" return self.settings.get('allow_credentials', False) def set_default_headers(self): """Add CORS headers, if defined""" super(IPythonHandler, self).set_default_headers() if self.allow_origin: self.set_header("Access-Control-Allow-Origin", self.allow_origin) elif self.allow_origin_pat: origin = self.get_origin() if origin and self.allow_origin_pat.match(origin): self.set_header("Access-Control-Allow-Origin", origin) elif ( self.token_authenticated and "Access-Control-Allow-Origin" not in self.settings.get('headers', {}) ): # allow token-authenticated requests cross-origin by default. # only apply this exception if allow-origin has not been specified. 
self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) if self.allow_credentials: self.set_header("Access-Control-Allow-Credentials", 'true') def set_attachment_header(self, filename): """Set Content-Disposition: attachment header As a method to ensure handling of filename encoding """ escaped_filename = url_escape(filename) self.set_header('Content-Disposition', 'attachment;' " filename*=utf-8''{utf8}" .format( utf8=escaped_filename, ) ) def get_origin(self): # Handle WebSocket Origin naming convention differences # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". if "Origin" in self.request.headers: origin = self.request.headers.get("Origin") else: origin = self.request.headers.get("Sec-Websocket-Origin", None) return origin # origin_to_satisfy_tornado is present because tornado requires # check_origin to take an origin argument, but we don't use it def check_origin(self, origin_to_satisfy_tornado=""): """Check Origin for cross-site API requests, including websockets Copied from WebSocket with changes: - allow unspecified host/origin (e.g. scripts) - allow token-authenticated requests """ if self.allow_origin == '*' or self.skip_check_origin(): return True host = self.request.headers.get("Host") origin = self.request.headers.get("Origin") # If no header is provided, let the request through. 
# Origin can be None for: # - same-origin (IE, Firefox) # - Cross-site POST form (IE, Firefox) # - Scripts # The cross-site POST (XSRF) case is handled by tornado's xsrf_token if origin is None or host is None: return True origin = origin.lower() origin_host = urlparse(origin).netloc # OK if origin matches host if origin_host == host: return True # Check CORS headers if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) else: # No CORS headers deny the request allow = False if not allow: self.log.warning("Blocking Cross Origin API request for %s. Origin: %s, Host: %s", self.request.path, origin, host, ) return allow def check_referer(self): """Check Referer for cross-site requests. Disables requests to certain endpoints with external or missing Referer. If set, allow_origin settings are applied to the Referer to whitelist specific cross-origin sites. Used on GET for api endpoints and /files/ to block cross-site inclusion (XSSI). """ host = self.request.headers.get("Host") referer = self.request.headers.get("Referer") if not host: self.log.warning("Blocking request with no host") return False if not referer: self.log.warning("Blocking request with no referer") return False referer_url = urlparse(referer) referer_host = referer_url.netloc if referer_host == host: return True # apply cross-origin checks to Referer: origin = "{}://{}".format(referer_url.scheme, referer_url.netloc) if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) else: # No CORS settings, deny the request allow = False if not allow: self.log.warning("Blocking Cross Origin request for %s. 
Referer: %s, Host: %s", self.request.path, origin, host, ) return allow def check_xsrf_cookie(self): """Bypass xsrf cookie checks when token-authenticated""" if self.token_authenticated or self.settings.get('disable_check_xsrf', False): # Token-authenticated requests do not need additional XSRF-check # Servers without authentication are vulnerable to XSRF return try: return super(IPythonHandler, self).check_xsrf_cookie() except web.HTTPError as e: if self.request.method in {'GET', 'HEAD'}: # Consider Referer a sufficient cross-origin check for GET requests if not self.check_referer(): referer = self.request.headers.get('Referer') if referer: msg = "Blocking Cross Origin request from {}.".format(referer) else: msg = "Blocking request from unknown origin" raise web.HTTPError(403, msg) from e else: raise def check_host(self): """Check the host header if remote access disallowed. Returns True if the request should continue, False otherwise. """ if self.settings.get('allow_remote_access', False): return True # Remove port (e.g. ':8888') from host host = re.match(r'^(.*?)(:\d+)?$', self.request.host).group(1) # Browsers format IPv6 addresses like [::1]; we need to remove the [] if host.startswith('[') and host.endswith(']'): host = host[1:-1] # UNIX socket handling check_host = urldecode_unix_socket_path(host) if check_host.startswith('/') and os.path.exists(check_host): allow = True else: try: addr = ipaddress.ip_address(host) except ValueError: # Not an IP address: check against hostnames allow = host in self.settings.get('local_hostnames', ['localhost']) else: allow = addr.is_loopback if not allow: self.log.warning( ("Blocking request with non-local 'Host' %s (%s). 
" "If the notebook should be accessible at that name, " "set NotebookApp.allow_remote_access to disable the check."), host, self.request.host ) return allow def prepare(self): if not self.check_host(): raise web.HTTPError(403) return super(IPythonHandler, self).prepare() #--------------------------------------------------------------- # template rendering #--------------------------------------------------------------- def get_template(self, name): """Return the jinja template object for a given name""" return self.settings['jinja2_env'].get_template(name) def render_template(self, name, **ns): ns.update(self.template_namespace) template = self.get_template(name) return template.render(**ns) @property def template_namespace(self): return dict( base_url=self.base_url, default_url=self.default_url, ws_url=self.ws_url, logged_in=self.logged_in, allow_password_change=self.settings.get('allow_password_change'), login_available=self.login_available, token_available=bool(self.token), static_url=self.static_url, sys_info=json_sys_info(), contents_js_source=self.contents_js_source, version_hash=self.version_hash, ignore_minified_js=self.ignore_minified_js, xsrf_form_html=self.xsrf_form_html, token=self.token, xsrf_token=self.xsrf_token.decode('utf8'), nbjs_translations=json.dumps(combine_translations( self.request.headers.get('Accept-Language', ''))), **self.jinja_template_vars ) def get_json_body(self): """Return the body of the request as JSON data.""" if not self.request.body: return None # Do we need to call body.decode('utf-8') here? 
body = self.request.body.strip().decode(u'utf-8') try: model = json.loads(body) except Exception as e: self.log.debug("Bad JSON: %r", body) self.log.error("Couldn't parse JSON", exc_info=True) raise web.HTTPError(400, u'Invalid JSON in body of request') from e return model def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') exception = '(unknown)' if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: html = self.render_template('error.html', **ns) self.write(html) class APIHandler(IPythonHandler): """Base class for API handlers""" def prepare(self): if not self.check_origin(): raise web.HTTPError(404) return super(APIHandler, self).prepare() def write_error(self, status_code, **kwargs): """APIHandler errors are JSON, not human pages""" self.set_header('Content-Type', 'application/json') message = responses.get(status_code, 'Unknown HTTP Error') reply = { 'message': message, } exc_info = kwargs.get('exc_info') if exc_info: e = exc_info[1] if isinstance(e, HTTPError): reply['message'] = e.log_message or message reply['reason'] = e.reason else: reply['message'] = 'Unhandled error' reply['reason'] = None reply['traceback'] = ''.join(traceback.format_exception(*exc_info)) self.log.warning(reply['message']) self.finish(json.dumps(reply)) def get_current_user(self): """Raise 403 on API handlers instead of redirecting to human login page""" 
# preserve _user_cache so we don't raise more than once if hasattr(self, '_user_cache'): return self._user_cache self._user_cache = user = super(APIHandler, self).get_current_user() return user def get_login_url(self): # if get_login_url is invoked in an API handler, # that means @web.authenticated is trying to trigger a redirect. # instead of redirecting, raise 403 instead. if not self.current_user: raise web.HTTPError(403) return super(APIHandler, self).get_login_url() @property def content_security_policy(self): csp = '; '.join([ super(APIHandler, self).content_security_policy, "default-src 'none'", ]) return csp # set _track_activity = False on API handlers that shouldn't track activity _track_activity = True def update_api_activity(self): """Update last_activity of API requests""" # record activity of authenticated requests if ( self._track_activity and getattr(self, '_user_cache', None) and self.get_argument('no_track_activity', None) is None ): self.settings['api_last_activity'] = utcnow() def finish(self, *args, **kwargs): self.update_api_activity() self.set_header('Content-Type', 'application/json') return super(APIHandler, self).finish(*args, **kwargs) def options(self, *args, **kwargs): if 'Access-Control-Allow-Headers' in self.settings.get('headers', {}): self.set_header('Access-Control-Allow-Headers', self.settings['headers']['Access-Control-Allow-Headers']) else: self.set_header('Access-Control-Allow-Headers', 'accept, content-type, authorization, x-xsrftoken') self.set_header('Access-Control-Allow-Methods', 'GET, PUT, POST, PATCH, DELETE, OPTIONS') # if authorization header is requested, # that means the request is token-authenticated. # avoid browser-side rejection of the preflight request. # only allow this exception if allow_origin has not been specified # and notebook authentication is enabled. # If the token is not valid, the 'real' request will still be rejected. 
requested_headers = self.request.headers.get('Access-Control-Request-Headers', '').split(',') if requested_headers and any( h.strip().lower() == 'authorization' for h in requested_headers ) and ( # FIXME: it would be even better to check specifically for token-auth, # but there is currently no API for this. self.login_available ) and ( self.allow_origin or self.allow_origin_pat or 'Access-Control-Allow-Origin' in self.settings.get('headers', {}) ): self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) class Template404(IPythonHandler): """Render our 404 template""" def prepare(self): raise web.HTTPError(404) class AuthenticatedFileHandler(IPythonHandler, web.StaticFileHandler): """static files should only be accessible when logged in""" @property def content_security_policy(self): # In case we're serving HTML/SVG, confine any Javascript to a unique # origin so it can't interact with the notebook server. return super(AuthenticatedFileHandler, self).content_security_policy + \ "; sandbox allow-scripts" @web.authenticated def head(self, path): self.check_xsrf_cookie() return super(AuthenticatedFileHandler, self).head(path) @web.authenticated def get(self, path): self.check_xsrf_cookie() if os.path.splitext(path)[1] == '.ipynb' or self.get_argument("download", False): name = path.rsplit('/', 1)[-1] self.set_attachment_header(name) return web.StaticFileHandler.get(self, path) def get_content_type(self): path = self.absolute_path.strip('/') if '/' in path: _, name = path.rsplit('/', 1) else: name = path if name.endswith('.ipynb'): return 'application/x-ipynb+json' else: cur_mime = mimetypes.guess_type(name)[0] if cur_mime == 'text/plain': return 'text/plain; charset=UTF-8' else: return super(AuthenticatedFileHandler, self).get_content_type() def set_headers(self): super(AuthenticatedFileHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments: 
self.add_header("Cache-Control", "no-cache") def compute_etag(self): return None def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. Requires tornado 3.1 Adding to tornado's own handling, forbids the serving of hidden files. """ abs_path = super(AuthenticatedFileHandler, self).validate_absolute_path(root, absolute_path) abs_root = os.path.abspath(root) if is_hidden(abs_path, abs_root) and not self.contents_manager.allow_hidden: self.log.info("Refusing to serve hidden file, via 404 Error, use flag 'ContentsManager.allow_hidden' to enable") raise web.HTTPError(404) return abs_path def json_errors(method): """Decorate methods with this to return GitHub style JSON errors. This should be used on any JSON API on any handler method that can raise HTTPErrors. This will grab the latest HTTPError exception using sys.exc_info and then: 1. Set the HTTP status code based on the HTTPError 2. Create and return a JSON body with a message field describing the error in a human readable form. """ warnings.warn('@json_errors is deprecated in notebook 5.2.0. 
Subclass APIHandler instead.', DeprecationWarning, stacklevel=2, ) @functools.wraps(method) def wrapper(self, *args, **kwargs): self.write_error = types.MethodType(APIHandler.write_error, self) return method(self, *args, **kwargs) return wrapper #----------------------------------------------------------------------------- # File handler #----------------------------------------------------------------------------- # to minimize subclass changes: HTTPError = web.HTTPError class FileFindHandler(IPythonHandler, web.StaticFileHandler): """subclass of StaticFileHandler for serving files from a search path""" # cache search results, don't search for files more than once _static_paths = {} def set_headers(self): super(FileFindHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments or \ any(self.request.path.startswith(path) for path in self.no_cache_paths): self.set_header("Cache-Control", "no-cache") def initialize(self, path, default_filename=None, no_cache_paths=None): self.no_cache_paths = no_cache_paths or [] if isinstance(path, string_types): path = [path] self.root = tuple( os.path.abspath(os.path.expanduser(p)) + os.sep for p in path ) self.default_filename = default_filename def compute_etag(self): return None @classmethod def get_absolute_path(cls, roots, path): """locate a file to serve on our static file search path""" with cls._lock: if path in cls._static_paths: return cls._static_paths[path] try: abspath = os.path.abspath(filefind(path, roots)) except IOError: # IOError means not found return '' cls._static_paths[path] = abspath log().debug("Path %s served from %s"%(path, abspath)) return abspath def validate_absolute_path(self, root, absolute_path): """check if the file should be served (raises 404, 403, etc.)""" if absolute_path == '': raise web.HTTPError(404) for root in self.root: if (absolute_path + os.sep).startswith(root): break return super(FileFindHandler, 
self).validate_absolute_path(root, absolute_path) class APIVersionHandler(APIHandler): def get(self): # not authenticated, so give as few info as possible self.finish(json.dumps({"version":notebook.__version__})) class TrailingSlashHandler(web.RequestHandler): """Simple redirect handler that strips trailing slashes This should be the first, highest priority handler. """ def get(self): path, *rest = self.request.uri.partition("?") # trim trailing *and* leading / # to avoid misinterpreting repeated '//' path = "/" + path.strip("/") new_uri = "".join([path, *rest]) self.redirect(new_uri) post = put = get class FilesRedirectHandler(IPythonHandler): """Handler for redirecting relative URLs to the /files/ handler""" @staticmethod def redirect_to_files(self, path): """make redirect logic a reusable static method so it can be called from other handlers. """ cm = self.contents_manager if cm.dir_exists(path): # it's a *directory*, redirect to /tree url = url_path_join(self.base_url, 'tree', url_escape(path)) else: orig_path = path # otherwise, redirect to /files parts = path.split('/') if not cm.file_exists(path=path) and 'files' in parts: # redirect without files/ iff it would 404 # this preserves pre-2.0-style 'files/' links self.log.warning("Deprecated files/ URL: %s", orig_path) parts.remove('files') path = '/'.join(parts) if not cm.file_exists(path=path): raise web.HTTPError(404) url = url_path_join(self.base_url, 'files', url_escape(path)) self.log.debug("Redirecting %s to %s", self.request.path, url) self.redirect(url) def get(self, path=''): return self.redirect_to_files(self, path) class RedirectWithParams(web.RequestHandler): """Sam as web.RedirectHandler, but preserves URL parameters""" def initialize(self, url, permanent=True): self._url = url self._permanent = permanent def get(self): sep = '&' if '?' in self._url else '?' 
url = sep.join([self._url, self.request.query]) self.redirect(url, permanent=self._permanent) class PrometheusMetricsHandler(IPythonHandler): """ Return prometheus metrics for this notebook server """ @web.authenticated def get(self): self.set_header('Content-Type', prometheus_client.CONTENT_TYPE_LATEST) self.write(prometheus_client.generate_latest(prometheus_client.REGISTRY)) #----------------------------------------------------------------------------- # URL pattern fragments for re-use #----------------------------------------------------------------------------- # path matches any number of `/foo[/bar...]` or just `/` or '' path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))" #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- default_handlers = [ (r".*/", TrailingSlashHandler), (r"api", APIVersionHandler), (r'/(robots\.txt|favicon\.ico)', web.StaticFileHandler), (r'/metrics', PrometheusMetricsHandler) ]
./CrossVul/dataset_final_sorted/CWE-601/py/good_4332_1
crossvul-python_data_bad_3250_0
import itertools

from flask import request, abort, _app_ctx_stack, redirect
from flask_security.core import AnonymousUser
from security_monkey.datastore import User

try:
    from flask.ext.login import current_user
except ImportError:
    current_user = None

from .models import RBACRole, RBACUserMixin
from . import anonymous

from flask import Response
import json


class AccessControlList(object):
    """
    This class records rules for access control.
    """
    def __init__(self):
        # Allowed permissions: (role_name, method, view_func_name) triples.
        self._allowed = []
        # Names of view functions exempted from permission checks.
        self._exempt = []
        # Becomes True once RBAC._setup_acl has populated the rules.
        self.seted = False

    def allow(self, role, method, resource, with_children=True):
        """Add allowing rules.

        :param role: Role of this rule.
        :param method: Method to allow in rule, include GET, POST, PUT etc.
        :param resource: Resource also view function.
        :param with_children: Allow role's children in rule as well
                              if with_children is `True`
        """
        if with_children:
            for r in role.get_children():
                permission = (r.name, method, resource)
                if permission not in self._allowed:
                    self._allowed.append(permission)
        permission = (role.name, method, resource)
        if permission not in self._allowed:
            self._allowed.append(permission)

    def exempt(self, view_func):
        """Exempt a view function from being checked permission

        :param view_func: The view function exempt from checking.
        """
        if view_func not in self._exempt:
            self._exempt.append(view_func)

    def is_allowed(self, role, method, resource):
        """Check whether role is allowed to access resource

        :param role: Role to be checked.
        :param method: Method to be checked.
        :param resource: View function to be checked.
        """
        return (role, method, resource) in self._allowed

    def is_exempt(self, view_func):
        """Return whether view_func is exempted.

        :param view_func: View function to be checked.
        """
        return view_func in self._exempt


class _RBACState(object):
    """Records configuration for Flask-RBAC"""
    def __init__(self, rbac, app):
        self.rbac = rbac
        self.app = app


class RBAC(object):
    """
    This class implements role-based access control module in Flask.

    Initialize Flask-RBAC like this::

        app = Flask(__name__)
        rbac = RBAC(app)

    :param app: the Flask object
    """

    # Models used for role lookup and user type validation.
    _role_model = RBACRole
    _user_model = RBACUserMixin

    def __init__(self, app):
        self.acl = AccessControlList()
        # Rules declared via @rbac.allow before the ACL is built; each entry
        # is (role_name, method, view_func_name, with_children).
        self.before_acl = []

        self.app = app
        self.init_app(app)

    def init_app(self, app):
        """Register this RBAC instance on ``app``.

        Adds (RBAC, app) to the Flask extensions registry, allows anonymous
        GET access to the static endpoint, and installs the request hooks
        that build the ACL and authenticate every request.
        """
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['rbac'] = _RBACState(self, app)

        self.acl.allow(anonymous, 'GET', app.view_functions['static'].__name__)
        app.before_first_request(self._setup_acl)

        app.before_request(self._authenticate)

    def has_permission(self, method, endpoint, user=None):
        """Return whether the current user can access the resource.

        Example::

            @app.route('/some_url', methods=['GET', 'POST'])
            @rbac.allow(['anonymous'], ['GET'])
            def a_view_func():
                return Response('Blah Blah...')

        If you are not logged in, `rbac.has_permission('GET', 'a_view_func')`
        returns True, while `rbac.has_permission('POST', 'a_view_func')`
        returns False.

        :param method: The method wait to check.
        :param endpoint: The application endpoint.
        :param user: user who you need to check. Current user by default.
        """
        app = self.get_app()
        _user = user or current_user
        roles = _user.get_roles()
        view_func = app.view_functions[endpoint]
        return self._check_permission(roles, method, view_func)

    def check_perm(self, role, method, callback=None):
        """Decorator: deny access at decoration time unless ``role`` may use
        ``method`` on the wrapped view; ``callback`` (if callable) is invoked
        instead of the default deny hook.
        """
        def decorator(view_func):
            if not self._check_permission([role], method, view_func):
                if callable(callback):
                    callback()
                else:
                    self._deny_hook()
            return view_func
        return decorator

    def allow(self, roles, methods, with_children=True):
        """Decorator: allow roles to access the view func with it.

        :param roles: List, each name of roles. Please note that, `anonymous`
            is referred to anonymous users. If you add `anonymous` to the
            rule, everyone can access the resource, unless you deny other
            roles.
        :param methods: List, each name of methods.
            methods is valid in ['GET', 'POST', 'PUT', 'DELETE']
        :param with_children: Whether allow children of roles as well.
            True by default.
        """
        def decorator(view_func):
            _methods = [m.upper() for m in methods]
            # Record one pending rule per (role, method) pair; the actual ACL
            # entries are materialized lazily in _setup_acl.
            for r, m, v in itertools.product(roles, _methods, [view_func.__name__]):
                self.before_acl.append((r, m, v, with_children))
            return view_func
        return decorator

    def exempt(self, view_func):
        """
        Decorator function

        Exempt a view function from being checked permission.
        """
        self.acl.exempt(view_func.__name__)
        return view_func

    def get_app(self, reference_app=None):
        """
        Helper to look up an app.
        """
        if reference_app is not None:
            return reference_app
        if self.app is not None:
            return self.app
        ctx = _app_ctx_stack.top
        if ctx is not None:
            return ctx.app
        raise RuntimeError('application not registered on rbac '
                           'instance and no application bound '
                           'to current context')

    def _authenticate(self):
        """before_request hook: check the current user's permission for the
        requested endpoint, returning the deny response when not permitted.
        """
        app = self.get_app()
        # NOTE(review): asserts are stripped when Python runs with -O; these
        # are sanity checks for misconfiguration, not input validation.
        assert app, "Please initialize your application into Flask-RBAC."
        assert self._role_model, "Please set role model before authenticate."
        assert self._user_model, "Please set user model before authenticate."

        user = current_user
        if not isinstance(user._get_current_object(), self._user_model) and not isinstance(user._get_current_object(), AnonymousUser):
            # BUG FIX: report the expected model class itself, not its
            # metaclass (``self._user_model.__class__`` rendered e.g.
            # ``<class 'type'>``).
            raise TypeError(
                "%s is not an instance of %s" %
                (user, self._user_model))

        endpoint = request.endpoint
        resource = app.view_functions.get(endpoint, None)

        if not resource:
            abort(404)
        method = request.method

        # Users without a get_roles method (e.g. plain anonymous users) are
        # treated as holding only the anonymous role.
        if not hasattr(user, 'get_roles'):
            roles = [anonymous]
        else:
            roles = user.get_roles()

        permit = self._check_permission(roles, method, resource)
        if not permit:
            return self._deny_hook(resource=resource)

    def _check_permission(self, roles, method, resource):
        """Return True when any of ``roles`` (plus anonymous) is allowed to
        use ``method`` (or the '*' wildcard) on ``resource``.
        """
        resource = resource.__name__
        if self.acl.is_exempt(resource):
            return True

        # Build the ACL lazily in case no request triggered the
        # before_first_request hook yet.
        if not self.acl.seted:
            self._setup_acl()

        _roles = set()
        _methods = {'*', method}
        _resources = {None, resource}

        # Everyone implicitly carries the anonymous role.
        _roles.add(anonymous)
        _roles.update(roles)

        for r, m, res in itertools.product(_roles, _methods, _resources):
            if self.acl.is_allowed(r.name, m, res):
                return True

        return False

    def _deny_hook(self, resource=None):
        """Build the JSON 401/403 response returned on a denied request."""
        app = self.get_app()
        if current_user.is_authenticated():
            status = 403
        else:
            status = 401
        #abort(status)

        # Point the client at the login URL appropriate for the deployment
        # topology (behind nginx vs. direct API access).
        if app.config.get('FRONTED_BY_NGINX'):
            url = "https://{}:{}{}".format(app.config.get('FQDN'), app.config.get('NGINX_PORT'), '/login')
        else:
            url = "http://{}:{}{}".format(app.config.get('FQDN'), app.config.get('API_PORT'), '/login')
        if current_user.is_authenticated():
            auth_dict = {
                "authenticated": True,
                "user": current_user.email,
                "roles": current_user.role,
            }
        else:
            auth_dict = {
                "authenticated": False,
                "user": None,
                "url": url
            }
        return Response(response=json.dumps({"auth": auth_dict}), status=status, mimetype="application/json")

    def _setup_acl(self):
        """Materialize every rule queued by @rbac.allow into the ACL."""
        for rn, method, resource, with_children in self.before_acl:
            role = self._role_model.get_by_name(rn)
            self.acl.allow(role, method, resource, with_children)
        self.acl.seted = True
./CrossVul/dataset_final_sorted/CWE-601/py/bad_3250_0
crossvul-python_data_bad_2548_0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2012  Red Hat, Inc.
# Copyright (C) 2008  Ricky Zhou
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
Miscellaneous functions of use on a TurboGears Server

.. versionchanged:: 0.3.14
    Save the original turbogears.url function as :func:`fedora.tg.util.tg_url`
.. versionchanged:: 0.3.17
    Renamed from fedora.tg.util
.. versionchanged:: 0.3.25
    Renamed from fedora.tg.tg1utils

.. moduleauthor:: Toshio Kuratomi <tkuratom@redhat.com>
.. moduleauthor:: Ricky Zhou <ricky@fedoraproject.org>
'''
from itertools import chain
import cgi
import os

import cherrypy
from cherrypy import request
from decorator import decorator
import pkg_resources

import turbogears
from turbogears import flash, redirect, config, identity
import turbogears.util as tg_util
from turbogears.controllers import check_app_root
from turbogears.identity.exceptions import RequestRequiredException

import six
# parse_qsl added: replaces the long-deprecated cgi.parse_qsl (removed from
# the stdlib in Python 3.8).
from six.moves.urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

# Save this for people who need the original url() function
tg_url = turbogears.url


def add_custom_stdvars(new_vars):
    # Expose fedora_template() to templates as a standard variable.
    return new_vars.update({'fedora_template': fedora_template})


def url(tgpath, tgparams=None, **kwargs):
    '''Computes URLs.

    This is a replacement for :func:`turbogears.controllers.url` (aka
    :func:`tg.url` in the template).  In addition to the functionality that
    :func:`tg.url` provides, it adds a token to prevent :term:`CSRF` attacks.

    :arg tgpath:  a list or a string. If the path is absolute (starts
        with a "/"), the :attr:`server.webpath`, :envvar:`SCRIPT_NAME` and
        the approot of the application are prepended to the path. In order for
        the approot to be detected properly, the root object should extend
        :class:`turbogears.controllers.RootController`.
    :kwarg tgparams: See param: ``kwargs``
    :kwarg kwargs: Query parameters for the URL can be passed in as a
        dictionary in the second argument *or* as keyword parameters.
        Values which are a list or a tuple are used to create multiple
        key-value pairs.
    :returns: The changed path

    .. versionadded:: 0.3.10
       Modified from turbogears.controllers.url for :ref:`CSRF-Protection`
    '''
    if not isinstance(tgpath, six.string_types):
        tgpath = '/'.join(list(tgpath))
    if tgpath.startswith('/'):
        webpath = (config.get('server.webpath') or '').rstrip('/')
        if tg_util.request_available():
            check_app_root()
            tgpath = request.app_root + tgpath
            try:
                webpath += request.wsgi_environ['SCRIPT_NAME'].rstrip('/')
            except (AttributeError, KeyError):  # pylint: disable-msg=W0704
                # :W0704: Lack of wsgi environ is fine... we still have
                # server.webpath
                pass
        tgpath = webpath + tgpath
    if tgparams is None:
        tgparams = kwargs
    else:
        try:
            tgparams = tgparams.copy()
            tgparams.update(kwargs)
        except AttributeError:
            raise TypeError(
                'url() expects a dictionary for query parameters')
    args = []
    # Add the _csrf_token
    try:
        if identity.current.csrf_token:
            tgparams.update({'_csrf_token': identity.current.csrf_token})
    except RequestRequiredException:  # pylint: disable-msg=W0704
        # :W0704: If we are outside of a request (called from non-controller
        # methods/ templates) just don't set the _csrf_token.
        pass

    # Check for query params in the current url; any pre-existing
    # _csrf_token is dropped so the fresh one above wins.
    query_params = six.iteritems(tgparams)
    scheme, netloc, path, params, query_s, fragment = urlparse(tgpath)
    if query_s:
        query_params = chain(
            (p for p in parse_qsl(query_s) if p[0] != '_csrf_token'),
            query_params)

    for key, value in query_params:
        if value is None:
            continue
        if isinstance(value, (list, tuple)):
            pairs = [(key, v) for v in value]
        else:
            pairs = [(key, value)]
        for key, value in pairs:
            if value is None:
                continue
            # BUG FIX: the original tested ``isinstance(value, unicode)``,
            # which raises NameError on Python 3.  Encoding to utf8 is only
            # needed on Python 2 (where str() of a unicode value could fail);
            # on Python 3 str(value) already produces the right text.
            if six.PY2 and isinstance(value, six.text_type):
                value = value.encode('utf8')
            args.append((key, str(value)))
    query_string = urlencode(args, True)
    tgpath = urlunparse((scheme, netloc, path, params, query_string,
                         fragment))

    return tgpath


# this is taken from turbogears 1.1 branch
def _get_server_name():
    """Return name of the server this application runs on.

    Respects 'Host' and 'X-Forwarded-Host' header.

    See the docstring of the 'absolute_url' function for more information.

    .. note:: This comes from turbogears 1.1 branch.  It is only needed for
        _tg_absolute_url().  If we find that turbogears.get_server_name()
        exists, we replace this function with that one.
    """
    get = config.get
    h = request.headers
    host = get('tg.url_domain') or h.get('X-Forwarded-Host', h.get('Host'))
    if not host:
        host = '%s:%s' % (get('server.socket_host', 'localhost'),
                          get('server.socket_port', 8080))
    return host


# this is taken from turbogears 1.1 branch
def tg_absolute_url(tgpath='/', params=None, **kw):
    """Return absolute URL (including schema and host to this server).

    Tries to account for 'Host' header and reverse proxying
    ('X-Forwarded-Host').

    The host name is determined this way:

    * If the config setting 'tg.url_domain' is set and non-null, use
      this value.
    * Else, if the 'base_url_filter.use_x_forwarded_host' config setting is
      True, use the value from the 'Host' or 'X-Forwarded-Host' request
      header.
    * Else, if config setting 'base_url_filter.on' is True and
      'base_url_filter.base_url' is non-null, use its value for the host AND
      scheme part of the URL.
    * As a last fallback, use the value of 'server.socket_host' and
      'server.socket_port' config settings (defaults to 'localhost:8080').

    The URL scheme ('http' or 'https') used is determined in the following
    way:

    * If 'base_url_filter.base_url' is used, use the scheme from this URL.
    * If there is a 'X-Use-SSL' request header, use 'https'.
    * Else, if the config setting 'tg.url_scheme' is set, use its value.
    * Else, use the value of 'cherrypy.request.scheme'.

    .. note:: This comes from turbogears 1.1 branch with one change: we
        call tg_url() rather than turbogears.url() so that it never adds
        the csrf_token

    .. versionadded:: 0.3.19
       Modified from turbogears.absolute_url() for :ref:`CSRF-Protection`
    """
    get = config.get
    use_xfh = get('base_url_filter.use_x_forwarded_host', False)
    if request.headers.get('X-Use-SSL'):
        scheme = 'https'
    else:
        scheme = get('tg.url_scheme')
    if not scheme:
        scheme = request.scheme
    base_url = '%s://%s' % (scheme, _get_server_name())
    if get('base_url_filter.on', False) and not use_xfh:
        base_url = get('base_url_filter.base_url').rstrip('/')
    return '%s%s' % (base_url, tg_url(tgpath, params, **kw))


def absolute_url(tgpath='/', params=None, **kw):
    """Return absolute URL (including schema and host to this server).

    Tries to account for 'Host' header and reverse proxying
    ('X-Forwarded-Host').

    The host name is determined this way:

    * If the config setting 'tg.url_domain' is set and non-null, use
      this value.
    * Else, if the 'base_url_filter.use_x_forwarded_host' config setting is
      True, use the value from the 'Host' or 'X-Forwarded-Host' request
      header.
    * Else, if config setting 'base_url_filter.on' is True and
      'base_url_filter.base_url' is non-null, use its value for the host AND
      scheme part of the URL.
    * As a last fallback, use the value of 'server.socket_host' and
      'server.socket_port' config settings (defaults to 'localhost:8080').

    The URL scheme ('http' or 'https') used is determined in the following
    way:

    * If 'base_url_filter.base_url' is used, use the scheme from this URL.
    * If there is a 'X-Use-SSL' request header, use 'https'.
    * Else, if the config setting 'tg.url_scheme' is set, use its value.
    * Else, use the value of 'cherrypy.request.scheme'.

    .. versionadded:: 0.3.19
       Modified from turbogears.absolute_url() for :ref:`CSRF-Protection`
    """
    # Run the absolute URL through url() so the CSRF token is appended.
    return url(tg_absolute_url(tgpath, params, **kw))


def enable_csrf():
    '''A startup function to setup :ref:`CSRF-Protection`.

    This should be run at application startup.  Code like the following in
    the start-APP script or the method in :file:`commands.py` that starts it::

        from turbogears import startup
        from fedora.tg.util import enable_csrf
        startup.call_on_startup.append(enable_csrf)

    If we can get the :ref:`CSRF-Protection` into upstream :term:`TurboGears`,
    we might be able to remove this in the future.

    .. versionadded:: 0.3.10
       Added to enable :ref:`CSRF-Protection`
    '''
    # Override the turbogears.url function with our own
    # Note, this also changes turbogears.absolute_url since that calls
    # turbogears.url
    turbogears.url = url
    turbogears.controllers.url = url

    # Ignore the _csrf_token parameter
    ignore = config.get('tg.ignore_parameters', [])
    if '_csrf_token' not in ignore:
        ignore.append('_csrf_token')
        config.update({'tg.ignore_parameters': ignore})

    # Add a function to the template tg stdvars that looks up a template.
    turbogears.view.variable_providers.append(add_custom_stdvars)


def request_format():
    '''Return the output format that was requested by the user.

    The user is able to specify a specific output format using either the
    ``Accept:`` HTTP header or the ``tg_format`` query parameter.  This
    function checks both of those to determine what format the reply should
    be in.

    :rtype: string
    :returns: The requested format.  If none was specified, 'default' is
        returned

    .. versionchanged:: 0.3.17
        Return symbolic names for json, html, xhtml, and xml instead of
        letting raw mime types through
    '''
    # Explicit ?tg_format=... query parameter wins over the Accept header.
    output_format = cherrypy.request.params.get('tg_format', '').lower()
    if not output_format:
        ### TODO: Two problems with this:
        # 1) TG lets this be extended via as_format and accept_format.  We
        # need tie into that as well somehow.
        # 2) Decide whether to standardize on "json" or "application/json"
        accept = tg_util.simplify_http_accept_header(
            request.headers.get('Accept', 'default').lower())
        if accept in ('text/javascript', 'application/json'):
            output_format = 'json'
        elif accept == 'text/html':
            output_format = 'html'
        elif accept == 'text/plain':
            output_format = 'plain'
        elif accept == 'text/xhtml':
            output_format = 'xhtml'
        elif accept == 'text/xml':
            output_format = 'xml'
        else:
            output_format = accept
    return output_format


def jsonify_validation_errors():
    '''Return an error for :term:`JSON` if validation failed.

    This function checks for two things:

    1) We're expected to return :term:`JSON` data.
    2) There were errors in the validation process.

    If both of those are true, this function constructs a response that
    will return the validation error messages as :term:`JSON` data.

    All controller methods that are error_handlers need to use this::

        @expose(template='templates.numberform')
        def enter_number(self, number):
            errors = fedora.tg.util.jsonify_validation_errors()
            if errors:
                return errors
            [...]

        @expose(allow_json=True)
        @error_handler(enter_number)
        @validate(form=number_form)
        def save(self, number):
            return dict(success=True)

    :rtype: None or dict
    :Returns: None if there are no validation errors or :term:`JSON` isn't
        requested, otherwise a dictionary with the error that's suitable for
        return from the controller.  The error message is set in tg_flash
        whether :term:`JSON` was requested or not.
    '''
    # Check for validation errors
    errors = getattr(cherrypy.request, 'validation_errors', None)
    if not errors:
        return None

    # Set the message for both html and json output
    message = u'\n'.join([u'%s: %s' % (param, msg) for param, msg in
                          errors.items()])
    format = request_format()
    if format in ('html', 'xhtml'):
        # BUG FIX: str.translate() returns a new string rather than mutating
        # in place; the original discarded the result, so newlines were never
        # converted to <br /> for html output.
        message = message.translate({ord('\n'): u'<br />\n'})
    flash(message)

    # If json, return additional information to make this an exception
    if format == 'json':
        # Note: explicit setting of tg_template is needed in TG < 1.0.4.4
        # A fix has been applied for TG-1.0.4.5
        return dict(exc='Invalid', tg_template='json')
    return None


def json_or_redirect(forward_url):
    '''If :term:`JSON` is requested, return a dict, otherwise redirect.

    This is a decorator to use with a method that returns :term:`JSON` by
    default.  If :term:`JSON` is requested, then it will return the dict from
    the method.  If :term:`JSON` is not requested, it will redirect to the
    given URL.  The method that is decorated should be constructed so that it
    calls turbogears.flash() with a message that will be displayed on the
    forward_url page.

    Use it like this::

        import turbogears

        @json_or_redirect('http://localhost/calc/')
        @expose(allow_json=True)
        def divide(self, dividend, divisor):
            try:
                answer = dividend * 1.0 / divisor
            except ZeroDivisionError:
                turbogears.flash('Division by zero not allowed')
                return dict(exc='ZeroDivisionError')
            turbogears.flash('The quotient is %s' % answer)
            return dict(quotient=answer)

    In the example, we return either an exception or an answer, using
    :func:`turbogears.flash` to tell people of the result in either case.  If
    :term:`JSON` data is requested, the user will get back a :term:`JSON`
    string with the proper information.  If html is requested, we will be
    redirected to 'http://localhost/calc/' where the flashed message will be
    displayed.

    :arg forward_url: If :term:`JSON` was not requested, redirect to this URL
        after.

    .. versionadded:: 0.3.7
       To make writing methods that use validation easier
    '''
    def call(func, *args, **kwargs):
        if request_format() == 'json':
            return func(*args, **kwargs)
        else:
            # Run the controller for its side effects (flash message), then
            # redirect; turbogears.redirect works by raising an exception.
            func(*args, **kwargs)
            raise redirect(forward_url)
    return decorator(call)


# Prefer the upstream implementation when the installed TurboGears has one.
if hasattr(turbogears, 'get_server_name'):
    _get_server_name = turbogears.get_server_name


def fedora_template(template, template_type='genshi'):
    '''Function to return the path to a template.

    :arg template: filename of the template itself.  Ex: login.html
    :kwarg template_type: template language we need the template written in
        Defaults to 'genshi'
    :returns: filesystem path to the template
    '''
    # :E1101: pkg_resources does have resource_filename
    # pylint: disable-msg=E1101
    return pkg_resources.resource_filename(
        'fedora', os.path.join('tg', 'templates', template_type, template))


__all__ = ('add_custom_stdvars', 'absolute_url', 'enable_csrf',
           'fedora_template', 'jsonify_validation_errors', 'json_or_redirect',
           'request_format', 'tg_absolute_url', 'tg_url', 'url')
./CrossVul/dataset_final_sorted/CWE-601/py/bad_2548_0
crossvul-python_data_good_1953_1
import re
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar

from .web_exceptions import HTTPMove, HTTPPermanentRedirect
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import SystemRoute

__all__ = (
    "middleware",
    "normalize_path_middleware",
)

if TYPE_CHECKING:  # pragma: no cover
    from .web_app import Application

_Func = TypeVar("_Func")


async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
    """Return (True, cloned request) when ``path`` resolves to a real route.

    The request is cloned with the candidate path and re-run through the
    router; on failure the original request is returned unchanged.
    """
    alt_request = request.clone(rel_url=path)

    match_info = await request.app.router.resolve(alt_request)
    alt_request._match_info = match_info  # type: ignore[assignment]

    if match_info.http_exception is None:
        return True, alt_request

    return False, request


def middleware(f: _Func) -> _Func:
    """Deprecated no-op decorator kept for backward compatibility."""
    warnings.warn(
        "Middleware decorator is deprecated since 4.0 "
        "and its behaviour is default, "
        "you can simply remove this decorator.",
        DeprecationWarning,
        stacklevel=2,
    )
    return f


_Handler = Callable[[Request], Awaitable[StreamResponse]]
_Middleware = Callable[[Request, _Handler], Awaitable[StreamResponse]]


def normalize_path_middleware(
    *,
    append_slash: bool = True,
    remove_slash: bool = False,
    merge_slashes: bool = True,
    redirect_class: Type[HTTPMove] = HTTPPermanentRedirect,
) -> _Middleware:
    """
    Middleware factory which produces a middleware that normalizes
    the path of a request. By normalizing it means:

        - Add or remove a trailing slash to the path.
        - Double slashes are replaced by one.

    The middleware returns as soon as it finds a path that resolves
    correctly. The order, when both merge and append/remove are enabled, is:
        1) merge slashes
        2) append/remove slash
        3) both merge slashes and append/remove slash.
    If the path resolves with at least one of those conditions, it will
    redirect to the new path.

    Only one of `append_slash` and `remove_slash` can be enabled. If both
    are `True` the factory will raise an assertion error.

    If `append_slash` is `True` the middleware will append a slash when
    needed. If a resource is defined with trailing slash and the request
    comes without it, it will append it automatically.

    If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
    the middleware will remove trailing slashes and redirect if the resource
    is defined.

    If merge_slashes is True, merge multiple consecutive slashes in the
    path into one.
    """
    correct_configuration = not (append_slash and remove_slash)
    assert correct_configuration, "Cannot both remove and append slash"

    async def impl(request: Request, handler: _Handler) -> StreamResponse:
        # Only attempt normalization when routing failed (SystemRoute is the
        # 404/405 fallback); matched requests pass straight through.
        if isinstance(request.match_info.route, SystemRoute):
            paths_to_check = []
            # Normalize only the path portion; the query string is preserved
            # verbatim and re-attached to the redirect target.
            if "?" in request.raw_path:
                path, query = request.raw_path.split("?", 1)
                query = "?" + query
            else:
                query = ""
                path = request.raw_path
            # Candidate paths in documented priority order: merge, then
            # append/remove, then both combined.
            if merge_slashes:
                paths_to_check.append(re.sub("//+", "/", path))
            if append_slash and not request.path.endswith("/"):
                paths_to_check.append(path + "/")
            if remove_slash and request.path.endswith("/"):
                paths_to_check.append(path[:-1])
            if merge_slashes and append_slash:
                paths_to_check.append(re.sub("//+", "/", path + "/"))
            if merge_slashes and remove_slash and path.endswith("/"):
                merged_slashes = re.sub("//+", "/", path)
                paths_to_check.append(merged_slashes[:-1])

            for path in paths_to_check:
                # Collapse leading slashes so the redirect target can never
                # become a protocol-relative URL ('//host') — open redirect.
                path = re.sub("^//+", "/", path)  # SECURITY: GHSA-v6wp-4m6f-gcjg
                resolves, request = await _check_request_resolves(request, path)
                if resolves:
                    raise redirect_class(request.raw_path + query)

        return await handler(request)

    return impl


def _fix_request_current_app(app: "Application") -> _Middleware:
    """Middleware that pins ``app`` as the request's current application
    for the duration of handler execution (used for sub-applications)."""
    async def impl(request: Request, handler: _Handler) -> StreamResponse:
        with request.match_info.set_current_app(app):
            return await handler(request)

    return impl
./CrossVul/dataset_final_sorted/CWE-601/py/good_1953_1
crossvul-python_data_bad_1915_8
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for interacting with Identity Servers""" import logging import urllib.parse from typing import Awaitable, Callable, Dict, List, Optional, Tuple from synapse.api.errors import ( CodeMessageException, Codes, HttpResponseException, SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, Requester from synapse.util import json_decoder from synapse.util.hash import sha256_and_url_safe_base64 from synapse.util.stringutils import assert_valid_client_secret, random_string from ._base import BaseHandler logger = logging.getLogger(__name__) id_server_scheme = "https://" class IdentityHandler(BaseHandler): def __init__(self, hs): super().__init__(hs) self.http_client = SimpleHttpClient(hs) # We create a blacklisting instance of SimpleHttpClient for contacting identity # servers specified by clients self.blacklisting_http_client = SimpleHttpClient( hs, ip_blacklist=hs.config.federation_ip_range_blacklist ) self.federation_http_client = hs.get_http_client() self.hs = hs async def threepid_from_creds( self, id_server: str, creds: Dict[str, str] ) -> Optional[JsonDict]: """ Retrieve and validate a threepid identifier from a "credentials" dictionary against a 
given identity server Args: id_server: The identity server to validate 3PIDs against. Must be a complete URL including the protocol (http(s)://) creds: Dictionary containing the following keys: * client_secret|clientSecret: A unique secret str provided by the client * sid: The ID of the validation session Returns: A dictionary consisting of response params to the /getValidated3pid endpoint of the Identity Service API, or None if the threepid was not found """ client_secret = creds.get("client_secret") or creds.get("clientSecret") if not client_secret: raise SynapseError( 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM ) assert_valid_client_secret(client_secret) session_id = creds.get("sid") if not session_id: raise SynapseError( 400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM ) query_params = {"sid": session_id, "client_secret": client_secret} url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid" try: data = await self.http_client.get_json(url, query_params) except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.info( "%s returned %i for threepid validation for: %s", id_server, e.code, creds, ) return None # Old versions of Sydent return a 200 http code even on a failed validation # check. 
Thus, in addition to the HttpResponseException check above (which # checks for non-200 errors), we need to make sure validation_session isn't # actually an error, identified by the absence of a "medium" key # See https://github.com/matrix-org/sydent/issues/215 for details if "medium" in data: return data logger.info("%s reported non-validated threepid: %s", id_server, creds) return None async def bind_threepid( self, client_secret: str, sid: str, mxid: str, id_server: str, id_access_token: Optional[str] = None, use_v2: bool = True, ) -> JsonDict: """Bind a 3PID to an identity server Args: client_secret: A unique secret provided by the client sid: The ID of the validation session mxid: The MXID to bind the 3PID to id_server: The domain of the identity server to query id_access_token: The access token to authenticate to the identity server with, if necessary. Required if use_v2 is true use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True Returns: The response from the identity server """ logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server) # If an id_access_token is not supplied, force usage of v1 if id_access_token is None: use_v2 = False # Decide which API endpoint URLs to use headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} if use_v2: bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) headers["Authorization"] = create_id_access_token_header(id_access_token) # type: ignore else: bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) try: # Use the blacklisting http client as this call is only to identity servers # provided by a client data = await self.blacklisting_http_client.post_json_get_json( bind_url, bind_data, headers=headers ) # Remember where we bound the threepid await self.store.add_user_bound_threepid( user_id=mxid, medium=data["medium"], address=data["address"], id_server=id_server, ) return data except HttpResponseException as e: if 
e.code != 404 or not use_v2:
                logger.error("3PID bind failed with Matrix error: %r", e)
                raise e.to_synapse_error()
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except CodeMessageException as e:
            # XXX WAT?  -- presumably e.msg carries the raw JSON response body
            # from the identity server; verify before relying on this.
            data = json_decoder.decode(e.msg)  # XXX WAT?
            return data

        logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url)

        # Retry once against the legacy v1 endpoint (no access token).
        res = await self.bind_threepid(
            client_secret, sid, mxid, id_server, id_access_token, use_v2=False
        )
        return res

    async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool:
        """Attempt to remove a 3PID from an identity server, or if one is not
        provided, all identity servers we're aware the binding is present on

        Args:
            mxid: Matrix user ID of binding to be removed
            threepid: Dict with medium & address of binding to be
                removed, and an optional id_server.

        Raises:
            SynapseError: If we failed to contact the identity server

        Returns:
            True on success, otherwise False if the identity
            server doesn't support unbinding (or no identity server found to
            contact).
        """
        if threepid.get("id_server"):
            id_servers = [threepid["id_server"]]
        else:
            # No explicit server: unbind everywhere we recorded the binding.
            id_servers = await self.store.get_id_servers_user_bound(
                user_id=mxid, medium=threepid["medium"], address=threepid["address"]
            )

        # We don't know where to unbind, so we don't have a choice but to return
        if not id_servers:
            return False

        changed = True
        for id_server in id_servers:
            # `&=` on bools: the overall result is True only if every
            # contacted server reported success.
            changed &= await self.try_unbind_threepid_with_id_server(
                mxid, threepid, id_server
            )

        return changed

    async def try_unbind_threepid_with_id_server(
        self, mxid: str, threepid: dict, id_server: str
    ) -> bool:
        """Removes a binding from an identity server

        Args:
            mxid: Matrix user ID of binding to be removed
            threepid: Dict with medium & address of binding to be removed
            id_server: Identity server to unbind from

        Raises:
            SynapseError: If we failed to contact the identity server

        Returns:
            True on success, otherwise False if the identity
            server doesn't support unbinding
        """
        url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
        url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")

        content = {
            "mxid": mxid,
            "threepid": {"medium": threepid["medium"], "address": threepid["address"]},
        }

        # we abuse the federation http client to sign the request, but we have to send it
        # using the normal http client since we don't want the SRV lookup and want normal
        # 'browser-like' HTTPS.
        auth_headers = self.federation_http_client.build_auth_headers(
            destination=None,
            method=b"POST",
            url_bytes=url_bytes,
            content=content,
            destination_is=id_server.encode("ascii"),
        )
        headers = {b"Authorization": auth_headers}

        try:
            # Use the blacklisting http client as this call is only to identity servers
            # provided by a client
            await self.blacklisting_http_client.post_json_get_json(
                url, content, headers
            )
            changed = True
        except HttpResponseException as e:
            changed = False
            if e.code in (400, 404, 501):
                # The remote server probably doesn't support unbinding (yet)
                logger.warning("Received %d response while unbinding threepid", e.code)
            else:
                logger.error("Failed to unbind threepid on identity server: %s", e)
                raise SynapseError(500, "Failed to contact identity server")
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")

        # Remove our local record of the binding even if the remote signalled
        # "unsupported" above (changed == False); we only skip this on a hard
        # failure, which raises before reaching here.
        await self.store.remove_user_bound_threepid(
            user_id=mxid,
            medium=threepid["medium"],
            address=threepid["address"],
            id_server=id_server,
        )

        return changed

    async def send_threepid_validation(
        self,
        email_address: str,
        client_secret: str,
        send_attempt: int,
        send_email_func: Callable[[str, str, str, str], Awaitable],
        next_link: Optional[str] = None,
    ) -> str:
        """Send a threepid validation email for password reset or
        registration purposes

        Args:
            email_address: The user's email address
            client_secret: The provided client secret
            send_attempt: Which send attempt this is
            send_email_func: A function that takes an email address, token,
                client_secret and session_id, sends an email and returns an
                Awaitable.
            next_link: The URL to redirect the user to after validation

        Returns:
            The new session_id upon success

        Raises:
            SynapseError is an error occurred when sending the email
        """
        # Check that this email/client_secret/send_attempt combo is new or
        # greater than what we've seen previously
        session = await self.store.get_threepid_validation_session(
            "email", client_secret, address=email_address, validated=False
        )

        # Check to see if a session already exists and that it is not yet
        # marked as validated
        if session and session.get("validated_at") is None:
            session_id = session["session_id"]
            last_send_attempt = session["last_send_attempt"]

            # Check that the send_attempt is higher than previous attempts
            if send_attempt <= last_send_attempt:
                # If not, just return a success without sending an email
                return session_id
        else:
            # An non-validated session does not exist yet.
            # Generate a session id
            session_id = random_string(16)

        if next_link:
            # Manipulate the next_link to add the sid, because the caller won't get
            # it until we send a response, by which time we've sent the mail.
            # NOTE(review): next_link originates from the client request; this
            # method does not validate it against an open-redirect allow-list —
            # confirm the callers validate it before it reaches here.
            if "?" in next_link:
                next_link += "&"
            else:
                next_link += "?"
            next_link += "sid=" + urllib.parse.quote(session_id)

        # Generate a new validation token
        token = random_string(32)

        # Send the mail with the link containing the token, client_secret
        # and session_id
        try:
            await send_email_func(email_address, token, client_secret, session_id)
        except Exception:
            logger.exception(
                "Error sending threepid validation email to %s", email_address
            )
            raise SynapseError(500, "An error was encountered when sending the email")

        token_expires = (
            self.hs.get_clock().time_msec()
            + self.hs.config.email_validation_token_lifetime
        )

        await self.store.start_or_continue_validation_session(
            "email",
            email_address,
            session_id,
            client_secret,
            send_attempt,
            next_link,
            token,
            token_expires,
        )

        return session_id

    async def requestEmailToken(
        self,
        id_server: str,
        email: str,
        client_secret: str,
        send_attempt: int,
        next_link: Optional[str] = None,
    ) -> JsonDict:
        """
        Request an external server send an email on our behalf for the purposes of
        threepid validation.

        Args:
            id_server: The identity server to proxy to
            email: The email to send the message to
            client_secret: The unique client_secret sends by the user
            send_attempt: Which attempt this is
            next_link: A link to redirect the user to once they submit the token

        Returns:
            The json response body from the server
        """
        params = {
            "email": email,
            "client_secret": client_secret,
            "send_attempt": send_attempt,
        }
        if next_link:
            params["next_link"] = next_link

        if self.hs.config.using_identity_server_from_trusted_list:
            # Warn that a deprecated config option is in use
            logger.warning(
                'The config option "trust_identity_server_for_password_resets" '
                'has been replaced by "account_threepid_delegate". '
                "Please consult the sample config at docs/sample_config.yaml for "
                "details and update your config file."
            )

        try:
            data = await self.http_client.post_json_get_json(
                id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
                params,
            )
            return data
        except HttpResponseException as e:
            logger.info("Proxied requestToken failed: %r", e)
            # Re-raise as a SynapseError so the client sees the remote failure.
            raise e.to_synapse_error()
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")

    async def requestMsisdnToken(
        self,
        id_server: str,
        country: str,
        phone_number: str,
        client_secret: str,
        send_attempt: int,
        next_link: Optional[str] = None,
    ) -> JsonDict:
        """
        Request an external server send an SMS message on our behalf for the purposes of
        threepid validation.

        Args:
            id_server: The identity server to proxy to
            country: The country code of the phone number
            phone_number: The number to send the message to
            client_secret: The unique client_secret sends by the user
            send_attempt: Which attempt this is
            next_link: A link to redirect the user to once they submit the token

        Returns:
            The json response body from the server
        """
        params = {
            "country": country,
            "phone_number": phone_number,
            "client_secret": client_secret,
            "send_attempt": send_attempt,
        }
        if next_link:
            params["next_link"] = next_link

        if self.hs.config.using_identity_server_from_trusted_list:
            # Warn that a deprecated config option is in use
            logger.warning(
                'The config option "trust_identity_server_for_password_resets" '
                'has been replaced by "account_threepid_delegate". '
                "Please consult the sample config at docs/sample_config.yaml for "
                "details and update your config file."
            )

        try:
            data = await self.http_client.post_json_get_json(
                id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",
                params,
            )
        except HttpResponseException as e:
            logger.info("Proxied requestToken failed: %r", e)
            raise e.to_synapse_error()
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")

        # public_baseurl must be configured for the submit_url below to be valid.
        assert self.hs.config.public_baseurl

        # we need to tell the client to send the token back to us, since it doesn't
        # otherwise know where to send it, so add submit_url response parameter
        # (see also MSC2078)
        data["submit_url"] = (
            self.hs.config.public_baseurl
            + "_matrix/client/unstable/add_threepid/msisdn/submit_token"
        )
        return data

    async def validate_threepid_session(
        self, client_secret: str, sid: str
    ) -> Optional[JsonDict]:
        """Validates a threepid session with only the client secret and session ID
        Tries validating against any configured account_threepid_delegates as well as locally.

        Args:
            client_secret: A secret provided by the client
            sid: The ID of the session

        Returns:
            The json response if validation was successful, otherwise None
        """
        # XXX: We shouldn't need to keep wrapping and unwrapping this value
        threepid_creds = {"client_secret": client_secret, "sid": sid}

        # We don't actually know which medium this 3PID is. Thus we first assume it's email,
        # and if validation fails we try msisdn
        validation_session = None

        # Try to validate as email
        if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
            # Ask our delegated email identity server
            validation_session = await self.threepid_from_creds(
                self.hs.config.account_threepid_delegate_email, threepid_creds
            )
        elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
            # Get a validated session matching these details
            validation_session = await self.store.get_threepid_validation_session(
                "email", client_secret, sid=sid, validated=True
            )

        if validation_session:
            return validation_session

        # Try to validate as msisdn
        if self.hs.config.account_threepid_delegate_msisdn:
            # Ask our delegated msisdn identity server
            validation_session = await self.threepid_from_creds(
                self.hs.config.account_threepid_delegate_msisdn, threepid_creds
            )

        return validation_session

    async def proxy_msisdn_submit_token(
        self, id_server: str, client_secret: str, sid: str, token: str
    ) -> JsonDict:
        """Proxy a POST submitToken request to an identity server for verification purposes

        Args:
            id_server: The identity server URL to contact
            client_secret: Secret provided by the client
            sid: The ID of the session
            token: The verification token

        Raises:
            SynapseError: If we failed to contact the identity server

        Returns:
            The response dict from the identity server
        """
        body = {"client_secret": client_secret, "sid": sid, "token": token}

        try:
            return await self.http_client.post_json_get_json(
                id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken",
                body,
            )
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except HttpResponseException as e:
            logger.warning("Error contacting msisdn account_threepid_delegate: %s", e)
            raise SynapseError(400, "Error contacting the identity server")

    async def lookup_3pid(
        self,
        id_server: str,
        medium: str,
        address: str,
        id_access_token: Optional[str] = None,
    ) -> Optional[str]:
        """Looks up a 3pid in the passed identity server.

        Args:
            id_server: The server name (including port, if required)
                of the identity server to use.
            medium: The type of the third party identifier (e.g. "email").
            address: The third party identifier (e.g. "foo@example.com").
            id_access_token: The access token to authenticate to the identity
                server with

        Returns:
            the matrix ID of the 3pid, or None if it is not recognized.
        """
        if id_access_token is not None:
            try:
                results = await self._lookup_3pid_v2(
                    id_server, id_access_token, medium, address
                )
                return results

            except Exception as e:
                # Catch HttpResponseExcept for a non-200 response code
                # Check if this identity server does not know about v2 lookups
                if isinstance(e, HttpResponseException) and e.code == 404:
                    # This is an old identity server that does not yet support v2 lookups
                    logger.warning(
                        "Attempted v2 lookup on v1 identity server %s. Falling "
                        "back to v1",
                        id_server,
                    )
                else:
                    logger.warning("Error when looking up hashing details: %s", e)
                    return None

        # Fall through to v1 either when no access token was supplied or when
        # the v2 endpoint returned 404 above.
        return await self._lookup_3pid_v1(id_server, medium, address)

    async def _lookup_3pid_v1(
        self, id_server: str, medium: str, address: str
    ) -> Optional[str]:
        """Looks up a 3pid in the passed identity server using v1 lookup.

        Args:
            id_server: The server name (including port, if required)
                of the identity server to use.
            medium: The type of the third party identifier (e.g. "email").
            address: The third party identifier (e.g. "foo@example.com").

        Returns:
            the matrix ID of the 3pid, or None if it is not recognized.
        """
        try:
            data = await self.blacklisting_http_client.get_json(
                "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
                {"medium": medium, "address": address},
            )

            if "mxid" in data:
                # note: we used to verify the identity server's signature here, but no longer
                # require or validate it. See the following for context:
                # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
                return data["mxid"]
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except IOError as e:
            logger.warning("Error from v1 identity server lookup: %s" % (e,))

        return None

    async def _lookup_3pid_v2(
        self, id_server: str, id_access_token: str, medium: str, address: str
    ) -> Optional[str]:
        """Looks up a 3pid in the passed identity server using v2 lookup.

        Args:
            id_server: The server name (including port, if required)
                of the identity server to use.
            id_access_token: The access token to authenticate to the identity server with
            medium: The type of the third party identifier (e.g. "email").
            address: The third party identifier (e.g. "foo@example.com").

        Returns:
            the matrix ID of the 3pid, or None if it is not recognised.
        """
        # Check what hashing details are supported by this identity server
        try:
            hash_details = await self.blacklisting_http_client.get_json(
                "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
                {"access_token": id_access_token},
            )
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")

        if not isinstance(hash_details, dict):
            logger.warning(
                "Got non-dict object when checking hash details of %s%s: %s",
                id_server_scheme,
                id_server,
                hash_details,
            )
            raise SynapseError(
                400,
                "Non-dict object from %s%s during v2 hash_details request: %s"
                % (id_server_scheme, id_server, hash_details),
            )

        # Extract information from hash_details
        supported_lookup_algorithms = hash_details.get("algorithms")
        lookup_pepper = hash_details.get("lookup_pepper")
        if (
            not supported_lookup_algorithms
            or not isinstance(supported_lookup_algorithms, list)
            or not lookup_pepper
            or not isinstance(lookup_pepper, str)
        ):
            raise SynapseError(
                400,
                "Invalid hash details received from identity server %s%s: %s"
                % (id_server_scheme, id_server, hash_details),
            )

        # Check if any of the supported lookup algorithms are
present
        if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
            # Perform a hashed lookup
            lookup_algorithm = LookupAlgorithm.SHA256

            # Hash address, medium and the pepper with sha256
            to_hash = "%s %s %s" % (address, medium, lookup_pepper)
            lookup_value = sha256_and_url_safe_base64(to_hash)

        elif LookupAlgorithm.NONE in supported_lookup_algorithms:
            # Perform a non-hashed lookup
            lookup_algorithm = LookupAlgorithm.NONE

            # Combine together plaintext address and medium
            lookup_value = "%s %s" % (address, medium)

        else:
            logger.warning(
                "None of the provided lookup algorithms of %s are supported: %s",
                id_server,
                supported_lookup_algorithms,
            )
            raise SynapseError(
                400,
                "Provided identity server does not support any v2 lookup "
                "algorithms that this homeserver supports.",
            )

        # Authenticate with identity server given the access token from the client
        headers = {"Authorization": create_id_access_token_header(id_access_token)}

        try:
            lookup_results = await self.blacklisting_http_client.post_json_get_json(
                "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
                {
                    "addresses": [lookup_value],
                    "algorithm": lookup_algorithm,
                    "pepper": lookup_pepper,
                },
                headers=headers,
            )
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except Exception as e:
            logger.warning("Error when performing a v2 3pid lookup: %s", e)
            raise SynapseError(
                500, "Unknown error occurred during identity server lookup"
            )

        # Check for a mapping from what we looked up to an MXID
        if "mappings" not in lookup_results or not isinstance(
            lookup_results["mappings"], dict
        ):
            logger.warning("No results from 3pid lookup")
            return None

        # Return the MXID if it's available, or None otherwise
        mxid = lookup_results["mappings"].get(lookup_value)
        return mxid

    async def ask_id_server_for_third_party_invite(
        self,
        requester: Requester,
        id_server: str,
        medium: str,
        address: str,
        room_id: str,
        inviter_user_id: str,
        room_alias: str,
        room_avatar_url: str,
        room_join_rules: str,
        room_name: str,
        inviter_display_name: str,
        inviter_avatar_url: str,
        id_access_token: Optional[str] = None,
    ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
        """
        Asks an identity server for a third party invite.

        Args:
            requester
            id_server: hostname + optional port for the identity server.
            medium: The literal string "email".
            address: The third party address being invited.
            room_id: The ID of the room to which the user is invited.
            inviter_user_id: The user ID of the inviter.
            room_alias: An alias for the room, for cosmetic notifications.
            room_avatar_url: The URL of the room's avatar, for cosmetic
                notifications.
            room_join_rules: The join rules of the email (e.g. "public").
            room_name: The m.room.name of the room.
            inviter_display_name: The current display name of the
                inviter.
            inviter_avatar_url: The URL of the inviter's avatar.
            id_access_token (str|None): The access token to authenticate to the identity
                server with

        Returns:
            A tuple containing:
                token: The token which must be signed to prove authenticity.
                public_keys ([{"public_key": str, "key_validity_url": str}]):
                    public_key is a base64-encoded ed25519 public key.
                fallback_public_key: One element from public_keys.
                display_name: A user-friendly name to represent the invited user.
        """
        invite_config = {
            "medium": medium,
            "address": address,
            "room_id": room_id,
            "room_alias": room_alias,
            "room_avatar_url": room_avatar_url,
            "room_join_rules": room_join_rules,
            "room_name": room_name,
            "sender": inviter_user_id,
            "sender_display_name": inviter_display_name,
            "sender_avatar_url": inviter_avatar_url,
        }

        # Add the identity service access token to the JSON body and use the v2
        # Identity Service endpoints if id_access_token is present
        data = None
        base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)

        if id_access_token:
            key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
                id_server_scheme,
                id_server,
            )

            # Attempt a v2 lookup
            url = base_url + "/v2/store-invite"
            try:
                data = await self.blacklisting_http_client.post_json_get_json(
                    url,
                    invite_config,
                    {"Authorization": create_id_access_token_header(id_access_token)},
                )
            except RequestTimedOutError:
                raise SynapseError(500, "Timed out contacting identity server")
            except HttpResponseException as e:
                # A 404 means the server doesn't speak v2; any other error is fatal.
                if e.code != 404:
                    logger.info("Failed to POST %s with JSON: %s", url, e)
                    raise e

        if data is None:
            key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
                id_server_scheme,
                id_server,
            )
            url = base_url + "/api/v1/store-invite"

            try:
                data = await self.blacklisting_http_client.post_json_get_json(
                    url, invite_config
                )
            except RequestTimedOutError:
                raise SynapseError(500, "Timed out contacting identity server")
            except HttpResponseException as e:
                logger.warning(
                    "Error trying to call /store-invite on %s%s: %s",
                    id_server_scheme,
                    id_server,
                    e,
                )

            if data is None:
                # Some identity servers may only support application/x-www-form-urlencoded
                # types. This is especially true with old instances of Sydent, see
                # https://github.com/matrix-org/sydent/pull/170
                try:
                    data = await self.blacklisting_http_client.post_urlencoded_get_json(
                        url, invite_config
                    )
                except HttpResponseException as e:
                    logger.warning(
                        "Error calling /store-invite on %s%s with fallback "
                        "encoding: %s",
                        id_server_scheme,
                        id_server,
                        e,
                    )
                    raise e

        # TODO: Check for success
        token = data["token"]
        public_keys = data.get("public_keys", [])
        if "public_key" in data:
            fallback_public_key = {
                "public_key": data["public_key"],
                "key_validity_url": key_validity_url,
            }
        else:
            fallback_public_key = public_keys[0]

        if not public_keys:
            public_keys.append(fallback_public_key)
        display_name = data["display_name"]
        return token, public_keys, fallback_public_key, display_name


def create_id_access_token_header(id_access_token: str) -> List[str]:
    """Create an Authorization header for passing to SimpleHttpClient as the header value
    of an HTTP request.

    Args:
        id_access_token: An identity server access token.

    Returns:
        The ascii-encoded bearer token encased in a list.
    """
    # Prefix with Bearer
    bearer_token = "Bearer %s" % id_access_token

    # The .encode() result is deliberately discarded: calling it validates
    # that the token is pure ASCII (it raises UnicodeEncodeError otherwise);
    # the str itself is what gets returned.
    bearer_token.encode("ascii")

    # Return as a list as that's how SimpleHttpClient takes header values
    return [bearer_token]


class LookupAlgorithm:
    """
    Supported hashing algorithms when performing a 3PID lookup.

    SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64
        encoding
    NONE - Not performing any hashing. Simply sending an (address, medium)
        combo in plaintext
    """

    SHA256 = "sha256"
    NONE = "none"
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_8
crossvul-python_data_bad_752_0
"""Tornado handlers for logging into the notebook.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import re import os try: from urllib.parse import urlparse # Py 3 except ImportError: from urlparse import urlparse # Py 2 import uuid from tornado.escape import url_escape from .security import passwd_check, set_password from ..base.handlers import IPythonHandler class LoginHandler(IPythonHandler): """The basic tornado login handler authenticates with a hashed password from the configuration. """ def _render(self, message=None): self.write(self.render_template('login.html', next=url_escape(self.get_argument('next', default=self.base_url)), message=message, )) def _redirect_safe(self, url, default=None): """Redirect if url is on our PATH Full-domain redirects are allowed if they pass our CORS origin checks. Otherwise use default (self.base_url if unspecified). """ if default is None: default = self.base_url parsed = urlparse(url) if parsed.netloc or not (parsed.path + '/').startswith(self.base_url): # require that next_url be absolute path within our path allow = False # OR pass our cross-origin check if parsed.netloc: # if full URL, run our cross-origin check: origin = '%s://%s' % (parsed.scheme, parsed.netloc) origin = origin.lower() if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) if not allow: # not allowed, use default self.log.warning("Not allowing login redirect to %r" % url) url = default self.redirect(url) def get(self): if self.current_user: next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) else: self._render() @property def hashed_password(self): return self.password_from_settings(self.settings) def passwd_check(self, a, b): return passwd_check(a, b) def post(self): typed_password = self.get_argument('password', default=u'') new_password = self.get_argument('new_password', 
default=u'') if self.get_login_available(self.settings): if self.passwd_check(self.hashed_password, typed_password) and not new_password: self.set_login_cookie(self, uuid.uuid4().hex) elif self.token and self.token == typed_password: self.set_login_cookie(self, uuid.uuid4().hex) if new_password and self.settings.get('allow_password_change'): config_dir = self.settings.get('config_dir') config_file = os.path.join(config_dir, 'jupyter_notebook_config.json') set_password(new_password, config_file=config_file) self.log.info("Wrote hashed password to %s" % config_file) else: self.set_status(401) self._render(message={'error': 'Invalid credentials'}) return next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) @classmethod def set_login_cookie(cls, handler, user_id=None): """Call this on handlers to set the login cookie for success""" cookie_options = handler.settings.get('cookie_options', {}) cookie_options.setdefault('httponly', True) # tornado <4.2 has a bug that considers secure==True as soon as # 'secure' kwarg is passed to set_secure_cookie if handler.settings.get('secure_cookie', handler.request.protocol == 'https'): cookie_options.setdefault('secure', True) cookie_options.setdefault('path', handler.base_url) handler.set_secure_cookie(handler.cookie_name, user_id, **cookie_options) return user_id auth_header_pat = re.compile('token\s+(.+)', re.IGNORECASE) @classmethod def get_token(cls, handler): """Get the user token from a request Default: - in URL parameters: ?token=<token> - in header: Authorization: token <token> """ user_token = handler.get_argument('token', '') if not user_token: # get it from Authorization header m = cls.auth_header_pat.match(handler.request.headers.get('Authorization', '')) if m: user_token = m.group(1) return user_token @classmethod def should_check_origin(cls, handler): """Should the Handler check for CORS origin validation? Origin check should be skipped for token-authenticated requests. 
Returns: - True, if Handler must check for valid CORS origin. - False, if Handler should skip origin check since requests are token-authenticated. """ return not cls.is_token_authenticated(handler) @classmethod def is_token_authenticated(cls, handler): """Returns True if handler has been token authenticated. Otherwise, False. Login with a token is used to signal certain things, such as: - permit access to REST API - xsrf protection - skip origin-checks for scripts """ if getattr(handler, '_user_id', None) is None: # ensure get_user has been called, so we know if we're token-authenticated handler.get_current_user() return getattr(handler, '_token_authenticated', False) @classmethod def get_user(cls, handler): """Called by handlers.get_current_user for identifying the current user. See tornado.web.RequestHandler.get_current_user for details. """ # Can't call this get_current_user because it will collide when # called on LoginHandler itself. if getattr(handler, '_user_id', None): return handler._user_id user_id = cls.get_user_token(handler) if user_id is None: get_secure_cookie_kwargs = handler.settings.get('get_secure_cookie_kwargs', {}) user_id = handler.get_secure_cookie(handler.cookie_name, **get_secure_cookie_kwargs ) else: cls.set_login_cookie(handler, user_id) # Record that the current request has been authenticated with a token. # Used in is_token_authenticated above. handler._token_authenticated = True if user_id is None: # If an invalid cookie was sent, clear it to prevent unnecessary # extra warnings. But don't do this on a request with *no* cookie, # because that can erroneously log you out (see gh-3365) if handler.get_cookie(handler.cookie_name) is not None: handler.log.warning("Clearing invalid/expired login cookie %s", handler.cookie_name) handler.clear_login_cookie() if not handler.login_available: # Completely insecure! No authentication at all. # No need to warn here, though; validate_security will have already done that. 
user_id = 'anonymous' # cache value for future retrievals on the same request handler._user_id = user_id return user_id @classmethod def get_user_token(cls, handler): """Identify the user based on a token in the URL or Authorization header Returns: - uuid if authenticated - None if not """ token = handler.token if not token: return # check login token from URL argument or Authorization header user_token = cls.get_token(handler) authenticated = False if user_token == token: # token-authenticated, set the login cookie handler.log.debug("Accepting token-authenticated connection from %s", handler.request.remote_ip) authenticated = True if authenticated: return uuid.uuid4().hex else: return None @classmethod def validate_security(cls, app, ssl_options=None): """Check the notebook application's security. Show messages, or abort if necessary, based on the security configuration. """ if not app.ip: warning = "WARNING: The notebook server is listening on all IP addresses" if ssl_options is None: app.log.warning(warning + " and not using encryption. This " "is not recommended.") if not app.password and not app.token: app.log.warning(warning + " and not using authentication. " "This is highly insecure and not recommended.") else: if not app.password and not app.token: app.log.warning( "All authentication is disabled." " Anyone who can connect to this server will be able to run code.") @classmethod def password_from_settings(cls, settings): """Return the hashed password from the tornado settings. If there is no configured password, an empty string will be returned. """ return settings.get('password', u'') @classmethod def get_login_available(cls, settings): """Whether this LoginHandler is needed - and therefore whether the login page should be displayed.""" return bool(cls.password_from_settings(settings) or settings.get('token'))
./CrossVul/dataset_final_sorted/CWE-601/py/bad_752_0
crossvul-python_data_bad_1915_2
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import logging import sys from typing import Dict, Iterable, Optional, Set from typing_extensions import ContextManager from twisted.internet import address, reactor import synapse import synapse.events from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError from synapse.api.urls import ( CLIENT_API_PREFIX, FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_V2_PREFIX, ) from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.config.server import ListenerConfig from synapse.federation import send_queue from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.presence import ( BasePresenceHandler, PresenceState, get_interested_parties, ) from synapse.http.server import JsonResource, OptionsResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http import REPLICATION_PREFIX, 
ReplicationRestResource from synapse.replication.http.presence import ( ReplicationBumpPresenceActiveTime, ReplicationPresenceSetState, ) from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.pushers import SlavedPusherStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationDataHandler from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, GroupServerStream, PresenceStream, PushersStream, PushRulesStream, ReceiptsStream, TagAccountDataStream, ToDeviceStream, ) from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client.v1 
import events from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet from synapse.rest.client.v1.login import LoginRestServlet from synapse.rest.client.v1.profile import ( ProfileAvatarURLRestServlet, ProfileDisplaynameRestServlet, ProfileRestServlet, ) from synapse.rest.client.v1.push_rule import PushRuleRestServlet from synapse.rest.client.v1.room import ( JoinedRoomMemberListRestServlet, JoinRoomAliasServlet, PublicRoomListRestServlet, RoomEventContextServlet, RoomInitialSyncRestServlet, RoomMemberListRestServlet, RoomMembershipRestServlet, RoomMessageListRestServlet, RoomSendEventRestServlet, RoomStateEventRestServlet, RoomStateRestServlet, RoomTypingRestServlet, ) from synapse.rest.client.v1.voip import VoipRestServlet from synapse.rest.client.v2_alpha import groups, sync, user_directory from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.client.v2_alpha.account import ThreepidRestServlet from synapse.rest.client.v2_alpha.account_data import ( AccountDataServlet, RoomAccountDataServlet, ) from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet from synapse.rest.client.versions import VersionsRestServlet from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats 
import StatsStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import ReadReceipt from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.generic_worker") class PresenceStatusStubServlet(RestServlet): """If presence is disabled this servlet can be used to stub out setting presence status. """ PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status") def __init__(self, hs): super().__init__() self.auth = hs.get_auth() async def on_GET(self, request, user_id): await self.auth.get_user_by_req(request) return 200, {"presence": "offline"} async def on_PUT(self, request, user_id): await self.auth.get_user_by_req(request) return 200, {} class KeyUploadServlet(RestServlet): """An implementation of the `KeyUploadServlet` that responds to read only requests, but otherwise proxies through to the master instance. """ PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") def __init__(self, hs): """ Args: hs (synapse.server.HomeServer): server """ super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.http_client = hs.get_simple_http_client() self.main_uri = hs.config.worker_main_http_uri async def on_POST(self, request, device_id): requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() body = parse_json_object_from_request(request) if device_id is not None: # passing the device_id here is deprecated; however, we allow it # for now for compatibility with older clients. 
if requester.device_id is not None and device_id != requester.device_id: logger.warning( "Client uploading keys for a different device " "(logged in as %s, uploading for %s)", requester.device_id, device_id, ) else: device_id = requester.device_id if device_id is None: raise SynapseError( 400, "To upload keys, you must pass device_id when authenticating" ) if body: # They're actually trying to upload something, proxy to main synapse. # Proxy headers from the original request, such as the auth headers # (in case the access token is there) and the original IP / # User-Agent of the request. headers = { header: request.requestHeaders.getRawHeaders(header, []) for header in (b"Authorization", b"User-Agent") } # Add the previous hop the the X-Forwarded-For header. x_forwarded_for = request.requestHeaders.getRawHeaders( b"X-Forwarded-For", [] ) if isinstance(request.client, (address.IPv4Address, address.IPv6Address)): previous_host = request.client.host.encode("ascii") # If the header exists, add to the comma-separated list of the first # instance of the header. Otherwise, generate a new header. if x_forwarded_for: x_forwarded_for = [ x_forwarded_for[0] + b", " + previous_host ] + x_forwarded_for[1:] else: x_forwarded_for = [previous_host] headers[b"X-Forwarded-For"] = x_forwarded_for try: result = await self.http_client.post_json_get_json( self.main_uri + request.uri.decode("ascii"), body, headers=headers ) except HttpResponseException as e: raise e.to_synapse_error() from e except RequestSendFailed as e: raise SynapseError(502, "Failed to talk to master") from e return 200, result else: # Just interested in counts. 
result = await self.store.count_e2e_one_time_keys(user_id, device_id) return 200, {"one_time_key_counts": result} class _NullContextManager(ContextManager[None]): """A context manager which does nothing.""" def __exit__(self, exc_type, exc_val, exc_tb): pass UPDATE_SYNCING_USERS_MS = 10 * 1000 class GenericWorkerPresence(BasePresenceHandler): def __init__(self, hs): super().__init__(hs) self.hs = hs self.is_mine_id = hs.is_mine_id self.http_client = hs.get_simple_http_client() self._presence_enabled = hs.config.use_presence # The number of ongoing syncs on this process, by user id. # Empty if _presence_enabled is false. self._user_to_num_current_syncs = {} # type: Dict[str, int] self.notifier = hs.get_notifier() self.instance_id = hs.get_instance_id() # user_id -> last_sync_ms. Lists the users that have stopped syncing # but we haven't notified the master of that yet self.users_going_offline = {} self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) self._send_stop_syncing_loop = self.clock.looping_call( self.send_stop_syncing, UPDATE_SYNCING_USERS_MS ) hs.get_reactor().addSystemEventTrigger( "before", "shutdown", run_as_background_process, "generic_presence.on_shutdown", self._on_shutdown, ) def _on_shutdown(self): if self._presence_enabled: self.hs.get_tcp_replication().send_command( ClearUserSyncsCommand(self.instance_id) ) def send_user_sync(self, user_id, is_syncing, last_sync_ms): if self._presence_enabled: self.hs.get_tcp_replication().send_user_sync( self.instance_id, user_id, is_syncing, last_sync_ms ) def mark_as_coming_online(self, user_id): """A user has started syncing. Send a UserSync to the master, unless they had recently stopped syncing. 
Args: user_id (str) """ going_offline = self.users_going_offline.pop(user_id, None) if not going_offline: # Safe to skip because we haven't yet told the master they were offline self.send_user_sync(user_id, True, self.clock.time_msec()) def mark_as_going_offline(self, user_id): """A user has stopped syncing. We wait before notifying the master as its likely they'll come back soon. This allows us to avoid sending a stopped syncing immediately followed by a started syncing notification to the master Args: user_id (str) """ self.users_going_offline[user_id] = self.clock.time_msec() def send_stop_syncing(self): """Check if there are any users who have stopped syncing a while ago and haven't come back yet. If there are poke the master about them. """ now = self.clock.time_msec() for user_id, last_sync_ms in list(self.users_going_offline.items()): if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: self.users_going_offline.pop(user_id, None) self.send_user_sync(user_id, False, last_sync_ms) async def user_syncing( self, user_id: str, affect_presence: bool ) -> ContextManager[None]: """Record that a user is syncing. Called by the sync and events servlets to record that a user has connected to this worker and is waiting for some events. """ if not affect_presence or not self._presence_enabled: return _NullContextManager() curr_sync = self._user_to_num_current_syncs.get(user_id, 0) self._user_to_num_current_syncs[user_id] = curr_sync + 1 # If we went from no in flight sync to some, notify replication if self._user_to_num_current_syncs[user_id] == 1: self.mark_as_coming_online(user_id) def _end(): # We check that the user_id is in user_to_num_current_syncs because # user_to_num_current_syncs may have been cleared if we are # shutting down. 
if user_id in self._user_to_num_current_syncs: self._user_to_num_current_syncs[user_id] -= 1 # If we went from one in flight sync to non, notify replication if self._user_to_num_current_syncs[user_id] == 0: self.mark_as_going_offline(user_id) @contextlib.contextmanager def _user_syncing(): try: yield finally: _end() return _user_syncing() async def notify_from_replication(self, states, stream_id): parties = await get_interested_parties(self.store, states) room_ids_to_states, users_to_states = parties self.notifier.on_new_event( "presence_key", stream_id, rooms=room_ids_to_states.keys(), users=users_to_states.keys(), ) async def process_replication_rows(self, token, rows): states = [ UserPresenceState( row.user_id, row.state, row.last_active_ts, row.last_federation_update_ts, row.last_user_sync_ts, row.status_msg, row.currently_active, ) for row in rows ] for state in states: self.user_to_current_state[state.user_id] = state stream_id = token await self.notify_from_replication(states, stream_id) def get_currently_syncing_users_for_replication(self) -> Iterable[str]: return [ user_id for user_id, count in self._user_to_num_current_syncs.items() if count > 0 ] async def set_state(self, target_user, state, ignore_status_msg=False): """Set the presence state of the user. """ presence = state["presence"] valid_presence = ( PresenceState.ONLINE, PresenceState.UNAVAILABLE, PresenceState.OFFLINE, ) if presence not in valid_presence: raise SynapseError(400, "Invalid presence state") user_id = target_user.to_string() # If presence is disabled, no-op if not self.hs.config.use_presence: return # Proxy request to master await self._set_state_client( user_id=user_id, state=state, ignore_status_msg=ignore_status_msg ) async def bump_presence_active_time(self, user): """We've seen the user do something that indicates they're interacting with the app. 
""" # If presence is disabled, no-op if not self.hs.config.use_presence: return # Proxy request to master user_id = user.to_string() await self._bump_active_client(user_id=user_id) class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. UserDirectoryStore, StatsStore, UIAuthWorkerStore, SlavedDeviceInboxStore, SlavedDeviceStore, SlavedReceiptsStore, SlavedPushRuleStore, SlavedGroupServerStore, SlavedAccountDataStore, SlavedPusherStore, CensorEventsStore, ClientIpWorkerStore, SlavedEventStore, SlavedKeyStore, RoomStore, DirectoryStore, SlavedApplicationServiceStore, SlavedRegistrationStore, SlavedTransactionStore, SlavedProfileStore, SlavedClientIpStore, SlavedPresenceStore, SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, SearchWorkerStore, TransactionWorkerStore, BaseSlavedStore, ): pass class GenericWorkerServer(HomeServer): DATASTORE_CLASS = GenericWorkerSlavedStore def _listen_http(self, listener_config: ListenerConfig): port = listener_config.port bind_addresses = listener_config.bind_addresses assert listener_config.http_options is not None site_tag = listener_config.http_options.tag if site_tag is None: site_tag = port # We always include a health resource. 
resources = {"/health": HealthResource()} for res in listener_config.http_options.resources: for name in res.names: if name == "metrics": resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) elif name == "client": resource = JsonResource(self, canonical_json=False) PublicRoomListRestServlet(self).register(resource) RoomMemberListRestServlet(self).register(resource) JoinedRoomMemberListRestServlet(self).register(resource) RoomStateRestServlet(self).register(resource) RoomEventContextServlet(self).register(resource) RoomMessageListRestServlet(self).register(resource) RegisterRestServlet(self).register(resource) LoginRestServlet(self).register(resource) ThreepidRestServlet(self).register(resource) KeyQueryServlet(self).register(resource) KeyChangesServlet(self).register(resource) VoipRestServlet(self).register(resource) PushRuleRestServlet(self).register(resource) VersionsRestServlet(self).register(resource) RoomSendEventRestServlet(self).register(resource) RoomMembershipRestServlet(self).register(resource) RoomStateEventRestServlet(self).register(resource) JoinRoomAliasServlet(self).register(resource) ProfileAvatarURLRestServlet(self).register(resource) ProfileDisplaynameRestServlet(self).register(resource) ProfileRestServlet(self).register(resource) KeyUploadServlet(self).register(resource) AccountDataServlet(self).register(resource) RoomAccountDataServlet(self).register(resource) RoomTypingRestServlet(self).register(resource) sync.register_servlets(self, resource) events.register_servlets(self, resource) InitialSyncRestServlet(self).register(resource) RoomInitialSyncRestServlet(self).register(resource) user_directory.register_servlets(self, resource) # If presence is disabled, use the stub servlet that does # not allow sending presence if not self.config.use_presence: PresenceStatusStubServlet(self).register(resource) groups.register_servlets(self, resource) resources.update({CLIENT_API_PREFIX: resource}) elif name == "federation": 
resources.update({FEDERATION_PREFIX: TransportLayerServer(self)}) elif name == "media": if self.config.can_load_media_repo: media_repo = self.get_media_repository_resource() # We need to serve the admin servlets for media on the # worker. admin_resource = JsonResource(self, canonical_json=False) register_servlets_for_media_repo(self, admin_resource) resources.update( { MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo, "/_synapse/admin": admin_resource, } ) else: logger.warning( "A 'media' listener is configured but the media" " repository is disabled. Ignoring." ) if name == "openid" and "federation" not in res.names: # Only load the openid resource separately if federation resource # is not specified since federation resource includes openid # resource. resources.update( { FEDERATION_PREFIX: TransportLayerServer( self, servlet_groups=["openid"] ) } ) if name in ["keys", "federation"]: resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "replication": resources[REPLICATION_PREFIX] = ReplicationRestResource(self) root_resource = create_resource_tree(resources, OptionsResource()) _base.listen_tcp( bind_addresses, port, SynapseSite( "synapse.access.http.%s" % (site_tag,), site_tag, listener_config, root_resource, self.version_string, ), reactor=self.get_reactor(), ) logger.info("Synapse worker now listening on port %d", port) def start_listening(self, listeners: Iterable[ListenerConfig]): for listener in listeners: if listener.type == "http": self._listen_http(listener) elif listener.type == "manhole": _base.listen_tcp( listener.bind_addresses, listener.port, manhole( username="matrix", password="rabbithole", globals={"hs": self} ), ) elif listener.type == "metrics": if not self.get_config().enable_metrics: logger.warning( ( "Metrics listener configured, but " "enable_metrics is not True!" 
) ) else: _base.listen_metrics(listener.bind_addresses, listener.port) else: logger.warning("Unsupported listener type: %s", listener.type) self.get_tcp_replication().start_replication(self) async def remove_pusher(self, app_id, push_key, user_id): self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) @cache_in_self def get_replication_data_handler(self): return GenericWorkerReplicationHandler(self) @cache_in_self def get_presence_handler(self): return GenericWorkerPresence(self) class GenericWorkerReplicationHandler(ReplicationDataHandler): def __init__(self, hs): super().__init__(hs) self.store = hs.get_datastore() self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence self.notifier = hs.get_notifier() self.notify_pushers = hs.config.start_pushers self.pusher_pool = hs.get_pusherpool() self.send_handler = None # type: Optional[FederationSenderHandler] if hs.config.send_federation: self.send_handler = FederationSenderHandler(hs) async def on_rdata(self, stream_name, instance_name, token, rows): await super().on_rdata(stream_name, instance_name, token, rows) await self._process_and_notify(stream_name, instance_name, token, rows) async def _process_and_notify(self, stream_name, instance_name, token, rows): try: if self.send_handler: await self.send_handler.process_replication_rows( stream_name, token, rows ) if stream_name == PushRulesStream.NAME: self.notifier.on_new_event( "push_rules_key", token, users=[row.user_id for row in rows] ) elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): self.notifier.on_new_event( "account_data_key", token, users=[row.user_id for row in rows] ) elif stream_name == ReceiptsStream.NAME: self.notifier.on_new_event( "receipt_key", token, rooms=[row.room_id for row in rows] ) await self.pusher_pool.on_new_receipts( token, token, {row.room_id for row in rows} ) elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if 
entities: self.notifier.on_new_event("to_device_key", token, users=entities) elif stream_name == DeviceListsStream.NAME: all_room_ids = set() # type: Set[str] for row in rows: if row.entity.startswith("@"): room_ids = await self.store.get_rooms_for_user(row.entity) all_room_ids.update(room_ids) self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) elif stream_name == PresenceStream.NAME: await self.presence_handler.process_replication_rows(token, rows) elif stream_name == GroupServerStream.NAME: self.notifier.on_new_event( "groups_key", token, users=[row.user_id for row in rows] ) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: self.stop_pusher(row.user_id, row.app_id, row.pushkey) else: await self.start_pusher(row.user_id, row.app_id, row.pushkey) except Exception: logger.exception("Error processing replication") async def on_position(self, stream_name: str, instance_name: str, token: int): await super().on_position(stream_name, instance_name, token) # Also call on_rdata to ensure that stream positions are properly reset. await self.on_rdata(stream_name, instance_name, token, []) def stop_pusher(self, user_id, app_id, pushkey): if not self.notify_pushers: return key = "%s:%s" % (app_id, pushkey) pushers_for_user = self.pusher_pool.pushers.get(user_id, {}) pusher = pushers_for_user.pop(key, None) if pusher is None: return logger.info("Stopping pusher %r / %r", user_id, key) pusher.on_stop() async def start_pusher(self, user_id, app_id, pushkey): if not self.notify_pushers: return key = "%s:%s" % (app_id, pushkey) logger.info("Starting pusher %r / %r", user_id, key) return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) def on_remote_server_up(self, server: str): """Called when get a new REMOTE_SERVER_UP command.""" # Let's wake up the transaction queue for the server in case we have # pending stuff to send to it. 
if self.send_handler: self.send_handler.wake_destination(server) class FederationSenderHandler: """Processes the fedration replication stream This class is only instantiate on the worker responsible for sending outbound federation transactions. It receives rows from the replication stream and forwards the appropriate entries to the FederationSender class. """ def __init__(self, hs: GenericWorkerServer): self.store = hs.get_datastore() self._is_mine_id = hs.is_mine_id self.federation_sender = hs.get_federation_sender() self._hs = hs # Stores the latest position in the federation stream we've gotten up # to. This is always set before we use it. self.federation_position = None self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") def on_start(self): # There may be some events that are persisted but haven't been sent, # so send them now. self.federation_sender.notify_new_events( self.store.get_room_max_stream_ordering() ) def wake_destination(self, server: str): self.federation_sender.wake_destination(server) async def process_replication_rows(self, stream_name, token, rows): # The federation stream contains things that we want to send out, e.g. # presence, typing, etc. if stream_name == "federation": send_queue.process_rows_for_federation(self.federation_sender, rows) await self.update_token(token) # ... and when new receipts happen elif stream_name == ReceiptsStream.NAME: await self._on_new_receipts(rows) # ... as well as device updates and messages elif stream_name == DeviceListsStream.NAME: # The entities are either user IDs (starting with '@') whose devices # have changed, or remote servers that we need to tell about # changes. 
hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) elif stream_name == ToDeviceStream.NAME: # The to_device stream includes stuff to be pushed to both local # clients and remote servers, so we ignore entities that start with # '@' (since they'll be local users rather than destinations). hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) async def _on_new_receipts(self, rows): """ Args: rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): new receipts to be processed """ for receipt in rows: # we only want to send on receipts for our own users if not self._is_mine_id(receipt.user_id): continue receipt_info = ReadReceipt( receipt.room_id, receipt.receipt_type, receipt.user_id, [receipt.event_id], receipt.data, ) await self.federation_sender.send_read_receipt(receipt_info) async def update_token(self, token): """Update the record of where we have processed to in the federation stream. Called after we have processed a an update received over replication. Sends a FEDERATION_ACK back to the master, and stores the token that we have processed in `federation_stream_position` so that we can restart where we left off. """ self.federation_position = token # We save and send the ACK to master asynchronously, so we don't block # processing on persistence. We don't need to do this operation for # every single RDATA we receive, we just need to do it periodically. if self._fed_position_linearizer.is_queued(None): # There is already a task queued up to save and send the token, so # no need to queue up another task. return run_as_background_process("_save_and_send_ack", self._save_and_send_ack) async def _save_and_send_ack(self): """Save the current federation position in the database and send an ACK to master with where we're up to. 
""" try: # We linearize here to ensure we don't have races updating the token # # XXX this appears to be redundant, since the ReplicationCommandHandler # has a linearizer which ensures that we only process one line of # replication data at a time. Should we remove it, or is it doing useful # service for robustness? Or could we replace it with an assertion that # we're not being re-entered? with (await self._fed_position_linearizer.queue(None)): # We persist and ack the same position, so we take a copy of it # here as otherwise it can get modified from underneath us. current_position = self.federation_position await self.store.update_federation_out_pos( "federation", current_position ) # We ACK this token over replication so that the master can drop # its in memory queues self._hs.get_tcp_replication().send_federation_ack(current_position) except Exception: logger.exception("Error updating federation stream position") def start(config_options): try: config = HomeServerConfig.load_config("Synapse worker", config_options) except ConfigError as e: sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) # For backwards compatibility let any of the old app names. assert config.worker_app in ( "synapse.app.appservice", "synapse.app.client_reader", "synapse.app.event_creator", "synapse.app.federation_reader", "synapse.app.federation_sender", "synapse.app.frontend_proxy", "synapse.app.generic_worker", "synapse.app.media_repository", "synapse.app.pusher", "synapse.app.synchrotron", "synapse.app.user_dir", ) if config.worker_app == "synapse.app.appservice": if config.appservice.notify_appservices: sys.stderr.write( "\nThe appservices must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``notify_appservices: false`` to the main config" "\n" ) sys.exit(1) # Force the appservice to start since they will be disabled in the main config config.appservice.notify_appservices = True else: # For other worker types we force this to off. 
config.appservice.notify_appservices = False if config.worker_app == "synapse.app.pusher": if config.server.start_pushers: sys.stderr.write( "\nThe pushers must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``start_pushers: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.server.start_pushers = True else: # For other worker types we force this to off. config.server.start_pushers = False if config.worker_app == "synapse.app.user_dir": if config.server.update_user_directory: sys.stderr.write( "\nThe update_user_directory must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``update_user_directory: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.server.update_user_directory = True else: # For other worker types we force this to off. config.server.update_user_directory = False if config.worker_app == "synapse.app.federation_sender": if config.worker.send_federation: sys.stderr.write( "\nThe send_federation must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``send_federation: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.worker.send_federation = True else: # For other worker types we force this to off. config.worker.send_federation = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts hs = GenericWorkerServer( config.server_name, config=config, version_string="Synapse/" + get_version_string(synapse), ) setup_logging(hs, config, use_worker_options=True) hs.setup() # Ensure the replication streamer is always started in case we write to any # streams. Will no-op if no streams can be written to by this worker. 
hs.get_replication_streamer() reactor.addSystemEventTrigger( "before", "startup", _base.start, hs, config.worker_listeners ) _base.start_worker_reactor("synapse-generic-worker", config) if __name__ == "__main__": with LoggingContext("main"): start(sys.argv[1:])
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_2
crossvul-python_data_bad_1953_1
import re
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar

from .web_exceptions import HTTPMove, HTTPPermanentRedirect
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import SystemRoute

__all__ = (
    "middleware",
    "normalize_path_middleware",
)

if TYPE_CHECKING:  # pragma: no cover
    from .web_app import Application

_Func = TypeVar("_Func")


async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
    """Probe whether *path* resolves against the application's router.

    Returns ``(True, alt_request)`` — a clone of *request* rewritten to
    *path* — when the router matches it without an HTTP exception,
    otherwise ``(False, request)`` with the original request untouched.
    """
    alt_request = request.clone(rel_url=path)

    match_info = await request.app.router.resolve(alt_request)
    alt_request._match_info = match_info  # type: ignore[assignment]

    if match_info.http_exception is None:
        return True, alt_request

    return False, request


def middleware(f: _Func) -> _Func:
    """Deprecated no-op decorator kept only for backwards compatibility."""
    warnings.warn(
        "Middleware decorator is deprecated since 4.0 "
        "and its behaviour is default, "
        "you can simply remove this decorator.",
        DeprecationWarning,
        stacklevel=2,
    )
    return f


_Handler = Callable[[Request], Awaitable[StreamResponse]]
_Middleware = Callable[[Request, _Handler], Awaitable[StreamResponse]]


def normalize_path_middleware(
    *,
    append_slash: bool = True,
    remove_slash: bool = False,
    merge_slashes: bool = True,
    redirect_class: Type[HTTPMove] = HTTPPermanentRedirect,
) -> _Middleware:
    """
    Middleware factory which produces a middleware that normalizes
    the path of a request. By normalizing it means:

        - Add or remove a trailing slash to the path.
        - Double slashes are replaced by one.

    The middleware returns as soon as it finds a path that resolves
    correctly. The order when both merge and append/remove are enabled is:

        1) merge slashes
        2) append/remove slash
        3) both merge slashes and append/remove slash.

    If the path resolves with at least one of those conditions, it will
    redirect to the new path.

    Only one of `append_slash` and `remove_slash` can be enabled. If both
    are `True` the factory will raise an assertion error

    If `append_slash` is `True` the middleware will append a slash when
    needed. If a resource is defined with trailing slash and the request
    comes without it, it will append it automatically.

    If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
    the middleware will remove trailing slashes and redirect if the resource
    is defined

    If merge_slashes is True, merge multiple consecutive slashes in the path
    into one.
    """
    correct_configuration = not (append_slash and remove_slash)
    assert correct_configuration, "Cannot both remove and append slash"

    async def impl(request: Request, handler: _Handler) -> StreamResponse:
        # Only try to normalize when routing already failed (404 etc.).
        if isinstance(request.match_info.route, SystemRoute):
            paths_to_check = []
            if "?" in request.raw_path:
                path, query = request.raw_path.split("?", 1)
                query = "?" + query
            else:
                query = ""
                path = request.raw_path

            if merge_slashes:
                paths_to_check.append(re.sub("//+", "/", path))
            if append_slash and not request.path.endswith("/"):
                paths_to_check.append(path + "/")
            if remove_slash and request.path.endswith("/"):
                paths_to_check.append(path[:-1])
            if merge_slashes and append_slash:
                paths_to_check.append(re.sub("//+", "/", path + "/"))
            if merge_slashes and remove_slash and path.endswith("/"):
                merged_slashes = re.sub("//+", "/", path)
                paths_to_check.append(merged_slashes[:-1])

            for path in paths_to_check:
                # SECURITY (GHSA-v6wp-4m6f-gcjg / CVE-2021-21330): a candidate
                # path that begins with "//" would be parsed by yarl as an
                # authority ("//evil.example.com/x"), producing a
                # protocol-relative redirect Location to another host — an
                # open redirect. Collapse any leading slashes to exactly one
                # before resolving/redirecting.
                path = re.sub("^//+", "/", path)
                resolves, request = await _check_request_resolves(request, path)
                if resolves:
                    raise redirect_class(request.raw_path + query)

        return await handler(request)

    return impl


def _fix_request_current_app(app: "Application") -> _Middleware:
    """Middleware that pins ``app`` as the current application for the
    duration of each request's handler call."""

    async def impl(request: Request, handler: _Handler) -> StreamResponse:
        with request.match_info.set_current_app(app):
            return await handler(request)

    return impl
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1953_1
crossvul-python_data_bad_4386_0
"""Tornado handlers for logging into the Jupyter Server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import re import os import uuid from urllib.parse import urlparse from tornado.escape import url_escape from .security import passwd_check, set_password from ..base.handlers import JupyterHandler class LoginHandler(JupyterHandler): """The basic tornado login handler authenticates with a hashed password from the configuration. """ def _render(self, message=None): self.write(self.render_template('login.html', next=url_escape(self.get_argument('next', default=self.base_url)), message=message, )) def _redirect_safe(self, url, default=None): """Redirect if url is on our PATH Full-domain redirects are allowed if they pass our CORS origin checks. Otherwise use default (self.base_url if unspecified). """ if default is None: default = self.base_url if not url.startswith(self.base_url): # require that next_url be absolute path within our path allow = False # OR pass our cross-origin check if '://' in url: # if full URL, run our cross-origin check: parsed = urlparse(url.lower()) origin = '%s://%s' % (parsed.scheme, parsed.netloc) if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) if not allow: # not allowed, use default self.log.warning("Not allowing login redirect to %r" % url) url = default self.redirect(url) def get(self): if self.current_user: next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) else: self._render() @property def hashed_password(self): return self.password_from_settings(self.settings) def passwd_check(self, a, b): return passwd_check(a, b) def post(self): typed_password = self.get_argument('password', default=u'') new_password = self.get_argument('new_password', default=u'') if self.get_login_available(self.settings): if self.passwd_check(self.hashed_password, typed_password) 
and not new_password: self.set_login_cookie(self, uuid.uuid4().hex) elif self.token and self.token == typed_password: self.set_login_cookie(self, uuid.uuid4().hex) if new_password and self.settings.get('allow_password_change'): config_dir = self.settings.get('config_dir') config_file = os.path.join(config_dir, 'jupyter_server_config.json') set_password(new_password, config_file=config_file) self.log.info("Wrote hashed password to %s" % config_file) else: self.set_status(401) self._render(message={'error': 'Invalid credentials'}) return next_url = self.get_argument('next', default=self.base_url) self._redirect_safe(next_url) @classmethod def set_login_cookie(cls, handler, user_id=None): """Call this on handlers to set the login cookie for success""" cookie_options = handler.settings.get('cookie_options', {}) cookie_options.setdefault('httponly', True) # tornado <4.2 has a bug that considers secure==True as soon as # 'secure' kwarg is passed to set_secure_cookie if handler.settings.get('secure_cookie', handler.request.protocol == 'https'): cookie_options.setdefault('secure', True) cookie_options.setdefault('path', handler.base_url) handler.set_secure_cookie(handler.cookie_name, user_id, **cookie_options) return user_id auth_header_pat = re.compile('token\s+(.+)', re.IGNORECASE) @classmethod def get_token(cls, handler): """Get the user token from a request Default: - in URL parameters: ?token=<token> - in header: Authorization: token <token> """ user_token = handler.get_argument('token', '') if not user_token: # get it from Authorization header m = cls.auth_header_pat.match(handler.request.headers.get('Authorization', '')) if m: user_token = m.group(1) return user_token @classmethod def should_check_origin(cls, handler): """Should the Handler check for CORS origin validation? Origin check should be skipped for token-authenticated requests. Returns: - True, if Handler must check for valid CORS origin. 
- False, if Handler should skip origin check since requests are token-authenticated. """ return not cls.is_token_authenticated(handler) @classmethod def is_token_authenticated(cls, handler): """Returns True if handler has been token authenticated. Otherwise, False. Login with a token is used to signal certain things, such as: - permit access to REST API - xsrf protection - skip origin-checks for scripts """ if getattr(handler, '_user_id', None) is None: # ensure get_user has been called, so we know if we're token-authenticated handler.get_current_user() return getattr(handler, '_token_authenticated', False) @classmethod def get_user(cls, handler): """Called by handlers.get_current_user for identifying the current user. See tornado.web.RequestHandler.get_current_user for details. """ # Can't call this get_current_user because it will collide when # called on LoginHandler itself. if getattr(handler, '_user_id', None): return handler._user_id user_id = cls.get_user_token(handler) if user_id is None: get_secure_cookie_kwargs = handler.settings.get('get_secure_cookie_kwargs', {}) user_id = handler.get_secure_cookie(handler.cookie_name, **get_secure_cookie_kwargs ) else: cls.set_login_cookie(handler, user_id) # Record that the current request has been authenticated with a token. # Used in is_token_authenticated above. handler._token_authenticated = True if user_id is None: # If an invalid cookie was sent, clear it to prevent unnecessary # extra warnings. But don't do this on a request with *no* cookie, # because that can erroneously log you out (see gh-3365) if handler.get_cookie(handler.cookie_name) is not None: handler.log.warning("Clearing invalid/expired login cookie %s", handler.cookie_name) handler.clear_login_cookie() if not handler.login_available: # Completely insecure! No authentication at all. # No need to warn here, though; validate_security will have already done that. 
user_id = 'anonymous' # cache value for future retrievals on the same request handler._user_id = user_id return user_id @classmethod def get_user_token(cls, handler): """Identify the user based on a token in the URL or Authorization header Returns: - uuid if authenticated - None if not """ token = handler.token if not token: return # check login token from URL argument or Authorization header user_token = cls.get_token(handler) authenticated = False if user_token == token: # token-authenticated, set the login cookie handler.log.debug("Accepting token-authenticated connection from %s", handler.request.remote_ip) authenticated = True if authenticated: return uuid.uuid4().hex else: return None @classmethod def validate_security(cls, app, ssl_options=None): """Check the application's security. Show messages, or abort if necessary, based on the security configuration. """ if not app.ip: warning = "WARNING: The Jupyter server is listening on all IP addresses" if ssl_options is None: app.log.warning(warning + " and not using encryption. This " "is not recommended.") if not app.password and not app.token: app.log.warning(warning + " and not using authentication. " "This is highly insecure and not recommended.") else: if not app.password and not app.token: app.log.warning( "All authentication is disabled." " Anyone who can connect to this server will be able to run code.") @classmethod def password_from_settings(cls, settings): """Return the hashed password from the tornado settings. If there is no configured password, an empty string will be returned. """ return settings.get('password', u'') @classmethod def get_login_available(cls, settings): """Whether this LoginHandler is needed - and therefore whether the login page should be displayed.""" return bool(cls.password_from_settings(settings) or settings.get('token'))
./CrossVul/dataset_final_sorted/CWE-601/py/bad_4386_0
crossvul-python_data_bad_1915_9
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import urllib.parse from io import BytesIO from typing import ( TYPE_CHECKING, Any, BinaryIO, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union, ) import treq from canonicaljson import encode_canonical_json from netaddr import IPAddress, IPSet from prometheus_client import Counter from zope.interface import implementer, provider from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE from twisted.internet import defer, error as twisted_error, protocol, ssl from twisted.internet.interfaces import ( IAddress, IHostResolution, IReactorPluggableNameResolver, IResolutionReceiver, ) from twisted.internet.task import Cooperator from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone from twisted.web.client import ( Agent, HTTPConnectionPool, ResponseNeverReceived, readBody, ) from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent, IBodyProducer, IResponse from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri from synapse.http.proxyagent import ProxyAgent from synapse.logging.context import make_deferred_yieldable from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.util import 
json_decoder from synapse.util.async_helpers import timeout_deferred if TYPE_CHECKING: from synapse.app.homeserver import HomeServer logger = logging.getLogger(__name__) outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) incoming_responses_counter = Counter( "synapse_http_client_responses", "", ["method", "code"] ) # the type of the headers list, to be passed to the t.w.h.Headers. # Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so # we simplify. RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]] # the value actually has to be a List, but List is invariant so we can't specify that # the entries can either be Lists or bytes. RawHeaderValue = Sequence[Union[str, bytes]] # the type of the query params, to be passed into `urlencode` QueryParamValue = Union[str, bytes, Iterable[Union[str, bytes]]] QueryParams = Union[Mapping[str, QueryParamValue], Mapping[bytes, QueryParamValue]] def check_against_blacklist( ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet ) -> bool: """ Compares an IP address to allowed and disallowed IP sets. Args: ip_address: The IP address to check ip_whitelist: Allowed IP addresses. ip_blacklist: Disallowed IP addresses. Returns: True if the IP address is in the blacklist and not in the whitelist. """ if ip_address in ip_blacklist: if ip_whitelist is None or ip_address not in ip_whitelist: return True return False _EPSILON = 0.00000001 def _make_scheduler(reactor): """Makes a schedular suitable for a Cooperator using the given reactor. (This is effectively just a copy from `twisted.internet.task`) """ def _scheduler(x): return reactor.callLater(_EPSILON, x) return _scheduler class IPBlacklistingResolver: """ A proxy for reactor.nameResolver which only produces non-blacklisted IP addresses, preventing DNS rebinding attacks on URL preview. 
""" def __init__( self, reactor: IReactorPluggableNameResolver, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet, ): """ Args: reactor: The twisted reactor. ip_whitelist: IP addresses to allow. ip_blacklist: IP addresses to disallow. """ self._reactor = reactor self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist def resolveHostName( self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0 ) -> IResolutionReceiver: r = recv() addresses = [] # type: List[IAddress] def _callback() -> None: r.resolutionBegan(None) has_bad_ip = False for i in addresses: ip_address = IPAddress(i.host) if check_against_blacklist( ip_address, self._ip_whitelist, self._ip_blacklist ): logger.info( "Dropped %s from DNS resolution to %s due to blacklist" % (ip_address, hostname) ) has_bad_ip = True # if we have a blacklisted IP, we'd like to raise an error to block the # request, but all we can really do from here is claim that there were no # valid results. if not has_bad_ip: for i in addresses: r.addressResolved(i) r.resolutionComplete() @provider(IResolutionReceiver) class EndpointReceiver: @staticmethod def resolutionBegan(resolutionInProgress: IHostResolution) -> None: pass @staticmethod def addressResolved(address: IAddress) -> None: addresses.append(address) @staticmethod def resolutionComplete() -> None: _callback() self._reactor.nameResolver.resolveHostName( EndpointReceiver, hostname, portNumber=portNumber ) return r class BlacklistingAgentWrapper(Agent): """ An Agent wrapper which will prevent access to IP addresses being accessed directly (without an IP address lookup). """ def __init__( self, agent: IAgent, ip_whitelist: Optional[IPSet] = None, ip_blacklist: Optional[IPSet] = None, ): """ Args: agent: The Agent to wrap. ip_whitelist: IP addresses to allow. ip_blacklist: IP addresses to disallow. 
""" self._agent = agent self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist def request( self, method: bytes, uri: bytes, headers: Optional[Headers] = None, bodyProducer: Optional[IBodyProducer] = None, ) -> defer.Deferred: h = urllib.parse.urlparse(uri.decode("ascii")) try: ip_address = IPAddress(h.hostname) if check_against_blacklist( ip_address, self._ip_whitelist, self._ip_blacklist ): logger.info("Blocking access to %s due to blacklist" % (ip_address,)) e = SynapseError(403, "IP address blocked by IP blacklist entry") return defer.fail(Failure(e)) except Exception: # Not an IP pass return self._agent.request( method, uri, headers=headers, bodyProducer=bodyProducer ) class SimpleHttpClient: """ A simple, no-frills HTTP client with methods that wrap up common ways of using HTTP in Matrix """ def __init__( self, hs: "HomeServer", treq_args: Dict[str, Any] = {}, ip_whitelist: Optional[IPSet] = None, ip_blacklist: Optional[IPSet] = None, http_proxy: Optional[bytes] = None, https_proxy: Optional[bytes] = None, ): """ Args: hs treq_args: Extra keyword arguments to be given to treq.request. ip_blacklist: The IP addresses that are blacklisted that we may not request. ip_whitelist: The whitelisted IP addresses, that we can request if it were otherwise caught in a blacklist. http_proxy: proxy server to use for http connections. host[:port] https_proxy: proxy server to use for https connections. host[:port] """ self.hs = hs self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist self._extra_treq_args = treq_args self.user_agent = hs.version_string self.clock = hs.get_clock() if hs.config.user_agent_suffix: self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix) # We use this for our body producers to ensure that they use the correct # reactor. 
self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor())) self.user_agent = self.user_agent.encode("ascii") if self._ip_blacklist: real_reactor = hs.get_reactor() # If we have an IP blacklist, we need to use a DNS resolver which # filters out blacklisted IP addresses, to prevent DNS rebinding. nameResolver = IPBlacklistingResolver( real_reactor, self._ip_whitelist, self._ip_blacklist ) @implementer(IReactorPluggableNameResolver) class Reactor: def __getattr__(_self, attr): if attr == "nameResolver": return nameResolver else: return getattr(real_reactor, attr) self.reactor = Reactor() else: self.reactor = hs.get_reactor() # the pusher makes lots of concurrent SSL connections to sygnal, and # tends to do so in batches, so we need to allow the pool to keep # lots of idle connections around. pool = HTTPConnectionPool(self.reactor) # XXX: The justification for using the cache factor here is that larger instances # will need both more cache and more connections. # Still, this should probably be a separate dial pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5)) pool.cachedConnectionTimeout = 2 * 60 self.agent = ProxyAgent( self.reactor, connectTimeout=15, contextFactory=self.hs.get_http_client_context_factory(), pool=pool, http_proxy=http_proxy, https_proxy=https_proxy, ) if self._ip_blacklist: # If we have an IP blacklist, we then install the blacklisting Agent # which prevents direct access to IP addresses, that are not caught # by the DNS resolution. self.agent = BlacklistingAgentWrapper( self.agent, ip_whitelist=self._ip_whitelist, ip_blacklist=self._ip_blacklist, ) async def request( self, method: str, uri: str, data: Optional[bytes] = None, headers: Optional[Headers] = None, ) -> IResponse: """ Args: method: HTTP method to use. uri: URI to query. data: Data to send in the request body, if applicable. headers: Request headers. Returns: Response object, once the headers have been read. 
Raises: RequestTimedOutError if the request times out before the headers are read """ outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) logger.debug("Sending request %s %s", method, redact_uri(uri)) with start_active_span( "outgoing-client-request", tags={ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, tags.HTTP_METHOD: method, tags.HTTP_URL: uri, }, finish_on_close=True, ): try: body_producer = None if data is not None: body_producer = QuieterFileBodyProducer( BytesIO(data), cooperator=self._cooperator, ) request_deferred = treq.request( method, uri, agent=self.agent, data=body_producer, headers=headers, **self._extra_treq_args, ) # type: defer.Deferred # we use our own timeout mechanism rather than treq's as a workaround # for https://twistedmatrix.com/trac/ticket/9534. request_deferred = timeout_deferred( request_deferred, 60, self.hs.get_reactor(), ) # turn timeouts into RequestTimedOutErrors request_deferred.addErrback(_timeout_to_request_timed_out_error) response = await make_deferred_yieldable(request_deferred) incoming_responses_counter.labels(method, response.code).inc() logger.info( "Received response to %s %s: %s", method, redact_uri(uri), response.code, ) return response except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", method, redact_uri(uri), type(e).__name__, e.args[0], ) set_tag(tags.ERROR, True) set_tag("error_reason", e.args[0]) raise async def post_urlencoded_get_json( self, uri: str, args: Optional[Mapping[str, Union[str, List[str]]]] = None, headers: Optional[RawHeaders] = None, ) -> Any: """ Args: uri: uri to query args: parameters to be url-encoded in the body headers: a map from header name to a list of values for that header Returns: parsed json Raises: RequestTimedOutError: if there is a timeout before the response headers are received. 
Note there is currently no timeout on reading the response body. HttpResponseException: On a non-2xx HTTP response. ValueError: if the response was not JSON """ # TODO: Do we ever want to log message contents? logger.debug("post_urlencoded_get_json args: %s", args) query_bytes = encode_query_args(args) actual_headers = { b"Content-Type": [b"application/x-www-form-urlencoded"], b"User-Agent": [self.user_agent], b"Accept": [b"application/json"], } if headers: actual_headers.update(headers) # type: ignore response = await self.request( "POST", uri, headers=Headers(actual_headers), data=query_bytes ) body = await make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: return json_decoder.decode(body.decode("utf-8")) else: raise HttpResponseException( response.code, response.phrase.decode("ascii", errors="replace"), body ) async def post_json_get_json( self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None ) -> Any: """ Args: uri: URI to query. post_json: request body, to be encoded as json headers: a map from header name to a list of values for that header Returns: parsed json Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException: On a non-2xx HTTP response. 
ValueError: if the response was not JSON """ json_str = encode_canonical_json(post_json) logger.debug("HTTP POST %s -> %s", json_str, uri) actual_headers = { b"Content-Type": [b"application/json"], b"User-Agent": [self.user_agent], b"Accept": [b"application/json"], } if headers: actual_headers.update(headers) # type: ignore response = await self.request( "POST", uri, headers=Headers(actual_headers), data=json_str ) body = await make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: return json_decoder.decode(body.decode("utf-8")) else: raise HttpResponseException( response.code, response.phrase.decode("ascii", errors="replace"), body ) async def get_json( self, uri: str, args: Optional[QueryParams] = None, headers: Optional[RawHeaders] = None, ) -> Any: """Gets some json from the given URI. Args: uri: The URI to request, not including query parameters args: A dictionary used to create query string headers: a map from header name to a list of values for that header Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException On a non-2xx HTTP response. ValueError: if the response was not JSON """ actual_headers = {b"Accept": [b"application/json"]} if headers: actual_headers.update(headers) # type: ignore body = await self.get_raw(uri, args, headers=headers) return json_decoder.decode(body.decode("utf-8")) async def put_json( self, uri: str, json_body: Any, args: Optional[QueryParams] = None, headers: RawHeaders = None, ) -> Any: """Puts some json to the given URI. 
Args: uri: The URI to request, not including query parameters json_body: The JSON to put in the HTTP body, args: A dictionary used to create query strings headers: a map from header name to a list of values for that header Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException On a non-2xx HTTP response. ValueError: if the response was not JSON """ if args: query_str = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_str) json_str = encode_canonical_json(json_body) actual_headers = { b"Content-Type": [b"application/json"], b"User-Agent": [self.user_agent], b"Accept": [b"application/json"], } if headers: actual_headers.update(headers) # type: ignore response = await self.request( "PUT", uri, headers=Headers(actual_headers), data=json_str ) body = await make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: return json_decoder.decode(body.decode("utf-8")) else: raise HttpResponseException( response.code, response.phrase.decode("ascii", errors="replace"), body ) async def get_raw( self, uri: str, args: Optional[QueryParams] = None, headers: Optional[RawHeaders] = None, ) -> bytes: """Gets raw text from the given URI. Args: uri: The URI to request, not including query parameters args: A dictionary used to create query strings headers: a map from header name to a list of values for that header Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as bytes. Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. HttpResponseException on a non-2xx HTTP response. 
""" if args: query_str = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_str) actual_headers = {b"User-Agent": [self.user_agent]} if headers: actual_headers.update(headers) # type: ignore response = await self.request("GET", uri, headers=Headers(actual_headers)) body = await make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: return body else: raise HttpResponseException( response.code, response.phrase.decode("ascii", errors="replace"), body ) # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient. # The two should be factored out. async def get_file( self, url: str, output_stream: BinaryIO, max_size: Optional[int] = None, headers: Optional[RawHeaders] = None, ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: """GETs a file from a given URL Args: url: The URL to GET output_stream: File to write the response body to. headers: A map from header name to a list of values for that header Returns: A tuple of the file length, dict of the response headers, absolute URI of the response and HTTP response code. Raises: RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. SynapseError: if the response is not a 2xx, the remote file is too large, or another exception happens during the download. 
""" actual_headers = {b"User-Agent": [self.user_agent]} if headers: actual_headers.update(headers) # type: ignore response = await self.request("GET", url, headers=Headers(actual_headers)) resp_headers = dict(response.headers.getAllRawHeaders()) if ( b"Content-Length" in resp_headers and max_size and int(resp_headers[b"Content-Length"][0]) > max_size ): logger.warning("Requested URL is too large > %r bytes" % (max_size,)) raise SynapseError( 502, "Requested file is too large > %r bytes" % (max_size,), Codes.TOO_LARGE, ) if response.code > 299: logger.warning("Got %d when downloading %s" % (response.code, url)) raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN) # TODO: if our Content-Type is HTML or something, just read the first # N bytes into RAM rather than saving it all to disk only to read it # straight back in again try: length = await make_deferred_yieldable( readBodyToFile(response, output_stream, max_size) ) except SynapseError: # This can happen e.g. because the body is too large. raise except Exception as e: raise SynapseError(502, ("Failed to download remote body: %s" % e)) from e return ( length, resp_headers, response.request.absoluteURI.decode("ascii"), response.code, ) def _timeout_to_request_timed_out_error(f: Failure): if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError): # The TCP connection has its own timeout (set by the 'connectTimeout' param # on the Agent), which raises twisted_error.TimeoutError exception. 
raise RequestTimedOutError("Timeout connecting to remote server") elif f.check(defer.TimeoutError, ResponseNeverReceived): # this one means that we hit our overall timeout on the request raise RequestTimedOutError("Timeout waiting for response from remote server") return f class _ReadBodyToFileProtocol(protocol.Protocol): def __init__( self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int] ): self.stream = stream self.deferred = deferred self.length = 0 self.max_size = max_size def dataReceived(self, data: bytes) -> None: self.stream.write(data) self.length += len(data) if self.max_size is not None and self.length >= self.max_size: self.deferred.errback( SynapseError( 502, "Requested file is too large > %r bytes" % (self.max_size,), Codes.TOO_LARGE, ) ) self.deferred = defer.Deferred() self.transport.loseConnection() def connectionLost(self, reason: Failure) -> None: if reason.check(ResponseDone): self.deferred.callback(self.length) elif reason.check(PotentialDataLoss): # stolen from https://github.com/twisted/treq/pull/49/files # http://twistedmatrix.com/trac/ticket/4840 self.deferred.callback(self.length) else: self.deferred.errback(reason) def readBodyToFile( response: IResponse, stream: BinaryIO, max_size: Optional[int] ) -> defer.Deferred: """ Read a HTTP response body to a file-object. Optionally enforcing a maximum file size. Args: response: The HTTP response to read from. stream: The file-object to write to. max_size: The maximum file size to allow. Returns: A Deferred which resolves to the length of the read body. """ d = defer.Deferred() response.deliverBody(_ReadBodyToFileProtocol(stream, d, max_size)) return d def encode_query_args(args: Optional[Mapping[str, Union[str, List[str]]]]) -> bytes: """ Encodes a map of query arguments to bytes which can be appended to a URL. Args: args: The query arguments, a mapping of string to string or list of strings. Returns: The query arguments encoded as bytes. 
""" if args is None: return b"" encoded_args = {} for k, vs in args.items(): if isinstance(vs, str): vs = [vs] encoded_args[k] = [v.encode("utf8") for v in vs] query_str = urllib.parse.urlencode(encoded_args, True) return query_str.encode("utf8") class InsecureInterceptableContextFactory(ssl.ContextFactory): """ Factory for PyOpenSSL SSL contexts which accepts any certificate for any domain. Do not use this since it allows an attacker to intercept your communications. """ def __init__(self): self._context = SSL.Context(SSL.SSLv23_METHOD) self._context.set_verify(VERIFY_NONE, lambda *_: None) def getContext(self, hostname=None, port=None): return self._context def creatorForNetloc(self, hostname, port): return self
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_9
crossvul-python_data_good_3250_1
""" .. module: security_monkey.sso.views :platform: Unix :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more :license: Apache, see LICENSE for more details. .. moduleauthor:: Patrick Kelley <patrick@netflix.com> """ import jwt import base64 import requests from flask import Blueprint, current_app, redirect, request from flask.ext.restful import reqparse, Resource, Api from flask.ext.principal import Identity, identity_changed from flask_security.utils import login_user try: from onelogin.saml2.auth import OneLogin_Saml2_Auth from onelogin.saml2.utils import OneLogin_Saml2_Utils onelogin_import_success = True except ImportError: onelogin_import_success = False from .service import fetch_token_header_payload, get_rsa_public_key from security_monkey.datastore import User from security_monkey import db, rbac from urlparse import urlparse mod = Blueprint('sso', __name__) api = Api(mod) from flask_security.utils import validate_redirect_url class Ping(Resource): """ This class serves as an example of how one might implement an SSO provider for use with Security Monkey. In this example we use a OpenIDConnect authentication flow, that is essentially OAuth2 underneath. """ decorators = [rbac.allow(["anonymous"], ["GET", "POST"])] def __init__(self): self.reqparse = reqparse.RequestParser() super(Ping, self).__init__() def get(self): return self.post() def post(self): if "ping" not in current_app.config.get("ACTIVE_PROVIDERS"): return "Ping is not enabled in the config. 
See the ACTIVE_PROVIDERS section.", 404 default_state = 'clientId,{client_id},redirectUri,{redirectUri},return_to,{return_to}'.format( client_id=current_app.config.get('PING_CLIENT_ID'), redirectUri=current_app.config.get('PING_REDIRECT_URI'), return_to=current_app.config.get('WEB_PATH') ) self.reqparse.add_argument('code', type=str, required=True) self.reqparse.add_argument('state', type=str, required=False, default=default_state) args = self.reqparse.parse_args() client_id = args['state'].split(',')[1] redirect_uri = args['state'].split(',')[3] return_to = args['state'].split(',')[5] if not validate_redirect_url(return_to): return_to = current_app.config.get('WEB_PATH') # take the information we have received from the provider to create a new request params = { 'client_id': client_id, 'grant_type': 'authorization_code', 'scope': 'openid email profile address', 'redirect_uri': redirect_uri, 'code': args['code'] } # you can either discover these dynamically or simply configure them access_token_url = current_app.config.get('PING_ACCESS_TOKEN_URL') user_api_url = current_app.config.get('PING_USER_API_URL') # the secret and cliendId will be given to you when you signup for the provider basic = base64.b64encode(bytes('{0}:{1}'.format(client_id, current_app.config.get("PING_SECRET")))) headers = {'Authorization': 'Basic {0}'.format(basic.decode('utf-8'))} # exchange authorization code for access token. 
r = requests.post(access_token_url, headers=headers, params=params) id_token = r.json()['id_token'] access_token = r.json()['access_token'] # fetch token public key header_data = fetch_token_header_payload(id_token)[0] jwks_url = current_app.config.get('PING_JWKS_URL') # retrieve the key material as specified by the token header r = requests.get(jwks_url) for key in r.json()['keys']: if key['kid'] == header_data['kid']: secret = get_rsa_public_key(key['n'], key['e']) algo = header_data['alg'] break else: return dict(message='Key not found'), 403 # validate your token based on the key it was signed with try: current_app.logger.debug(id_token) current_app.logger.debug(secret) current_app.logger.debug(algo) jwt.decode(id_token, secret.decode('utf-8'), algorithms=[algo], audience=client_id) except jwt.DecodeError: return dict(message='Token is invalid'), 403 except jwt.ExpiredSignatureError: return dict(message='Token has expired'), 403 except jwt.InvalidTokenError: return dict(message='Token is invalid'), 403 user_params = dict(access_token=access_token, schema='profile') # retrieve information about the current user. 
r = requests.get(user_api_url, params=user_params) profile = r.json() user = User.query.filter(User.email==profile['email']).first() # if we get an sso user create them an account if not user: user = User( email=profile['email'], active=True, role='View' # profile_picture=profile.get('thumbnailPhotoUrl') ) db.session.add(user) db.session.commit() db.session.refresh(user) # Tell Flask-Principal the identity changed identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) login_user(user) return redirect(return_to, code=302) class Google(Resource): decorators = [rbac.allow(["anonymous"], ["GET", "POST"])] def __init__(self): self.reqparse = reqparse.RequestParser() super(Google, self).__init__() def get(self): return self.post() def post(self): if "google" not in current_app.config.get("ACTIVE_PROVIDERS"): return "Google is not enabled in the config. See the ACTIVE_PROVIDERS section.", 404 default_state = 'clientId,{client_id},redirectUri,{redirectUri},return_to,{return_to}'.format( client_id=current_app.config.get("GOOGLE_CLIENT_ID"), redirectUri=api.url_for(Google), return_to=current_app.config.get('WEB_PATH') ) self.reqparse.add_argument('code', type=str, required=True) self.reqparse.add_argument('state', type=str, required=False, default=default_state) args = self.reqparse.parse_args() client_id = args['state'].split(',')[1] redirect_uri = args['state'].split(',')[3] return_to = args['state'].split(',')[5] if not validate_redirect_url(return_to): return_to = current_app.config.get('WEB_PATH') access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' args = self.reqparse.parse_args() # Step 1. 
Exchange authorization code for access token payload = { 'client_id': client_id, 'grant_type': 'authorization_code', 'redirect_uri': redirect_uri, 'code': args['code'], 'client_secret': current_app.config.get('GOOGLE_SECRET') } r = requests.post(access_token_url, data=payload) token = r.json() # Step 1bis. Validate (some information of) the id token (if necessary) google_hosted_domain = current_app.config.get("GOOGLE_HOSTED_DOMAIN") if google_hosted_domain is not None: current_app.logger.debug('We need to verify that the token was issued for this hosted domain: %s ' % (google_hosted_domain)) # Get the JSON Web Token id_token = r.json()['id_token'] current_app.logger.debug('The id_token is: %s' % (id_token)) # Extract the payload (header_data, payload_data) = fetch_token_header_payload(id_token) current_app.logger.debug('id_token.header_data: %s' % (header_data)) current_app.logger.debug('id_token.payload_data: %s' % (payload_data)) token_hd = payload_data.get('hd') if token_hd != google_hosted_domain: current_app.logger.debug('Verification failed: %s != %s' % (token_hd, google_hosted_domain)) return dict(message='Token is invalid %s' % token), 403 current_app.logger.debug('Verification passed') # Step 2. 
Retrieve information about the current user headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} r = requests.get(people_api_url, headers=headers) profile = r.json() user = User.query.filter(User.email == profile['email']).first() # if we get an sso user create them an account if not user: user = User( email=profile['email'], active=True, role='View' # profile_picture=profile.get('thumbnailPhotoUrl') ) db.session.add(user) db.session.commit() db.session.refresh(user) # Tell Flask-Principal the identity changed identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) login_user(user) return redirect(return_to, code=302) class OneLogin(Resource): decorators = [rbac.allow(["anonymous"], ["GET", "POST"])] def __init__(self): self.reqparse = reqparse.RequestParser() self.req = OneLogin.prepare_from_flask_request(request) super(OneLogin, self).__init__() @staticmethod def prepare_from_flask_request(req): url_data = urlparse(req.url) return { 'http_host': req.host, 'server_port': url_data.port, 'script_name': req.path, 'get_data': req.args.copy(), 'post_data': req.form.copy(), 'https': ("on" if current_app.config.get("ONELOGIN_HTTPS") else "off") } def get(self): return self.post() def _consumer(self, auth): auth.process_response() errors = auth.get_errors() if not errors: if auth.is_authenticated: return True else: return False else: current_app.logger.error('Error processing %s' % (', '.join(errors))) return False def post(self): if "onelogin" not in current_app.config.get("ACTIVE_PROVIDERS"): return "Onelogin is not enabled in the config. 
See the ACTIVE_PROVIDERS section.", 404 auth = OneLogin_Saml2_Auth(self.req, current_app.config.get("ONELOGIN_SETTINGS")) self.reqparse.add_argument('return_to', required=False, default=current_app.config.get('WEB_PATH')) self.reqparse.add_argument('acs', required=False) self.reqparse.add_argument('sls', required=False) args = self.reqparse.parse_args() return_to = args['return_to'] if args['acs'] != None: # valids the SAML response and checks if successfully authenticated if self._consumer(auth): email = auth.get_attribute(current_app.config.get("ONELOGIN_EMAIL_FIELD"))[0] user = User.query.filter(User.email == email).first() # if we get an sso user create them an account if not user: user = User( email=email, active=True, role=current_app.config.get('ONELOGIN_DEFAULT_ROLE') # profile_picture=profile.get('thumbnailPhotoUrl') ) db.session.add(user) db.session.commit() db.session.refresh(user) # Tell Flask-Principal the identity changed identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) login_user(user) self_url = OneLogin_Saml2_Utils.get_self_url(self.req) if 'RelayState' in request.form and self_url != request.form['RelayState']: return redirect(auth.redirect_to(request.form['RelayState']), code=302) else: return redirect(current_app.config.get('BASE_URL'), code=302) else: return dict(message='OneLogin authentication failed.'), 403 elif args['sls'] != None: return dict(message='OneLogin SLS not implemented yet.'), 405 else: return redirect(auth.login(return_to=return_to)) class Providers(Resource): decorators = [rbac.allow(["anonymous"], ["GET"])] def __init__(self): super(Providers, self).__init__() def get(self): active_providers = [] for provider in current_app.config.get("ACTIVE_PROVIDERS"): provider = provider.lower() if provider == "ping": active_providers.append({ 'name': current_app.config.get("PING_NAME"), 'url': current_app.config.get('PING_REDIRECT_URI'), 'redirectUri': current_app.config.get("PING_REDIRECT_URI"), 
'clientId': current_app.config.get("PING_CLIENT_ID"), 'responseType': 'code', 'scope': ['openid', 'profile', 'email'], 'scopeDelimiter': ' ', 'authorizationEndpoint': current_app.config.get("PING_AUTH_ENDPOINT"), 'requiredUrlParams': ['scope'], 'type': '2.0' }) elif provider == "google": google_provider = { 'name': 'google', 'clientId': current_app.config.get("GOOGLE_CLIENT_ID"), 'url': api.url_for(Google, _external=True, _scheme='https'), 'redirectUri': api.url_for(Google, _external=True, _scheme='https'), 'authorizationEndpoint': current_app.config.get("GOOGLE_AUTH_ENDPOINT"), 'scope': ['openid email'], 'responseType': 'code' } google_hosted_domain = current_app.config.get("GOOGLE_HOSTED_DOMAIN") if google_hosted_domain is not None: google_provider['hd'] = google_hosted_domain active_providers.append(google_provider) elif provider == "onelogin": active_providers.append({ 'name': 'OneLogin', 'authorizationEndpoint': api.url_for(OneLogin) }) else: raise Exception("Unknown authentication provider: {0}".format(provider)) return active_providers api.add_resource(Ping, '/auth/ping', endpoint='ping') api.add_resource(Google, '/auth/google', endpoint='google') api.add_resource(Providers, '/auth/providers', endpoint='providers') if onelogin_import_success: api.add_resource(OneLogin, '/auth/onelogin', endpoint='onelogin')
./CrossVul/dataset_final_sorted/CWE-601/py/good_3250_1
crossvul-python_data_bad_4332_1
"""Base Tornado handlers for the notebook server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import datetime import functools import ipaddress import json import mimetypes import os import re import sys import traceback import types import warnings from http.client import responses from http.cookies import Morsel from urllib.parse import urlparse from jinja2 import TemplateNotFound from tornado import web, gen, escape, httputil from tornado.log import app_log import prometheus_client from notebook._sysinfo import get_sys_info from traitlets.config import Application from ipython_genutils.path import filefind from ipython_genutils.py3compat import string_types import notebook from notebook._tz import utcnow from notebook.i18n import combine_translations from notebook.utils import is_hidden, url_path_join, url_is_absolute, url_escape, urldecode_unix_socket_path from notebook.services.security import csp_report_uri #----------------------------------------------------------------------------- # Top-level handlers #----------------------------------------------------------------------------- non_alphanum = re.compile(r'[^A-Za-z0-9]') _sys_info_cache = None def json_sys_info(): global _sys_info_cache if _sys_info_cache is None: _sys_info_cache = json.dumps(get_sys_info()) return _sys_info_cache def log(): if Application.initialized(): return Application.instance().log else: return app_log class AuthenticatedHandler(web.RequestHandler): """A RequestHandler with an authenticated user.""" @property def content_security_policy(self): """The default Content-Security-Policy header Can be overridden by defining Content-Security-Policy in settings['headers'] """ if 'Content-Security-Policy' in self.settings.get('headers', {}): # user-specified, don't override return self.settings['headers']['Content-Security-Policy'] return '; '.join([ "frame-ancestors 'self'", # Make sure the report-uri is relative to the base_url 
"report-uri " + self.settings.get('csp_report_uri', url_path_join(self.base_url, csp_report_uri)), ]) def set_default_headers(self): headers = {} headers["X-Content-Type-Options"] = "nosniff" headers.update(self.settings.get('headers', {})) headers["Content-Security-Policy"] = self.content_security_policy # Allow for overriding headers for header_name, value in headers.items(): try: self.set_header(header_name, value) except Exception as e: # tornado raise Exception (not a subclass) # if method is unsupported (websocket and Access-Control-Allow-Origin # for example, so just ignore) self.log.debug(e) def force_clear_cookie(self, name, path="/", domain=None): """Deletes the cookie with the given name. Tornado's cookie handling currently (Jan 2018) stores cookies in a dict keyed by name, so it can only modify one cookie with a given name per response. The browser can store multiple cookies with the same name but different domains and/or paths. This method lets us clear multiple cookies with the same name. Due to limitations of the cookie protocol, you must pass the same path and domain to clear a cookie as were used when that cookie was set (but there is no way to find out on the server side which values were used for a given cookie). """ name = escape.native_str(name) expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) morsel = Morsel() morsel.set(name, '', '""') morsel['expires'] = httputil.format_timestamp(expires) morsel['path'] = path if domain: morsel['domain'] = domain self.add_header("Set-Cookie", morsel.OutputString()) def clear_login_cookie(self): cookie_options = self.settings.get('cookie_options', {}) path = cookie_options.setdefault('path', self.base_url) self.clear_cookie(self.cookie_name, path=path) if path and path != '/': # also clear cookie on / to ensure old cookies are cleared # after the change in path behavior (changed in notebook 5.2.2). # N.B. 
This bypasses the normal cookie handling, which can't update # two cookies with the same name. See the method above. self.force_clear_cookie(self.cookie_name) def get_current_user(self): if self.login_handler is None: return 'anonymous' return self.login_handler.get_user(self) def skip_check_origin(self): """Ask my login_handler if I should skip the origin_check For example: in the default LoginHandler, if a request is token-authenticated, origin checking should be skipped. """ if self.request.method == 'OPTIONS': # no origin-check on options requests, which are used to check origins! return True if self.login_handler is None or not hasattr(self.login_handler, 'should_check_origin'): return False return not self.login_handler.should_check_origin(self) @property def token_authenticated(self): """Have I been authenticated with a token?""" if self.login_handler is None or not hasattr(self.login_handler, 'is_token_authenticated'): return False return self.login_handler.is_token_authenticated(self) @property def cookie_name(self): default_cookie_name = non_alphanum.sub('-', 'username-{}'.format( self.request.host )) return self.settings.get('cookie_name', default_cookie_name) @property def logged_in(self): """Is a user currently logged in?""" user = self.get_current_user() return (user and not user == 'anonymous') @property def login_handler(self): """Return the login handler for this application, if any.""" return self.settings.get('login_handler_class', None) @property def token(self): """Return the login token for this application, if any.""" return self.settings.get('token', None) @property def login_available(self): """May a user proceed to log in? This returns True if login capability is available, irrespective of whether the user is already logged in or not. 
""" if self.login_handler is None: return False return bool(self.login_handler.get_login_available(self.settings)) class IPythonHandler(AuthenticatedHandler): """IPython-specific extensions to authenticated handling Mostly property shortcuts to IPython-specific settings. """ @property def ignore_minified_js(self): """Wether to user bundle in template. (*.min files) Mainly use for development and avoid file recompilation """ return self.settings.get('ignore_minified_js', False) @property def config(self): return self.settings.get('config', None) @property def log(self): """use the IPython log by default, falling back on tornado's logger""" return log() @property def jinja_template_vars(self): """User-supplied values to supply to jinja templates.""" return self.settings.get('jinja_template_vars', {}) #--------------------------------------------------------------- # URLs #--------------------------------------------------------------- @property def version_hash(self): """The version hash to use for cache hints for static files""" return self.settings.get('version_hash', '') @property def mathjax_url(self): url = self.settings.get('mathjax_url', '') if not url or url_is_absolute(url): return url return url_path_join(self.base_url, url) @property def mathjax_config(self): return self.settings.get('mathjax_config', 'TeX-AMS-MML_HTMLorMML-full,Safe') @property def base_url(self): return self.settings.get('base_url', '/') @property def default_url(self): return self.settings.get('default_url', '') @property def ws_url(self): return self.settings.get('websocket_url', '') @property def contents_js_source(self): self.log.debug("Using contents: %s", self.settings.get('contents_js_source', 'services/contents')) return self.settings.get('contents_js_source', 'services/contents') #--------------------------------------------------------------- # Manager objects #--------------------------------------------------------------- @property def kernel_manager(self): return 
self.settings['kernel_manager'] @property def contents_manager(self): return self.settings['contents_manager'] @property def session_manager(self): return self.settings['session_manager'] @property def terminal_manager(self): return self.settings['terminal_manager'] @property def kernel_spec_manager(self): return self.settings['kernel_spec_manager'] @property def config_manager(self): return self.settings['config_manager'] #--------------------------------------------------------------- # CORS #--------------------------------------------------------------- @property def allow_origin(self): """Normal Access-Control-Allow-Origin""" return self.settings.get('allow_origin', '') @property def allow_origin_pat(self): """Regular expression version of allow_origin""" return self.settings.get('allow_origin_pat', None) @property def allow_credentials(self): """Whether to set Access-Control-Allow-Credentials""" return self.settings.get('allow_credentials', False) def set_default_headers(self): """Add CORS headers, if defined""" super(IPythonHandler, self).set_default_headers() if self.allow_origin: self.set_header("Access-Control-Allow-Origin", self.allow_origin) elif self.allow_origin_pat: origin = self.get_origin() if origin and self.allow_origin_pat.match(origin): self.set_header("Access-Control-Allow-Origin", origin) elif ( self.token_authenticated and "Access-Control-Allow-Origin" not in self.settings.get('headers', {}) ): # allow token-authenticated requests cross-origin by default. # only apply this exception if allow-origin has not been specified. 
self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) if self.allow_credentials: self.set_header("Access-Control-Allow-Credentials", 'true') def set_attachment_header(self, filename): """Set Content-Disposition: attachment header As a method to ensure handling of filename encoding """ escaped_filename = url_escape(filename) self.set_header('Content-Disposition', 'attachment;' " filename*=utf-8''{utf8}" .format( utf8=escaped_filename, ) ) def get_origin(self): # Handle WebSocket Origin naming convention differences # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". if "Origin" in self.request.headers: origin = self.request.headers.get("Origin") else: origin = self.request.headers.get("Sec-Websocket-Origin", None) return origin # origin_to_satisfy_tornado is present because tornado requires # check_origin to take an origin argument, but we don't use it def check_origin(self, origin_to_satisfy_tornado=""): """Check Origin for cross-site API requests, including websockets Copied from WebSocket with changes: - allow unspecified host/origin (e.g. scripts) - allow token-authenticated requests """ if self.allow_origin == '*' or self.skip_check_origin(): return True host = self.request.headers.get("Host") origin = self.request.headers.get("Origin") # If no header is provided, let the request through. 
# Origin can be None for: # - same-origin (IE, Firefox) # - Cross-site POST form (IE, Firefox) # - Scripts # The cross-site POST (XSRF) case is handled by tornado's xsrf_token if origin is None or host is None: return True origin = origin.lower() origin_host = urlparse(origin).netloc # OK if origin matches host if origin_host == host: return True # Check CORS headers if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) else: # No CORS headers deny the request allow = False if not allow: self.log.warning("Blocking Cross Origin API request for %s. Origin: %s, Host: %s", self.request.path, origin, host, ) return allow def check_referer(self): """Check Referer for cross-site requests. Disables requests to certain endpoints with external or missing Referer. If set, allow_origin settings are applied to the Referer to whitelist specific cross-origin sites. Used on GET for api endpoints and /files/ to block cross-site inclusion (XSSI). """ host = self.request.headers.get("Host") referer = self.request.headers.get("Referer") if not host: self.log.warning("Blocking request with no host") return False if not referer: self.log.warning("Blocking request with no referer") return False referer_url = urlparse(referer) referer_host = referer_url.netloc if referer_host == host: return True # apply cross-origin checks to Referer: origin = "{}://{}".format(referer_url.scheme, referer_url.netloc) if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) else: # No CORS settings, deny the request allow = False if not allow: self.log.warning("Blocking Cross Origin request for %s. 
Referer: %s, Host: %s", self.request.path, origin, host, ) return allow def check_xsrf_cookie(self): """Bypass xsrf cookie checks when token-authenticated""" if self.token_authenticated or self.settings.get('disable_check_xsrf', False): # Token-authenticated requests do not need additional XSRF-check # Servers without authentication are vulnerable to XSRF return try: return super(IPythonHandler, self).check_xsrf_cookie() except web.HTTPError as e: if self.request.method in {'GET', 'HEAD'}: # Consider Referer a sufficient cross-origin check for GET requests if not self.check_referer(): referer = self.request.headers.get('Referer') if referer: msg = "Blocking Cross Origin request from {}.".format(referer) else: msg = "Blocking request from unknown origin" raise web.HTTPError(403, msg) from e else: raise def check_host(self): """Check the host header if remote access disallowed. Returns True if the request should continue, False otherwise. """ if self.settings.get('allow_remote_access', False): return True # Remove port (e.g. ':8888') from host host = re.match(r'^(.*?)(:\d+)?$', self.request.host).group(1) # Browsers format IPv6 addresses like [::1]; we need to remove the [] if host.startswith('[') and host.endswith(']'): host = host[1:-1] # UNIX socket handling check_host = urldecode_unix_socket_path(host) if check_host.startswith('/') and os.path.exists(check_host): allow = True else: try: addr = ipaddress.ip_address(host) except ValueError: # Not an IP address: check against hostnames allow = host in self.settings.get('local_hostnames', ['localhost']) else: allow = addr.is_loopback if not allow: self.log.warning( ("Blocking request with non-local 'Host' %s (%s). 
" "If the notebook should be accessible at that name, " "set NotebookApp.allow_remote_access to disable the check."), host, self.request.host ) return allow def prepare(self): if not self.check_host(): raise web.HTTPError(403) return super(IPythonHandler, self).prepare() #--------------------------------------------------------------- # template rendering #--------------------------------------------------------------- def get_template(self, name): """Return the jinja template object for a given name""" return self.settings['jinja2_env'].get_template(name) def render_template(self, name, **ns): ns.update(self.template_namespace) template = self.get_template(name) return template.render(**ns) @property def template_namespace(self): return dict( base_url=self.base_url, default_url=self.default_url, ws_url=self.ws_url, logged_in=self.logged_in, allow_password_change=self.settings.get('allow_password_change'), login_available=self.login_available, token_available=bool(self.token), static_url=self.static_url, sys_info=json_sys_info(), contents_js_source=self.contents_js_source, version_hash=self.version_hash, ignore_minified_js=self.ignore_minified_js, xsrf_form_html=self.xsrf_form_html, token=self.token, xsrf_token=self.xsrf_token.decode('utf8'), nbjs_translations=json.dumps(combine_translations( self.request.headers.get('Accept-Language', ''))), **self.jinja_template_vars ) def get_json_body(self): """Return the body of the request as JSON data.""" if not self.request.body: return None # Do we need to call body.decode('utf-8') here? 
body = self.request.body.strip().decode(u'utf-8') try: model = json.loads(body) except Exception as e: self.log.debug("Bad JSON: %r", body) self.log.error("Couldn't parse JSON", exc_info=True) raise web.HTTPError(400, u'Invalid JSON in body of request') from e return model def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') exception = '(unknown)' if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: html = self.render_template('error.html', **ns) self.write(html) class APIHandler(IPythonHandler): """Base class for API handlers""" def prepare(self): if not self.check_origin(): raise web.HTTPError(404) return super(APIHandler, self).prepare() def write_error(self, status_code, **kwargs): """APIHandler errors are JSON, not human pages""" self.set_header('Content-Type', 'application/json') message = responses.get(status_code, 'Unknown HTTP Error') reply = { 'message': message, } exc_info = kwargs.get('exc_info') if exc_info: e = exc_info[1] if isinstance(e, HTTPError): reply['message'] = e.log_message or message reply['reason'] = e.reason else: reply['message'] = 'Unhandled error' reply['reason'] = None reply['traceback'] = ''.join(traceback.format_exception(*exc_info)) self.log.warning(reply['message']) self.finish(json.dumps(reply)) def get_current_user(self): """Raise 403 on API handlers instead of redirecting to human login page""" 
# preserve _user_cache so we don't raise more than once if hasattr(self, '_user_cache'): return self._user_cache self._user_cache = user = super(APIHandler, self).get_current_user() return user def get_login_url(self): # if get_login_url is invoked in an API handler, # that means @web.authenticated is trying to trigger a redirect. # instead of redirecting, raise 403 instead. if not self.current_user: raise web.HTTPError(403) return super(APIHandler, self).get_login_url() @property def content_security_policy(self): csp = '; '.join([ super(APIHandler, self).content_security_policy, "default-src 'none'", ]) return csp # set _track_activity = False on API handlers that shouldn't track activity _track_activity = True def update_api_activity(self): """Update last_activity of API requests""" # record activity of authenticated requests if ( self._track_activity and getattr(self, '_user_cache', None) and self.get_argument('no_track_activity', None) is None ): self.settings['api_last_activity'] = utcnow() def finish(self, *args, **kwargs): self.update_api_activity() self.set_header('Content-Type', 'application/json') return super(APIHandler, self).finish(*args, **kwargs) def options(self, *args, **kwargs): if 'Access-Control-Allow-Headers' in self.settings.get('headers', {}): self.set_header('Access-Control-Allow-Headers', self.settings['headers']['Access-Control-Allow-Headers']) else: self.set_header('Access-Control-Allow-Headers', 'accept, content-type, authorization, x-xsrftoken') self.set_header('Access-Control-Allow-Methods', 'GET, PUT, POST, PATCH, DELETE, OPTIONS') # if authorization header is requested, # that means the request is token-authenticated. # avoid browser-side rejection of the preflight request. # only allow this exception if allow_origin has not been specified # and notebook authentication is enabled. # If the token is not valid, the 'real' request will still be rejected. 
requested_headers = self.request.headers.get('Access-Control-Request-Headers', '').split(',') if requested_headers and any( h.strip().lower() == 'authorization' for h in requested_headers ) and ( # FIXME: it would be even better to check specifically for token-auth, # but there is currently no API for this. self.login_available ) and ( self.allow_origin or self.allow_origin_pat or 'Access-Control-Allow-Origin' in self.settings.get('headers', {}) ): self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) class Template404(IPythonHandler): """Render our 404 template""" def prepare(self): raise web.HTTPError(404) class AuthenticatedFileHandler(IPythonHandler, web.StaticFileHandler): """static files should only be accessible when logged in""" @property def content_security_policy(self): # In case we're serving HTML/SVG, confine any Javascript to a unique # origin so it can't interact with the notebook server. return super(AuthenticatedFileHandler, self).content_security_policy + \ "; sandbox allow-scripts" @web.authenticated def head(self, path): self.check_xsrf_cookie() return super(AuthenticatedFileHandler, self).head(path) @web.authenticated def get(self, path): self.check_xsrf_cookie() if os.path.splitext(path)[1] == '.ipynb' or self.get_argument("download", False): name = path.rsplit('/', 1)[-1] self.set_attachment_header(name) return web.StaticFileHandler.get(self, path) def get_content_type(self): path = self.absolute_path.strip('/') if '/' in path: _, name = path.rsplit('/', 1) else: name = path if name.endswith('.ipynb'): return 'application/x-ipynb+json' else: cur_mime = mimetypes.guess_type(name)[0] if cur_mime == 'text/plain': return 'text/plain; charset=UTF-8' else: return super(AuthenticatedFileHandler, self).get_content_type() def set_headers(self): super(AuthenticatedFileHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments: 
self.add_header("Cache-Control", "no-cache") def compute_etag(self): return None def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. Requires tornado 3.1 Adding to tornado's own handling, forbids the serving of hidden files. """ abs_path = super(AuthenticatedFileHandler, self).validate_absolute_path(root, absolute_path) abs_root = os.path.abspath(root) if is_hidden(abs_path, abs_root) and not self.contents_manager.allow_hidden: self.log.info("Refusing to serve hidden file, via 404 Error, use flag 'ContentsManager.allow_hidden' to enable") raise web.HTTPError(404) return abs_path def json_errors(method): """Decorate methods with this to return GitHub style JSON errors. This should be used on any JSON API on any handler method that can raise HTTPErrors. This will grab the latest HTTPError exception using sys.exc_info and then: 1. Set the HTTP status code based on the HTTPError 2. Create and return a JSON body with a message field describing the error in a human readable form. """ warnings.warn('@json_errors is deprecated in notebook 5.2.0. 
Subclass APIHandler instead.', DeprecationWarning, stacklevel=2, ) @functools.wraps(method) def wrapper(self, *args, **kwargs): self.write_error = types.MethodType(APIHandler.write_error, self) return method(self, *args, **kwargs) return wrapper #----------------------------------------------------------------------------- # File handler #----------------------------------------------------------------------------- # to minimize subclass changes: HTTPError = web.HTTPError class FileFindHandler(IPythonHandler, web.StaticFileHandler): """subclass of StaticFileHandler for serving files from a search path""" # cache search results, don't search for files more than once _static_paths = {} def set_headers(self): super(FileFindHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments or \ any(self.request.path.startswith(path) for path in self.no_cache_paths): self.set_header("Cache-Control", "no-cache") def initialize(self, path, default_filename=None, no_cache_paths=None): self.no_cache_paths = no_cache_paths or [] if isinstance(path, string_types): path = [path] self.root = tuple( os.path.abspath(os.path.expanduser(p)) + os.sep for p in path ) self.default_filename = default_filename def compute_etag(self): return None @classmethod def get_absolute_path(cls, roots, path): """locate a file to serve on our static file search path""" with cls._lock: if path in cls._static_paths: return cls._static_paths[path] try: abspath = os.path.abspath(filefind(path, roots)) except IOError: # IOError means not found return '' cls._static_paths[path] = abspath log().debug("Path %s served from %s"%(path, abspath)) return abspath def validate_absolute_path(self, root, absolute_path): """check if the file should be served (raises 404, 403, etc.)""" if absolute_path == '': raise web.HTTPError(404) for root in self.root: if (absolute_path + os.sep).startswith(root): break return super(FileFindHandler, 
self).validate_absolute_path(root, absolute_path) class APIVersionHandler(APIHandler): def get(self): # not authenticated, so give as few info as possible self.finish(json.dumps({"version":notebook.__version__})) class TrailingSlashHandler(web.RequestHandler): """Simple redirect handler that strips trailing slashes This should be the first, highest priority handler. """ def get(self): self.redirect(self.request.uri.rstrip('/')) post = put = get class FilesRedirectHandler(IPythonHandler): """Handler for redirecting relative URLs to the /files/ handler""" @staticmethod def redirect_to_files(self, path): """make redirect logic a reusable static method so it can be called from other handlers. """ cm = self.contents_manager if cm.dir_exists(path): # it's a *directory*, redirect to /tree url = url_path_join(self.base_url, 'tree', url_escape(path)) else: orig_path = path # otherwise, redirect to /files parts = path.split('/') if not cm.file_exists(path=path) and 'files' in parts: # redirect without files/ iff it would 404 # this preserves pre-2.0-style 'files/' links self.log.warning("Deprecated files/ URL: %s", orig_path) parts.remove('files') path = '/'.join(parts) if not cm.file_exists(path=path): raise web.HTTPError(404) url = url_path_join(self.base_url, 'files', url_escape(path)) self.log.debug("Redirecting %s to %s", self.request.path, url) self.redirect(url) def get(self, path=''): return self.redirect_to_files(self, path) class RedirectWithParams(web.RequestHandler): """Sam as web.RedirectHandler, but preserves URL parameters""" def initialize(self, url, permanent=True): self._url = url self._permanent = permanent def get(self): sep = '&' if '?' in self._url else '?' 
url = sep.join([self._url, self.request.query]) self.redirect(url, permanent=self._permanent) class PrometheusMetricsHandler(IPythonHandler): """ Return prometheus metrics for this notebook server """ @web.authenticated def get(self): self.set_header('Content-Type', prometheus_client.CONTENT_TYPE_LATEST) self.write(prometheus_client.generate_latest(prometheus_client.REGISTRY)) #----------------------------------------------------------------------------- # URL pattern fragments for re-use #----------------------------------------------------------------------------- # path matches any number of `/foo[/bar...]` or just `/` or '' path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))" #----------------------------------------------------------------------------- # URL to handler mappings #----------------------------------------------------------------------------- default_handlers = [ (r".*/", TrailingSlashHandler), (r"api", APIVersionHandler), (r'/(robots\.txt|favicon\.ico)', web.StaticFileHandler), (r'/metrics', PrometheusMetricsHandler) ]
./CrossVul/dataset_final_sorted/CWE-601/py/bad_4332_1
crossvul-python_data_good_4351_0
"""Base Tornado handlers for the Jupyter server.""" # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import datetime import functools import ipaddress import json import mimetypes import os import re import sys import traceback import types import warnings from http.client import responses from http.cookies import Morsel from urllib.parse import urlparse from jinja2 import TemplateNotFound from tornado import web, gen, escape, httputil from tornado.log import app_log import prometheus_client from jupyter_server._sysinfo import get_sys_info from traitlets.config import Application from ipython_genutils.path import filefind from ipython_genutils.py3compat import string_types import jupyter_server from jupyter_server._tz import utcnow from jupyter_server.i18n import combine_translations from jupyter_server.utils import is_hidden, url_path_join, url_is_absolute, url_escape from jupyter_server.services.security import csp_report_uri #----------------------------------------------------------------------------- # Top-level handlers #----------------------------------------------------------------------------- non_alphanum = re.compile(r'[^A-Za-z0-9]') _sys_info_cache = None def json_sys_info(): global _sys_info_cache if _sys_info_cache is None: _sys_info_cache = json.dumps(get_sys_info()) return _sys_info_cache def log(): if Application.initialized(): return Application.instance().log else: return app_log class AuthenticatedHandler(web.RequestHandler): """A RequestHandler with an authenticated user.""" @property def content_security_policy(self): """The default Content-Security-Policy header Can be overridden by defining Content-Security-Policy in settings['headers'] """ if 'Content-Security-Policy' in self.settings.get('headers', {}): # user-specified, don't override return self.settings['headers']['Content-Security-Policy'] return '; '.join([ "frame-ancestors 'self'", # Make sure the report-uri is relative to the 
base_url "report-uri " + self.settings.get('csp_report_uri', url_path_join(self.base_url, csp_report_uri)), ]) def set_default_headers(self): headers = {} headers.update(self.settings.get('headers', {})) headers["Content-Security-Policy"] = self.content_security_policy # Allow for overriding headers for header_name, value in headers.items(): try: self.set_header(header_name, value) except Exception as e: # tornado raise Exception (not a subclass) # if method is unsupported (websocket and Access-Control-Allow-Origin # for example, so just ignore) self.log.debug(e) def force_clear_cookie(self, name, path="/", domain=None): """Deletes the cookie with the given name. Tornado's cookie handling currently (Jan 2018) stores cookies in a dict keyed by name, so it can only modify one cookie with a given name per response. The browser can store multiple cookies with the same name but different domains and/or paths. This method lets us clear multiple cookies with the same name. Due to limitations of the cookie protocol, you must pass the same path and domain to clear a cookie as were used when that cookie was set (but there is no way to find out on the server side which values were used for a given cookie). """ name = escape.native_str(name) expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) morsel = Morsel() morsel.set(name, '', '""') morsel['expires'] = httputil.format_timestamp(expires) morsel['path'] = path if domain: morsel['domain'] = domain self.add_header("Set-Cookie", morsel.OutputString()) def clear_login_cookie(self): cookie_options = self.settings.get('cookie_options', {}) path = cookie_options.setdefault('path', self.base_url) self.clear_cookie(self.cookie_name, path=path) if path and path != '/': # also clear cookie on / to ensure old cookies are cleared # after the change in path behavior. # N.B. This bypasses the normal cookie handling, which can't update # two cookies with the same name. See the method above. 
self.force_clear_cookie(self.cookie_name) def get_current_user(self): if self.login_handler is None: return 'anonymous' return self.login_handler.get_user(self) def skip_check_origin(self): """Ask my login_handler if I should skip the origin_check For example: in the default LoginHandler, if a request is token-authenticated, origin checking should be skipped. """ if self.request.method == 'OPTIONS': # no origin-check on options requests, which are used to check origins! return True if self.login_handler is None or not hasattr(self.login_handler, 'should_check_origin'): return False return not self.login_handler.should_check_origin(self) @property def token_authenticated(self): """Have I been authenticated with a token?""" if self.login_handler is None or not hasattr(self.login_handler, 'is_token_authenticated'): return False return self.login_handler.is_token_authenticated(self) @property def cookie_name(self): default_cookie_name = non_alphanum.sub('-', 'username-{}'.format( self.request.host )) return self.settings.get('cookie_name', default_cookie_name) @property def logged_in(self): """Is a user currently logged in?""" user = self.get_current_user() return (user and not user == 'anonymous') @property def login_handler(self): """Return the login handler for this application, if any.""" return self.settings.get('login_handler_class', None) @property def token(self): """Return the login token for this application, if any.""" return self.settings.get('token', None) @property def login_available(self): """May a user proceed to log in? This returns True if login capability is available, irrespective of whether the user is already logged in or not. """ if self.login_handler is None: return False return bool(self.login_handler.get_login_available(self.settings)) class JupyterHandler(AuthenticatedHandler): """Jupyter-specific extensions to authenticated handling Mostly property shortcuts to Jupyter-specific settings. 
""" @property def config(self): return self.settings.get('config', None) @property def log(self): """use the Jupyter log by default, falling back on tornado's logger""" return log() @property def jinja_template_vars(self): """User-supplied values to supply to jinja templates.""" return self.settings.get('jinja_template_vars', {}) #--------------------------------------------------------------- # URLs #--------------------------------------------------------------- @property def version_hash(self): """The version hash to use for cache hints for static files""" return self.settings.get('version_hash', '') @property def mathjax_url(self): url = self.settings.get('mathjax_url', '') if not url or url_is_absolute(url): return url return url_path_join(self.base_url, url) @property def mathjax_config(self): return self.settings.get('mathjax_config', 'TeX-AMS-MML_HTMLorMML-full,Safe') @property def base_url(self): return self.settings.get('base_url', '/') @property def default_url(self): return self.settings.get('default_url', '') @property def ws_url(self): return self.settings.get('websocket_url', '') @property def contents_js_source(self): self.log.debug("Using contents: %s", self.settings.get('contents_js_source', 'services/contents')) return self.settings.get('contents_js_source', 'services/contents') #--------------------------------------------------------------- # Manager objects #--------------------------------------------------------------- @property def kernel_manager(self): return self.settings['kernel_manager'] @property def contents_manager(self): return self.settings['contents_manager'] @property def session_manager(self): return self.settings['session_manager'] @property def terminal_manager(self): return self.settings['terminal_manager'] @property def kernel_spec_manager(self): return self.settings['kernel_spec_manager'] @property def config_manager(self): return self.settings['config_manager'] 
#--------------------------------------------------------------- # CORS #--------------------------------------------------------------- @property def allow_origin(self): """Normal Access-Control-Allow-Origin""" return self.settings.get('allow_origin', '') @property def allow_origin_pat(self): """Regular expression version of allow_origin""" return self.settings.get('allow_origin_pat', None) @property def allow_credentials(self): """Whether to set Access-Control-Allow-Credentials""" return self.settings.get('allow_credentials', False) def set_default_headers(self): """Add CORS headers, if defined""" super(JupyterHandler, self).set_default_headers() if self.allow_origin: self.set_header("Access-Control-Allow-Origin", self.allow_origin) elif self.allow_origin_pat: origin = self.get_origin() if origin and self.allow_origin_pat.match(origin): self.set_header("Access-Control-Allow-Origin", origin) elif ( self.token_authenticated and "Access-Control-Allow-Origin" not in self.settings.get('headers', {}) ): # allow token-authenticated requests cross-origin by default. # only apply this exception if allow-origin has not been specified. self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) if self.allow_credentials: self.set_header("Access-Control-Allow-Credentials", 'true') def set_attachment_header(self, filename): """Set Content-Disposition: attachment header As a method to ensure handling of filename encoding """ escaped_filename = url_escape(filename) self.set_header('Content-Disposition', 'attachment;' " filename*=utf-8''{utf8}" .format( utf8=escaped_filename, ) ) def get_origin(self): # Handle WebSocket Origin naming convention differences # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". 
if "Origin" in self.request.headers: origin = self.request.headers.get("Origin") else: origin = self.request.headers.get("Sec-Websocket-Origin", None) return origin # origin_to_satisfy_tornado is present because tornado requires # check_origin to take an origin argument, but we don't use it def check_origin(self, origin_to_satisfy_tornado=""): """Check Origin for cross-site API requests, including websockets Copied from WebSocket with changes: - allow unspecified host/origin (e.g. scripts) - allow token-authenticated requests """ if self.allow_origin == '*' or self.skip_check_origin(): return True host = self.request.headers.get("Host") origin = self.request.headers.get("Origin") # If no header is provided, let the request through. # Origin can be None for: # - same-origin (IE, Firefox) # - Cross-site POST form (IE, Firefox) # - Scripts # The cross-site POST (XSRF) case is handled by tornado's xsrf_token if origin is None or host is None: return True origin = origin.lower() origin_host = urlparse(origin).netloc # OK if origin matches host if origin_host == host: return True # Check CORS headers if self.allow_origin: allow = self.allow_origin == origin elif self.allow_origin_pat: allow = bool(self.allow_origin_pat.match(origin)) else: # No CORS headers deny the request allow = False if not allow: self.log.warning("Blocking Cross Origin API request for %s. Origin: %s, Host: %s", self.request.path, origin, host, ) return allow def check_xsrf_cookie(self): """Bypass xsrf cookie checks when token-authenticated""" if self.token_authenticated or self.settings.get('disable_check_xsrf', False): # Token-authenticated requests do not need additional XSRF-check # Servers without authentication are vulnerable to XSRF return return super(JupyterHandler, self).check_xsrf_cookie() def check_host(self): """Check the host header if remote access disallowed. Returns True if the request should continue, False otherwise. 
""" if self.settings.get('allow_remote_access', False): return True # Remove port (e.g. ':8888') from host host = re.match(r'^(.*?)(:\d+)?$', self.request.host).group(1) # Browsers format IPv6 addresses like [::1]; we need to remove the [] if host.startswith('[') and host.endswith(']'): host = host[1:-1] try: addr = ipaddress.ip_address(host) except ValueError: # Not an IP address: check against hostnames allow = host in self.settings.get('local_hostnames', ['localhost']) else: allow = addr.is_loopback if not allow: self.log.warning( ("Blocking request with non-local 'Host' %s (%s). " "If the server should be accessible at that name, " "set ServerApp.allow_remote_access to disable the check."), host, self.request.host ) return allow def prepare(self): if not self.check_host(): raise web.HTTPError(403) return super(JupyterHandler, self).prepare() #--------------------------------------------------------------- # template rendering #--------------------------------------------------------------- def get_template(self, name): """Return the jinja template object for a given name""" return self.settings['jinja2_env'].get_template(name) def render_template(self, name, **ns): ns.update(self.template_namespace) template = self.get_template(name) return template.render(**ns) @property def template_namespace(self): return dict( base_url=self.base_url, default_url=self.default_url, ws_url=self.ws_url, logged_in=self.logged_in, allow_password_change=self.settings.get('allow_password_change'), login_available=self.login_available, token_available=bool(self.token), static_url=self.static_url, sys_info=json_sys_info(), contents_js_source=self.contents_js_source, version_hash=self.version_hash, xsrf_form_html=self.xsrf_form_html, token=self.token, xsrf_token=self.xsrf_token.decode('utf8'), nbjs_translations=json.dumps(combine_translations( self.request.headers.get('Accept-Language', ''))), **self.jinja_template_vars ) def get_json_body(self): """Return the body of the request as 
JSON data.""" if not self.request.body: return None # Do we need to call body.decode('utf-8') here? body = self.request.body.strip().decode(u'utf-8') try: model = json.loads(body) except Exception as e: self.log.debug("Bad JSON: %r", body) self.log.error("Couldn't parse JSON", exc_info=True) raise web.HTTPError(400, u'Invalid JSON in body of request') from e return model def write_error(self, status_code, **kwargs): """render custom error pages""" exc_info = kwargs.get('exc_info') message = '' status_message = responses.get(status_code, 'Unknown HTTP Error') exception = '(unknown)' if exc_info: exception = exc_info[1] # get the custom message, if defined try: message = exception.log_message % exception.args except Exception: pass # construct the custom reason, if defined reason = getattr(exception, 'reason', '') if reason: status_message = reason # build template namespace ns = dict( status_code=status_code, status_message=status_message, message=message, exception=exception, ) self.set_header('Content-Type', 'text/html') # render the template try: html = self.render_template('%s.html' % status_code, **ns) except TemplateNotFound: html = self.render_template('error.html', **ns) self.write(html) class APIHandler(JupyterHandler): """Base class for API handlers""" def prepare(self): if not self.check_origin(): raise web.HTTPError(404) return super(APIHandler, self).prepare() def write_error(self, status_code, **kwargs): """APIHandler errors are JSON, not human pages""" self.set_header('Content-Type', 'application/json') message = responses.get(status_code, 'Unknown HTTP Error') reply = { 'message': message, } exc_info = kwargs.get('exc_info') if exc_info: e = exc_info[1] if isinstance(e, HTTPError): reply['message'] = e.log_message or message reply['reason'] = e.reason else: reply['message'] = 'Unhandled error' reply['reason'] = None reply['traceback'] = ''.join(traceback.format_exception(*exc_info)) self.log.warning(reply['message']) self.finish(json.dumps(reply)) 
def get_current_user(self): """Raise 403 on API handlers instead of redirecting to human login page""" # preserve _user_cache so we don't raise more than once if hasattr(self, '_user_cache'): return self._user_cache self._user_cache = user = super(APIHandler, self).get_current_user() return user def get_login_url(self): # if get_login_url is invoked in an API handler, # that means @web.authenticated is trying to trigger a redirect. # instead of redirecting, raise 403 instead. if not self.current_user: raise web.HTTPError(403) return super(APIHandler, self).get_login_url() @property def content_security_policy(self): csp = '; '.join([ super(APIHandler, self).content_security_policy, "default-src 'none'", ]) return csp # set _track_activity = False on API handlers that shouldn't track activity _track_activity = True def update_api_activity(self): """Update last_activity of API requests""" # record activity of authenticated requests if ( self._track_activity and getattr(self, '_user_cache', None) and self.get_argument('no_track_activity', None) is None ): self.settings['api_last_activity'] = utcnow() def finish(self, *args, **kwargs): self.update_api_activity() self.set_header('Content-Type', 'application/json') return super(APIHandler, self).finish(*args, **kwargs) def options(self, *args, **kwargs): if 'Access-Control-Allow-Headers' in self.settings.get('headers', {}): self.set_header('Access-Control-Allow-Headers', self.settings['headers']['Access-Control-Allow-Headers']) else: self.set_header('Access-Control-Allow-Headers', 'accept, content-type, authorization, x-xsrftoken') self.set_header('Access-Control-Allow-Methods', 'GET, PUT, POST, PATCH, DELETE, OPTIONS') # if authorization header is requested, # that means the request is token-authenticated. # avoid browser-side rejection of the preflight request. # only allow this exception if allow_origin has not been specified # and Jupyter server authentication is enabled. 
# If the token is not valid, the 'real' request will still be rejected. requested_headers = self.request.headers.get('Access-Control-Request-Headers', '').split(',') if requested_headers and any( h.strip().lower() == 'authorization' for h in requested_headers ) and ( # FIXME: it would be even better to check specifically for token-auth, # but there is currently no API for this. self.login_available ) and ( self.allow_origin or self.allow_origin_pat or 'Access-Control-Allow-Origin' in self.settings.get('headers', {}) ): self.set_header('Access-Control-Allow-Origin', self.request.headers.get('Origin', '')) class Template404(JupyterHandler): """Render our 404 template""" def prepare(self): raise web.HTTPError(404) class AuthenticatedFileHandler(JupyterHandler, web.StaticFileHandler): """static files should only be accessible when logged in""" @property def content_security_policy(self): # In case we're serving HTML/SVG, confine any Javascript to a unique # origin so it can't interact with the Jupyter server. 
return super(AuthenticatedFileHandler, self).content_security_policy + \ "; sandbox allow-scripts" @web.authenticated def get(self, path): if os.path.splitext(path)[1] == '.ipynb' or self.get_argument("download", False): name = path.rsplit('/', 1)[-1] self.set_attachment_header(name) return web.StaticFileHandler.get(self, path) def get_content_type(self): path = self.absolute_path.strip('/') if '/' in path: _, name = path.rsplit('/', 1) else: name = path if name.endswith('.ipynb'): return 'application/x-ipynb+json' else: cur_mime = mimetypes.guess_type(name)[0] if cur_mime == 'text/plain': return 'text/plain; charset=UTF-8' else: return super(AuthenticatedFileHandler, self).get_content_type() def set_headers(self): super(AuthenticatedFileHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments: self.add_header("Cache-Control", "no-cache") def compute_etag(self): return None def validate_absolute_path(self, root, absolute_path): """Validate and return the absolute path. Requires tornado 3.1 Adding to tornado's own handling, forbids the serving of hidden files. """ abs_path = super(AuthenticatedFileHandler, self).validate_absolute_path(root, absolute_path) abs_root = os.path.abspath(root) if is_hidden(abs_path, abs_root) and not self.contents_manager.allow_hidden: self.log.info("Refusing to serve hidden file, via 404 Error, use flag 'ContentsManager.allow_hidden' to enable") raise web.HTTPError(404) return abs_path def json_errors(method): """Decorate methods with this to return GitHub style JSON errors. This should be used on any JSON API on any handler method that can raise HTTPErrors. This will grab the latest HTTPError exception using sys.exc_info and then: 1. Set the HTTP status code based on the HTTPError 2. Create and return a JSON body with a message field describing the error in a human readable form. """ warnings.warn('@json_errors is deprecated in notebook 5.2.0. 
Subclass APIHandler instead.', DeprecationWarning, stacklevel=2, ) @functools.wraps(method) def wrapper(self, *args, **kwargs): self.write_error = types.MethodType(APIHandler.write_error, self) return method(self, *args, **kwargs) return wrapper #----------------------------------------------------------------------------- # File handler #----------------------------------------------------------------------------- # to minimize subclass changes: HTTPError = web.HTTPError class FileFindHandler(JupyterHandler, web.StaticFileHandler): """subclass of StaticFileHandler for serving files from a search path""" # cache search results, don't search for files more than once _static_paths = {} def set_headers(self): super(FileFindHandler, self).set_headers() # disable browser caching, rely on 304 replies for savings if "v" not in self.request.arguments or \ any(self.request.path.startswith(path) for path in self.no_cache_paths): self.set_header("Cache-Control", "no-cache") def initialize(self, path, default_filename=None, no_cache_paths=None): self.no_cache_paths = no_cache_paths or [] if isinstance(path, string_types): path = [path] self.root = tuple( os.path.abspath(os.path.expanduser(p)) + os.sep for p in path ) self.default_filename = default_filename def compute_etag(self): return None @classmethod def get_absolute_path(cls, roots, path): """locate a file to serve on our static file search path""" with cls._lock: if path in cls._static_paths: return cls._static_paths[path] try: abspath = os.path.abspath(filefind(path, roots)) except IOError: # IOError means not found return '' cls._static_paths[path] = abspath log().debug("Path %s served from %s"%(path, abspath)) return abspath def validate_absolute_path(self, root, absolute_path): """check if the file should be served (raises 404, 403, etc.)""" if absolute_path == '': raise web.HTTPError(404) for root in self.root: if (absolute_path + os.sep).startswith(root): break return super(FileFindHandler, 
self).validate_absolute_path(root, absolute_path)


class APIVersionHandler(APIHandler):
    """Report the Jupyter server version as JSON."""

    def get(self):
        # not authenticated, so give as few info as possible
        self.finish(json.dumps({"version": jupyter_server.__version__}))


class TrailingSlashHandler(web.RequestHandler):
    """Simple redirect handler that strips trailing slashes

    This should be the first, highest priority handler.
    """

    def get(self):
        # split the URI into path and (optional) '?query' tail so the
        # query string survives the redirect untouched
        path, *rest = self.request.uri.partition("?")
        # trim trailing *and* leading /
        # to avoid misinterpreting repeated '//'
        # (a leading '//host' would otherwise be treated by browsers as a
        # protocol-relative URL — i.e. an open redirect, CWE-601)
        path = "/" + path.strip("/")
        new_uri = "".join([path, *rest])
        self.redirect(new_uri)

    post = put = get


class MainHandler(JupyterHandler):
    """Simple handler for base_url."""

    def get(self):
        html = self.render_template("main.html")
        self.write(html)

    post = put = get


class FilesRedirectHandler(JupyterHandler):
    """Handler for redirecting relative URLs to the /files/ handler"""

    @staticmethod
    def redirect_to_files(self, path):
        """make redirect logic a reusable static method

        so it can be called from other handlers.
        """
        cm = self.contents_manager
        if cm.dir_exists(path):
            # it's a *directory*, redirect to /tree
            url = url_path_join(self.base_url, 'tree', url_escape(path))
        else:
            orig_path = path
            # otherwise, redirect to /files
            parts = path.split('/')

            if not cm.file_exists(path=path) and 'files' in parts:
                # redirect without files/ iff it would 404
                # this preserves pre-2.0-style 'files/' links
                self.log.warning("Deprecated files/ URL: %s", orig_path)
                parts.remove('files')
                path = '/'.join(parts)

            if not cm.file_exists(path=path):
                raise web.HTTPError(404)

            url = url_path_join(self.base_url, 'files', url_escape(path))
        self.log.debug("Redirecting %s to %s", self.request.path, url)
        self.redirect(url)

    def get(self, path=''):
        return self.redirect_to_files(self, path)


class RedirectWithParams(web.RequestHandler):
    """Same as web.RedirectHandler, but preserves URL parameters"""

    def initialize(self, url, permanent=True):
        self._url = url
        self._permanent = permanent

    def get(self):
        # re-attach the original query string to the configured target URL
        sep = '&' if '?' in self._url else '?'
        url = sep.join([self._url, self.request.query])
        self.redirect(url, permanent=self._permanent)


class PrometheusMetricsHandler(JupyterHandler):
    """
    Return prometheus metrics for this Jupyter server
    """

    @web.authenticated
    def get(self):
        self.set_header('Content-Type', prometheus_client.CONTENT_TYPE_LATEST)
        self.write(prometheus_client.generate_latest(prometheus_client.REGISTRY))


#-----------------------------------------------------------------------------
# URL pattern fragments for re-use
#-----------------------------------------------------------------------------

# path matches any number of `/foo[/bar...]` or just `/` or ''
path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))"

#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------

default_handlers = [
    (r".*/", TrailingSlashHandler),
    (r"api", APIVersionHandler),
    (r'/(robots\.txt|favicon\.ico)', web.StaticFileHandler),
    (r'/metrics', PrometheusMetricsHandler)
]
./CrossVul/dataset_final_sorted/CWE-601/py/good_4351_0
crossvul-python_data_good_1731_1
#!/bin/python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Auth, Mail, PluginManager and various utilities ------------------------------------------------ """ import base64 try: import cPickle as pickle except: import pickle import datetime import thread import logging import sys import glob import os import re import time import traceback import smtplib import urllib import urllib2 import Cookie import cStringIO import ConfigParser import email.utils import random from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string, Charset from gluon.contenttype import contenttype from gluon.storage import Storage, StorageList, Settings, Messages from gluon.utils import web2py_uuid from gluon.fileutils import read_file, check_credentials from gluon import * from gluon.contrib.autolinks import expand_one from gluon.contrib.markmin.markmin2html import \ replace_at_urls, replace_autolinks, replace_components from pydal.objects import Row, Set, Query import gluon.serializers as serializers Table = DAL.Table Field = DAL.Field try: # try stdlib (Python 2.6) import json as json_parser except ImportError: try: # try external module import simplejson as json_parser except: # fallback to pure-Python module import gluon.contrib.simplejson as json_parser __all__ = ['Mail', 'Auth', 'Recaptcha', 'Recaptcha2', 'Crud', 'Service', 'Wiki', 'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate'] ### mind there are two loggers here (logger and crud.settings.logger)! 
logger = logging.getLogger("web2py") DEFAULT = lambda: None def getarg(position, default=None): args = current.request.args if position < 0 and len(args) >= -position: return args[position] elif position >= 0 and len(args) > position: return args[position] else: return default def callback(actions, form, tablename=None): if actions: if tablename and isinstance(actions, dict): actions = actions.get(tablename, []) if not isinstance(actions, (list, tuple)): actions = [actions] [action(form) for action in actions] def validators(*a): b = [] for item in a: if isinstance(item, (list, tuple)): b = b + list(item) else: b.append(item) return b def call_or_redirect(f, *args): if callable(f): redirect(f(*args)) else: redirect(f) def replace_id(url, form): if url: url = url.replace('[id]', str(form.vars.id)) if url[0] == '/' or url[:4] == 'http': return url return URL(url) class Mail(object): """ Class for configuring and sending emails with alternative text / html body, multiple attachments and encryption support Works with SMTP and Google App Engine. Args: server: SMTP server address in address:port notation sender: sender email address login: sender login name and password in login:password notation or None if no authentication is required tls: enables/disables encryption (True by default) In Google App Engine use :: server='gae' For sake of backward compatibility all fields are optional and default to None, however, to be able to send emails at least server and sender must be specified. 
They are available under following fields:: mail.settings.server mail.settings.sender mail.settings.login mail.settings.timeout = 60 # seconds (default) When server is 'logging', email is logged but not sent (debug mode) Optionally you can use PGP encryption or X509:: mail.settings.cipher_type = None mail.settings.gpg_home = None mail.settings.sign = True mail.settings.sign_passphrase = None mail.settings.encrypt = True mail.settings.x509_sign_keyfile = None mail.settings.x509_sign_certfile = None mail.settings.x509_sign_chainfile = None mail.settings.x509_nocerts = False mail.settings.x509_crypt_certfiles = None cipher_type : None gpg - need a python-pyme package and gpgme lib x509 - smime gpg_home : you can set a GNUPGHOME environment variable to specify home of gnupg sign : sign the message (True or False) sign_passphrase : passphrase for key signing encrypt : encrypt the message (True or False). It defaults to True ... x509 only ... x509_sign_keyfile : the signers private key filename or string containing the key. (PEM format) x509_sign_certfile: the signers certificate filename or string containing the cert. (PEM format) x509_sign_chainfile: sets the optional all-in-one file where you can assemble the certificates of Certification Authorities (CA) which form the certificate chain of email certificate. It can be a string containing the certs to. (PEM format) x509_nocerts : if True then no attached certificate in mail x509_crypt_certfiles: the certificates file or strings to encrypt the messages with can be a file name / string or a list of file names / strings (PEM format) Examples: Create Mail object with authentication data for remote server:: mail = Mail('example.com:25', 'me@example.com', 'me:password') Notice for GAE users: attachments have an automatic content_id='attachment-i' where i is progressive number in this way the can be referenced from the HTML as <img src="cid:attachment-0" /> etc. 
""" class Attachment(MIMEBase.MIMEBase): """ Email attachment Args: payload: path to file or file-like object with read() method filename: name of the attachment stored in message; if set to None, it will be fetched from payload path; file-like object payload must have explicit filename specified content_id: id of the attachment; automatically contained within `<` and `>` content_type: content type of the attachment; if set to None, it will be fetched from filename using gluon.contenttype module encoding: encoding of all strings passed to this function (except attachment body) Content ID is used to identify attachments within the html body; in example, attached image with content ID 'photo' may be used in html message as a source of img tag `<img src="cid:photo" />`. Example:: Create attachment from text file:: attachment = Mail.Attachment('/path/to/file.txt') Content-Type: text/plain MIME-Version: 1.0 Content-Disposition: attachment; filename="file.txt" Content-Transfer-Encoding: base64 SOMEBASE64CONTENT= Create attachment from image file with custom filename and cid:: attachment = Mail.Attachment('/path/to/file.png', filename='photo.png', content_id='photo') Content-Type: image/png MIME-Version: 1.0 Content-Disposition: attachment; filename="photo.png" Content-Id: <photo> Content-Transfer-Encoding: base64 SOMEOTHERBASE64CONTENT= """ def __init__( self, payload, filename=None, content_id=None, content_type=None, encoding='utf-8'): if isinstance(payload, str): if filename is None: filename = os.path.basename(payload) payload = read_file(payload, 'rb') else: if filename is None: raise Exception('Missing attachment name') payload = payload.read() filename = filename.encode(encoding) if content_type is None: content_type = contenttype(filename) self.my_filename = filename self.my_payload = payload MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1)) self.set_payload(payload) self['Content-Disposition'] = 'attachment; filename="%s"' % filename if not 
content_id is None: self['Content-Id'] = '<%s>' % content_id.encode(encoding) Encoders.encode_base64(self) def __init__(self, server=None, sender=None, login=None, tls=True): settings = self.settings = Settings() settings.server = server settings.sender = sender settings.login = login settings.tls = tls settings.timeout = 60 # seconds settings.hostname = None settings.ssl = False settings.cipher_type = None settings.gpg_home = None settings.sign = True settings.sign_passphrase = None settings.encrypt = True settings.x509_sign_keyfile = None settings.x509_sign_certfile = None settings.x509_sign_chainfile = None settings.x509_nocerts = False settings.x509_crypt_certfiles = None settings.debug = False settings.lock_keys = True self.result = {} self.error = None def send(self, to, subject='[no subject]', message='[no message]', attachments=None, cc=None, bcc=None, reply_to=None, sender=None, encoding='utf-8', raw=False, headers={}, from_address=None, cipher_type=None, sign=None, sign_passphrase=None, encrypt=None, x509_sign_keyfile=None, x509_sign_chainfile=None, x509_sign_certfile=None, x509_crypt_certfiles=None, x509_nocerts=None ): """ Sends an email using data specified in constructor Args: to: list or tuple of receiver addresses; will also accept single object subject: subject of the email message: email body text; depends on type of passed object: - if 2-list or 2-tuple is passed: first element will be source of plain text while second of html text; - otherwise: object will be the only source of plain text and html source will be set to None If text or html source is: - None: content part will be ignored, - string: content part will be set to it, - file-like object: content part will be fetched from it using it's read() method attachments: list or tuple of Mail.Attachment objects; will also accept single object cc: list or tuple of carbon copy receiver addresses; will also accept single object bcc: list or tuple of blind carbon copy receiver addresses; will also 
accept single object reply_to: address to which reply should be composed encoding: encoding of all strings passed to this method (including message bodies) headers: dictionary of headers to refine the headers just before sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}` from_address: address to appear in the 'From:' header, this is not the envelope sender. If not specified the sender will be used cipher_type : gpg - needs the python-pyme package and gpgme lib x509 - smime gpg_home : you can set a GNUPGHOME environment variable to specify home of gnupg sign : sign the message (True or False) sign_passphrase : passphrase for key signing encrypt : encrypt the message (True or False). It defaults to True. ... x509 only ... x509_sign_keyfile : the signer's private key filename or string containing the key. (PEM format) x509_sign_certfile: the signer's certificate filename or string containing the cert. (PEM format) x509_sign_chainfile: sets the optional all-in-one file where you can assemble the certificates of Certification Authorities (CA) which form the certificate chain of the email certificate. It can be a string containing the certs too. 
(PEM format) x509_nocerts : if True then no attached certificate in mail x509_crypt_certfiles: the certificates file or strings to encrypt the messages with can be a file name / string or a list of file names / strings (PEM format) Examples: Send plain text message to single address:: mail.send('you@example.com', 'Message subject', 'Plain text body of the message') Send html message to single address:: mail.send('you@example.com', 'Message subject', '<html>Plain text body of the message</html>') Send text and html message to three addresses (two in cc):: mail.send('you@example.com', 'Message subject', ('Plain text body', '<html>html body</html>'), cc=['other1@example.com', 'other2@example.com']) Send html only message with image attachment available from the message by 'photo' content id:: mail.send('you@example.com', 'Message subject', (None, '<html><img src="cid:photo" /></html>'), Mail.Attachment('/path/to/photo.jpg', content_id='photo')) Send email with two attachments and no body text:: mail.send('you@example.com', 'Message subject', None, [Mail.Attachment('/path/to/first.file'), Mail.Attachment('/path/to/second.file')]) Returns: True on success, False on failure. 
Before return, method updates two object's fields: - self.result: return value of smtplib.SMTP.sendmail() or GAE's mail.send_mail() method - self.error: Exception message or None if above was successful """ # We don't want to use base64 encoding for unicode mail Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8') def encode_header(key): if [c for c in key if 32 > ord(c) or ord(c) > 127]: return Header.Header(key.encode('utf-8'), 'utf-8') else: return key # encoded or raw text def encoded_or_raw(text): if raw: text = encode_header(text) return text sender = sender or self.settings.sender if not isinstance(self.settings.server, str): raise Exception('Server address not specified') if not isinstance(sender, str): raise Exception('Sender address not specified') if not raw and attachments: # Use multipart/mixed if there is attachments payload_in = MIMEMultipart.MIMEMultipart('mixed') elif raw: # no encoding configuration for raw messages if not isinstance(message, basestring): message = message.read() if isinstance(message, unicode): text = message.encode('utf-8') elif not encoding == 'utf-8': text = message.decode(encoding).encode('utf-8') else: text = message # No charset passed to avoid transport encoding # NOTE: some unicode encoded strings will produce # unreadable mail contents. 
payload_in = MIMEText.MIMEText(text) if to: if not isinstance(to, (list, tuple)): to = [to] else: raise Exception('Target receiver address not specified') if cc: if not isinstance(cc, (list, tuple)): cc = [cc] if bcc: if not isinstance(bcc, (list, tuple)): bcc = [bcc] if message is None: text = html = None elif isinstance(message, (list, tuple)): text, html = message elif message.strip().startswith('<html') and \ message.strip().endswith('</html>'): text = self.settings.server == 'gae' and message or None html = message else: text = message html = None if (not text is None or not html is None) and (not raw): if not text is None: if not isinstance(text, basestring): text = text.read() if isinstance(text, unicode): text = text.encode('utf-8') elif not encoding == 'utf-8': text = text.decode(encoding).encode('utf-8') if not html is None: if not isinstance(html, basestring): html = html.read() if isinstance(html, unicode): html = html.encode('utf-8') elif not encoding == 'utf-8': html = html.decode(encoding).encode('utf-8') # Construct mime part only if needed if text is not None and html: # We have text and html we need multipart/alternative attachment = MIMEMultipart.MIMEMultipart('alternative') attachment.attach(MIMEText.MIMEText(text, _charset='utf-8')) attachment.attach( MIMEText.MIMEText(html, 'html', _charset='utf-8')) elif text is not None: attachment = MIMEText.MIMEText(text, _charset='utf-8') elif html: attachment = \ MIMEText.MIMEText(html, 'html', _charset='utf-8') if attachments: # If there is attachments put text and html into # multipart/mixed payload_in.attach(attachment) else: # No attachments no multipart/mixed payload_in = attachment if (attachments is None) or raw: pass elif isinstance(attachments, (list, tuple)): for attachment in attachments: payload_in.attach(attachment) else: payload_in.attach(attachments) ####################################################### # CIPHER # ####################################################### cipher_type = 
cipher_type or self.settings.cipher_type sign = sign if sign != None else self.settings.sign sign_passphrase = sign_passphrase or self.settings.sign_passphrase encrypt = encrypt if encrypt != None else self.settings.encrypt ####################################################### # GPGME # ####################################################### if cipher_type == 'gpg': if self.settings.gpg_home: # Set GNUPGHOME environment variable to set home of gnupg import os os.environ['GNUPGHOME'] = self.settings.gpg_home if not sign and not encrypt: self.error = "No sign and no encrypt is set but cipher type to gpg" return False # need a python-pyme package and gpgme lib from pyme import core, errors from pyme.constants.sig import mode ############################################ # sign # ############################################ if sign: import string core.check_version(None) pin = string.replace(payload_in.as_string(), '\n', '\r\n') plain = core.Data(pin) sig = core.Data() c = core.Context() c.set_armor(1) c.signers_clear() # search for signing key for From: for sigkey in c.op_keylist_all(sender, 1): if sigkey.can_sign: c.signers_add(sigkey) if not c.signers_enum(0): self.error = 'No key for signing [%s]' % sender return False c.set_passphrase_cb(lambda x, y, z: sign_passphrase) try: # make a signature c.op_sign(plain, sig, mode.DETACH) sig.seek(0, 0) # make it part of the email payload = MIMEMultipart.MIMEMultipart('signed', boundary=None, _subparts=None, **dict( micalg="pgp-sha1", protocol="application/pgp-signature")) # insert the origin payload payload.attach(payload_in) # insert the detached signature p = MIMEBase.MIMEBase("application", 'pgp-signature') p.set_payload(sig.read()) payload.attach(p) # it's just a trick to handle the no encryption case payload_in = payload except errors.GPGMEError, ex: self.error = "GPG error: %s" % ex.getstring() return False ############################################ # encrypt # ############################################ if 
encrypt: core.check_version(None) plain = core.Data(payload_in.as_string()) cipher = core.Data() c = core.Context() c.set_armor(1) # collect the public keys for encryption recipients = [] rec = to[:] if cc: rec.extend(cc) if bcc: rec.extend(bcc) for addr in rec: c.op_keylist_start(addr, 0) r = c.op_keylist_next() if r is None: self.error = 'No key for [%s]' % addr return False recipients.append(r) try: # make the encryption c.op_encrypt(recipients, 1, plain, cipher) cipher.seek(0, 0) # make it a part of the email payload = MIMEMultipart.MIMEMultipart('encrypted', boundary=None, _subparts=None, **dict(protocol="application/pgp-encrypted")) p = MIMEBase.MIMEBase("application", 'pgp-encrypted') p.set_payload("Version: 1\r\n") payload.attach(p) p = MIMEBase.MIMEBase("application", 'octet-stream') p.set_payload(cipher.read()) payload.attach(p) except errors.GPGMEError, ex: self.error = "GPG error: %s" % ex.getstring() return False ####################################################### # X.509 # ####################################################### elif cipher_type == 'x509': if not sign and not encrypt: self.error = "No sign and no encrypt is set but cipher type to x509" return False import os x509_sign_keyfile = x509_sign_keyfile or\ self.settings.x509_sign_keyfile x509_sign_chainfile = x509_sign_chainfile or\ self.settings.x509_sign_chainfile x509_sign_certfile = x509_sign_certfile or\ self.settings.x509_sign_certfile or\ x509_sign_keyfile or\ self.settings.x509_sign_certfile # crypt certfiles could be a string or a list x509_crypt_certfiles = x509_crypt_certfiles or\ self.settings.x509_crypt_certfiles x509_nocerts = x509_nocerts or\ self.settings.x509_nocerts # need m2crypto try: from M2Crypto import BIO, SMIME, X509 except Exception, e: self.error = "Can't load M2Crypto module" return False msg_bio = BIO.MemoryBuffer(payload_in.as_string()) s = SMIME.SMIME() # SIGN if sign: # key for signing try: keyfile_bio = BIO.openfile(x509_sign_keyfile)\ if 
os.path.isfile(x509_sign_keyfile)\ else BIO.MemoryBuffer(x509_sign_keyfile) sign_certfile_bio = BIO.openfile(x509_sign_certfile)\ if os.path.isfile(x509_sign_certfile)\ else BIO.MemoryBuffer(x509_sign_certfile) s.load_key_bio(keyfile_bio, sign_certfile_bio, callback=lambda x: sign_passphrase) if x509_sign_chainfile: sk = X509.X509_Stack() chain = X509.load_cert(x509_sign_chainfile)\ if os.path.isfile(x509_sign_chainfile)\ else X509.load_cert_string(x509_sign_chainfile) sk.push(chain) s.set_x509_stack(sk) except Exception, e: self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e) return False try: if x509_nocerts: flags = SMIME.PKCS7_NOCERTS else: flags = 0 if not encrypt: flags += SMIME.PKCS7_DETACHED p7 = s.sign(msg_bio, flags=flags) msg_bio = BIO.MemoryBuffer(payload_in.as_string( )) # Recreate coz sign() has consumed it. except Exception, e: self.error = "Something went wrong on signing: <%s> %s" % ( str(e), str(flags)) return False # ENCRYPT if encrypt: try: sk = X509.X509_Stack() if not isinstance(x509_crypt_certfiles, (list, tuple)): x509_crypt_certfiles = [x509_crypt_certfiles] # make an encryption cert's stack for crypt_certfile in x509_crypt_certfiles: certfile = X509.load_cert(crypt_certfile)\ if os.path.isfile(crypt_certfile)\ else X509.load_cert_string(crypt_certfile) sk.push(certfile) s.set_x509_stack(sk) s.set_cipher(SMIME.Cipher('des_ede3_cbc')) tmp_bio = BIO.MemoryBuffer() if sign: s.write(tmp_bio, p7) else: tmp_bio.write(payload_in.as_string()) p7 = s.encrypt(tmp_bio) except Exception, e: self.error = "Something went wrong on encrypting: <%s>" % str(e) return False # Final stage in sign and encryption out = BIO.MemoryBuffer() if encrypt: s.write(out, p7) else: if sign: s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED) else: out.write('\r\n') out.write(payload_in.as_string()) out.close() st = str(out.read()) payload = message_from_string(st) else: # no cryptography process as usual payload = payload_in if from_address: 
payload['From'] = encoded_or_raw(from_address.decode(encoding)) else: payload['From'] = encoded_or_raw(sender.decode(encoding)) origTo = to[:] if to: payload['To'] = encoded_or_raw(', '.join(to).decode(encoding)) if reply_to: payload['Reply-To'] = encoded_or_raw(reply_to.decode(encoding)) if cc: payload['Cc'] = encoded_or_raw(', '.join(cc).decode(encoding)) to.extend(cc) if bcc: to.extend(bcc) payload['Subject'] = encoded_or_raw(subject.decode(encoding)) payload['Date'] = email.utils.formatdate() for k, v in headers.iteritems(): payload[k] = encoded_or_raw(v.decode(encoding)) result = {} try: if self.settings.server == 'logging': logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % ('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)) elif self.settings.server == 'gae': xcc = dict() if cc: xcc['cc'] = cc if bcc: xcc['bcc'] = bcc if reply_to: xcc['reply_to'] = reply_to from google.appengine.api import mail attachments = attachments and [mail.Attachment( a.my_filename, a.my_payload, contebt_id='<attachment-%s>' % k ) for k,a in enumerate(attachments) if not raw] if attachments: result = mail.send_mail( sender=sender, to=origTo, subject=unicode(subject), body=unicode(text), html=html, attachments=attachments, **xcc) elif html and (not raw): result = mail.send_mail( sender=sender, to=origTo, subject=unicode(subject), body=unicode(text), html=html, **xcc) else: result = mail.send_mail( sender=sender, to=origTo, subject=unicode(subject), body=unicode(text), **xcc) else: smtp_args = self.settings.server.split(':') kwargs = dict(timeout=self.settings.timeout) if self.settings.ssl: server = smtplib.SMTP_SSL(*smtp_args, **kwargs) else: server = smtplib.SMTP(*smtp_args, **kwargs) if self.settings.tls and not self.settings.ssl: server.ehlo(self.settings.hostname) server.starttls() server.ehlo(self.settings.hostname) if self.settings.login: server.login(*self.settings.login.split(':', 1)) result = server.sendmail( sender, to, 
payload.as_string()) server.quit() except Exception, e: logger.warn('Mail.send failure:%s' % e) self.result = result self.error = e return False self.result = result self.error = None return True class Recaptcha(DIV): """ Examples: Use as:: form = FORM(Recaptcha(public_key='...',private_key='...')) or:: form = SQLFORM(...) form.append(Recaptcha(public_key='...',private_key='...')) """ API_SSL_SERVER = 'https://www.google.com/recaptcha/api' API_SERVER = 'http://www.google.com/recaptcha/api' VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify' def __init__(self, request=None, public_key='', private_key='', use_ssl=False, error=None, error_message='invalid', label='Verify:', options='', comment='', ajax=False ): request = request or current.request self.request_vars = request and request.vars or current.request.vars self.remote_addr = request.env.remote_addr self.public_key = public_key self.private_key = private_key self.use_ssl = use_ssl self.error = error self.errors = Storage() self.error_message = error_message self.components = [] self.attributes = {} self.label = label self.options = options self.comment = comment self.ajax = ajax def _validate(self): # for local testing: recaptcha_challenge_field = \ self.request_vars.recaptcha_challenge_field recaptcha_response_field = \ self.request_vars.recaptcha_response_field private_key = self.private_key remoteip = self.remote_addr if not (recaptcha_response_field and recaptcha_challenge_field and len(recaptcha_response_field) and len(recaptcha_challenge_field)): self.errors['captcha'] = self.error_message return False params = urllib.urlencode({ 'privatekey': private_key, 'remoteip': remoteip, 'challenge': recaptcha_challenge_field, 'response': recaptcha_response_field, }) request = urllib2.Request( url=self.VERIFY_SERVER, data=params, headers={'Content-type': 'application/x-www-form-urlencoded', 'User-agent': 'reCAPTCHA Python'}) httpresp = urllib2.urlopen(request) return_values = httpresp.read().splitlines() 
httpresp.close() return_code = return_values[0] if return_code == 'true': del self.request_vars.recaptcha_challenge_field del self.request_vars.recaptcha_response_field self.request_vars.captcha = '' return True else: # In case we get an error code, store it so we can get an error message # from the /api/challenge URL as described in the reCAPTCHA api docs. self.error = return_values[1] self.errors['captcha'] = self.error_message return False def xml(self): public_key = self.public_key use_ssl = self.use_ssl error_param = '' if self.error: error_param = '&error=%s' % self.error if use_ssl: server = self.API_SSL_SERVER else: server = self.API_SERVER if not self.ajax: captcha = DIV( SCRIPT("var RecaptchaOptions = {%s};" % self.options), SCRIPT(_type="text/javascript", _src="%s/challenge?k=%s%s" % (server, public_key, error_param)), TAG.noscript( IFRAME( _src="%s/noscript?k=%s%s" % ( server, public_key, error_param), _height="300", _width="500", _frameborder="0"), BR(), INPUT( _type='hidden', _name='recaptcha_response_field', _value='manual_challenge')), _id='recaptcha') else: #use Google's ajax interface, needed for LOADed components url_recaptcha_js = "%s/js/recaptcha_ajax.js" % server RecaptchaOptions = "var RecaptchaOptions = {%s}" % self.options script = """%(options)s; jQuery.getScript('%(url)s',function() { Recaptcha.create('%(public_key)s', 'recaptcha',jQuery.extend(RecaptchaOptions,{'callback':Recaptcha.focus_response_field})) }) """ % ({'options': RecaptchaOptions, 'url': url_recaptcha_js, 'public_key': public_key}) captcha = DIV( SCRIPT( script, _type="text/javascript", ), TAG.noscript( IFRAME( _src="%s/noscript?k=%s%s" % ( server, public_key, error_param), _height="300", _width="500", _frameborder="0"), BR(), INPUT( _type='hidden', _name='recaptcha_response_field', _value='manual_challenge')), _id='recaptcha') if not self.errors.captcha: return XML(captcha).xml() else: captcha.append(DIV(self.errors['captcha'], _class='error')) return XML(captcha).xml() 
class Recaptcha2(DIV): """ Experimental: Creates a DIV holding the newer Recaptcha from Google (v2) Args: request : the request. If not passed, uses current request public_key : the public key Google gave you private_key : the private key Google gave you error_message : the error message to show if verification fails label : the label to use options (dict) : takes these parameters - hl - theme - type - tabindex - callback - expired-callback see https://developers.google.com/recaptcha/docs/display for docs about those comment : the comment Examples: Use as:: form = FORM(Recaptcha2(public_key='...',private_key='...')) or:: form = SQLFORM(...) form.append(Recaptcha2(public_key='...',private_key='...')) to protect the login page instead, use:: from gluon.tools import Recaptcha2 auth.settings.captcha = Recaptcha2(request, public_key='...',private_key='...') """ API_URI = 'https://www.google.com/recaptcha/api.js' VERIFY_SERVER = 'https://www.google.com/recaptcha/api/siteverify' def __init__(self, request=None, public_key='', private_key='', error_message='invalid', label='Verify:', options=None, comment='', ): request = request or current.request self.request_vars = request and request.vars or current.request.vars self.remote_addr = request.env.remote_addr self.public_key = public_key self.private_key = private_key self.errors = Storage() self.error_message = error_message self.components = [] self.attributes = {} self.label = label self.options = options or {} self.comment = comment def _validate(self): recaptcha_response_field = self.request_vars.pop('g-recaptcha-response', None) remoteip = self.remote_addr if not recaptcha_response_field: self.errors['captcha'] = self.error_message return False params = urllib.urlencode({ 'secret': self.private_key, 'remoteip': remoteip, 'response': recaptcha_response_field, }) request = urllib2.Request( url=self.VERIFY_SERVER, data=params, headers={'Content-type': 'application/x-www-form-urlencoded', 'User-agent': 'reCAPTCHA 
Python'}) httpresp = urllib2.urlopen(request) content = httpresp.read() httpresp.close() try: response_dict = json_parser.loads(content) except: self.errors['captcha'] = self.error_message return False if response_dict.get('success', False): self.request_vars.captcha = '' return True else: self.errors['captcha'] = self.error_message return False def xml(self): api_uri = self.API_URI hl = self.options.pop('hl', None) if hl: api_uri = self.API_URI + '?hl=%s' % hl public_key = self.public_key self.options['sitekey'] = public_key captcha = DIV( SCRIPT(_src=api_uri, _async='', _defer=''), DIV(_class="g-recaptcha", data=self.options), TAG.noscript(XML(""" <div style="width: 302px; height: 352px;"> <div style="width: 302px; height: 352px; position: relative;"> <div style="width: 302px; height: 352px; position: absolute;"> <iframe src="https://www.google.com/recaptcha/api/fallback?k=%(public_key)s" frameborder="0" scrolling="no" style="width: 302px; height:352px; border-style: none;"> </iframe> </div> <div style="width: 250px; height: 80px; position: absolute; border-style: none; bottom: 21px; left: 25px; margin: 0px; padding: 0px; right: 25px;"> <textarea id="g-recaptcha-response" name="g-recaptcha-response" class="g-recaptcha-response" style="width: 250px; height: 80px; border: 1px solid #c1c1c1; margin: 0px; padding: 0px; resize: none;" value=""> </textarea> </div> </div> </div>""" % dict(public_key=public_key)) ) ) if not self.errors.captcha: return XML(captcha).xml() else: captcha.append(DIV(self.errors['captcha'], _class='error')) return XML(captcha).xml() # this should only be used for captcha and perhaps not even for that def addrow(form, a, b, c, style, _id, position=-1): if style == "divs": form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id=_id)) elif style == "table2cols": form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'), TD(c, _class='w2p_fc'))) form[0].insert(position + 1, TR(TD(b, 
_class='w2p_fw'), _colspan=2, _id=_id)) elif style == "ul": form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'), DIV(b, _class='w2p_fw'), DIV(c, _class='w2p_fc'), _id=_id)) elif style == "bootstrap": form[0].insert(position, DIV(LABEL(a, _class='control-label'), DIV(b, SPAN(c, _class='inline-help'), _class='controls'), _class='control-group', _id=_id)) elif style == "bootstrap3_inline": form[0].insert(position, DIV(LABEL(a, _class='control-label col-sm-3'), DIV(b, SPAN(c, _class='help-block'), _class='col-sm-9'), _class='form-group', _id=_id)) elif style == "bootstrap3_stacked": form[0].insert(position, DIV(LABEL(a, _class='control-label'), b, SPAN(c, _class='help-block'), _class='form-group', _id=_id)) else: form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'), TD(b, _class='w2p_fw'), TD(c, _class='w2p_fc'), _id=_id)) class Auth(object): default_settings = dict( hideerror=False, password_min_length=4, cas_maps=None, reset_password_requires_verification=False, registration_requires_verification=False, registration_requires_approval=False, bulk_register_enabled=False, login_after_registration=False, login_after_password_change=True, alternate_requires_registration=False, create_user_groups="user_%(id)s", everybody_group_id=None, manager_actions={}, auth_manager_role=None, two_factor_authentication_group = None, login_captcha=None, register_captcha=None, pre_registration_div=None, retrieve_username_captcha=None, retrieve_password_captcha=None, captcha=None, prevent_open_redirect_attacks=True, prevent_password_reset_attacks=True, expiration=3600, # one hour long_expiration=3600 * 30 * 24, # one month remember_me_form=True, allow_basic_login=False, allow_basic_login_only=False, on_failed_authentication=lambda x: redirect(x), formstyle=None, label_separator=None, logging_enabled = True, allow_delete_accounts=False, password_field='password', table_user_name='auth_user', table_group_name='auth_group', table_membership_name='auth_membership', 
table_permission_name='auth_permission', table_event_name='auth_event', table_cas_name='auth_cas', table_token_name='auth_token', table_user=None, table_group=None, table_membership=None, table_permission=None, table_event=None, table_cas=None, showid=False, use_username=False, login_email_validate=True, login_userfield=None, multi_login=False, logout_onlogout=None, register_fields=None, register_verify_password=True, profile_fields=None, email_case_sensitive=True, username_case_sensitive=True, update_fields=['email'], ondelete="CASCADE", client_side=True, renew_session_onlogin=True, renew_session_onlogout=True, keep_session_onlogin=True, keep_session_onlogout=False, wiki=Settings(), ) # ## these are messages that can be customized default_messages = dict( login_button='Log In', register_button='Sign Up', password_reset_button='Request reset password', password_change_button='Change password', profile_save_button='Apply changes', submit_button='Submit', verify_password='Verify Password', delete_label='Check to delete', function_disabled='Function disabled', access_denied='Insufficient privileges', registration_verifying='Registration needs verification', registration_pending='Registration is pending approval', email_taken='This email already has an account', invalid_username='Invalid username', username_taken='Username already taken', login_disabled='Login disabled by administrator', logged_in='Logged in', email_sent='Email sent', unable_to_send_email='Unable to send email', email_verified='Email verified', logged_out='Logged out', registration_successful='Registration successful', invalid_email='Invalid email', unable_send_email='Unable to send email', invalid_login='Invalid login', invalid_user='Invalid user', invalid_password='Invalid password', is_empty="Cannot be empty", mismatched_password="Password fields don't match", verify_email='Welcome %(username)s! 
Click on the link %(link)s to verify your email', verify_email_subject='Email verification', username_sent='Your username was emailed to you', new_password_sent='A new password was emailed to you', password_changed='Password changed', retrieve_username='Your username is: %(username)s', retrieve_username_subject='Username retrieve', retrieve_password='Your password is: %(password)s', retrieve_password_subject='Password retrieve', reset_password='Click on the link %(link)s to reset your password', reset_password_subject='Password reset', bulk_invite_subject='Invitation to join%(site)s', bulk_invite_body='You have been invited to join %(site)s, click %(link)s to complete the process', invalid_reset_password='Invalid reset password', profile_updated='Profile updated', new_password='New password', old_password='Old password', group_description='Group uniquely assigned to user %(id)s', register_log='User %(id)s Registered', login_log='User %(id)s Logged-in', login_failed_log=None, logout_log='User %(id)s Logged-out', profile_log='User %(id)s Profile updated', verify_email_log='User %(id)s Verification email sent', retrieve_username_log='User %(id)s Username retrieved', retrieve_password_log='User %(id)s Password retrieved', reset_password_log='User %(id)s Password reset', change_password_log='User %(id)s Password changed', add_group_log='Group %(group_id)s created', del_group_log='Group %(group_id)s deleted', add_membership_log=None, del_membership_log=None, has_membership_log=None, add_permission_log=None, del_permission_log=None, has_permission_log=None, impersonate_log='User %(id)s is impersonating %(other_id)s', label_first_name='First name', label_last_name='Last name', label_username='Username', label_email='E-mail', label_password='Password', label_registration_key='Registration key', label_reset_password_key='Reset Password key', label_registration_id='Registration identifier', label_role='Role', label_description='Description', label_user_id='User ID', 
label_group_id='Group ID', label_name='Name', label_table_name='Object or table name', label_record_id='Record ID', label_time_stamp='Timestamp', label_client_ip='Client IP', label_origin='Origin', label_remember_me="Remember me (for 30 days)", verify_password_comment='please input your password again', ) """ Class for authentication, authorization, role based access control. Includes: - registration and profile - login and logout - username and password retrieval - event logging - role creation and assignment - user defined group/role based permission Args: environment: is there for legacy but unused (awful) db: has to be the database where to create tables for authentication mailer: `Mail(...)` or None (no mailer) or True (make a mailer) hmac_key: can be a hmac_key or hmac_key=Auth.get_or_create_key() controller: (where is the user action?) cas_provider: (delegate authentication to the URL, CAS2) Authentication Example:: from gluon.contrib.utils import * mail=Mail() mail.settings.server='smtp.gmail.com:587' mail.settings.sender='you@somewhere.com' mail.settings.login='username:password' auth=Auth(db) auth.settings.mailer=mail # auth.settings....=... auth.define_tables() def authentication(): return dict(form=auth()) Exposes: - `http://.../{application}/{controller}/authentication/login` - `http://.../{application}/{controller}/authentication/logout` - `http://.../{application}/{controller}/authentication/register` - `http://.../{application}/{controller}/authentication/verify_email` - `http://.../{application}/{controller}/authentication/retrieve_username` - `http://.../{application}/{controller}/authentication/retrieve_password` - `http://.../{application}/{controller}/authentication/reset_password` - `http://.../{application}/{controller}/authentication/profile` - `http://.../{application}/{controller}/authentication/change_password` On registration a group with role=new_user.id is created and user is given membership of this group. 
You can create a group with:: group_id=auth.add_group('Manager', 'can access the manage action') auth.add_permission(group_id, 'access to manage') Here "access to manage" is just a user defined string. You can give access to a user:: auth.add_membership(group_id, user_id) If user id is omitted, the logged in user is assumed Then you can decorate any action:: @auth.requires_permission('access to manage') def manage(): return dict() You can restrict a permission to a specific table:: auth.add_permission(group_id, 'edit', db.sometable) @auth.requires_permission('edit', db.sometable) Or to a specific record:: auth.add_permission(group_id, 'edit', db.sometable, 45) @auth.requires_permission('edit', db.sometable, 45) If authorization is not granted calls:: auth.settings.on_failed_authorization Other options:: auth.settings.mailer=None auth.settings.expiration=3600 # seconds ... ### these are messages that can be customized ... """ @staticmethod def get_or_create_key(filename=None, alg='sha512'): request = current.request if not filename: filename = os.path.join(request.folder, 'private', 'auth.key') if os.path.exists(filename): key = open(filename, 'r').read().strip() else: key = alg + ':' + web2py_uuid() open(filename, 'w').write(key) return key def url(self, f=None, args=None, vars=None, scheme=False): if args is None: args = [] if vars is None: vars = {} return URL(c=self.settings.controller, f=f, args=args, vars=vars, scheme=scheme) def here(self): return URL(args=current.request.args, vars=current.request.get_vars) def __init__(self, environment=None, db=None, mailer=True, hmac_key=None, controller='default', function='user', cas_provider=None, signature=True, secure=False, csrf_prevention=True, propagate_extension=None, url_index=None): ## next two lines for backward compatibility if not db and environment and isinstance(environment, DAL): db = environment self.db = db self.environment = current self.csrf_prevention = csrf_prevention request = current.request 
session = current.session auth = session.auth self.user_groups = auth and auth.user_groups or {} if secure: request.requires_https() now = request.now # if we have auth info # if not expired it, used it # if expired, clear the session # else, only clear auth info in the session if auth: delta = datetime.timedelta(days=0, seconds=auth.expiration) if auth.last_visit and auth.last_visit + delta > now: self.user = auth.user # this is a trick to speed up sessions to avoid many writes if (now - auth.last_visit).seconds > (auth.expiration / 10): auth.last_visit = request.now else: self.user = None if session.auth: del session.auth session.renew(clear_session=True) else: self.user = None if session.auth: del session.auth # ## what happens after login? url_index = url_index or URL(controller, 'index') url_login = URL(controller, function, args='login', extension = propagate_extension) # ## what happens after registration? settings = self.settings = Settings() settings.update(Auth.default_settings) settings.update( cas_domains=[request.env.http_host], enable_tokens=False, cas_provider=cas_provider, cas_actions=dict(login='login', validate='validate', servicevalidate='serviceValidate', proxyvalidate='proxyValidate', logout='logout'), extra_fields={}, actions_disabled=[], controller=controller, function=function, login_url=url_login, logged_url=URL(controller, function, args='profile'), download_url=URL(controller, 'download'), mailer=(mailer is True) and Mail() or mailer, on_failed_authorization = URL(controller, function, args='not_authorized'), login_next = url_index, login_onvalidation = [], login_onaccept = [], login_onfail = [], login_methods = [self], login_form = self, logout_next = url_index, logout_onlogout = None, register_next = url_index, register_onvalidation = [], register_onaccept = [], verify_email_next = url_login, verify_email_onaccept = [], profile_next = url_index, profile_onvalidation = [], profile_onaccept = [], retrieve_username_next = url_index, 
retrieve_password_next = url_index, request_reset_password_next = url_login, reset_password_next = url_index, change_password_next = url_index, change_password_onvalidation = [], change_password_onaccept = [], retrieve_password_onvalidation = [], request_reset_password_onvalidation = [], request_reset_password_onaccept = [], reset_password_onvalidation = [], reset_password_onaccept = [], hmac_key = hmac_key, formstyle = current.response.formstyle, label_separator = current.response.form_label_separator ) settings.lock_keys = True # ## these are messages that can be customized messages = self.messages = Messages(current.T) messages.update(Auth.default_messages) messages.update(ajax_failed_authentication= DIV(H4('NOT AUTHORIZED'), 'Please ', A('login', _href=self.settings.login_url + ('?_next=' + urllib.quote(current.request.env.http_web2py_component_location)) if current.request.env.http_web2py_component_location else ''), ' to view this content.', _class='not-authorized alert alert-block')) messages.lock_keys = True # for "remember me" option response = current.response if auth and auth.remember_me: # when user wants to be logged in for longer response.session_cookie_expires = auth.expiration if signature: self.define_signature() else: self.signature = None def get_vars_next(self): next = current.request.vars._next if isinstance(next, (list, tuple)): next = next[0] if next and self.settings.prevent_open_redirect_attacks: # Prevent an attacker from adding an arbitrary url after the # _next variable in the request. 
items = next.split('/') if '//' in next and items[2] != current.request.env.http_host: next = None return next def _get_user_id(self): """accessor for auth.user_id""" return self.user and self.user.id or None user_id = property(_get_user_id, doc="user.id or None") def table_user(self): return self.db[self.settings.table_user_name] def table_group(self): return self.db[self.settings.table_group_name] def table_membership(self): return self.db[self.settings.table_membership_name] def table_permission(self): return self.db[self.settings.table_permission_name] def table_event(self): return self.db[self.settings.table_event_name] def table_cas(self): return self.db[self.settings.table_cas_name] def table_token(self): return self.db[self.settings.table_token_name] def _HTTP(self, *a, **b): """ only used in lambda: self._HTTP(404) """ raise HTTP(*a, **b) def __call__(self): """ Example: Use as:: def authentication(): return dict(form=auth()) """ request = current.request args = request.args if not args: redirect(self.url(args='login', vars=request.vars)) elif args[0] in self.settings.actions_disabled: raise HTTP(404) if args[0] in ('login', 'logout', 'register', 'verify_email', 'retrieve_username', 'retrieve_password', 'reset_password', 'request_reset_password', 'change_password', 'profile', 'groups', 'impersonate', 'not_authorized', 'confirm_registration', 'bulk_register','manage_tokens'): if len(request.args) >= 2 and args[0] == 'impersonate': return getattr(self, args[0])(request.args[1]) else: return getattr(self, args[0])() elif args[0] == 'cas' and not self.settings.cas_provider: if args(1) == self.settings.cas_actions['login']: return self.cas_login(version=2) elif args(1) == self.settings.cas_actions['validate']: return self.cas_validate(version=1) elif args(1) == self.settings.cas_actions['servicevalidate']: return self.cas_validate(version=2, proxy=False) elif args(1) == self.settings.cas_actions['proxyvalidate']: return self.cas_validate(version=2, proxy=True) 
elif args(1) == self.settings.cas_actions['logout']: return self.logout(next=request.vars.service or DEFAULT) else: raise HTTP(404) def navbar(self, prefix='Welcome', action=None, separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT, referrer_actions=DEFAULT, mode='default'): """ Navbar with support for more templates This uses some code from the old navbar. Args: mode: see options for list of """ items = [] # Hold all menu items in a list self.bar = '' # The final T = current.T referrer_actions = [] if not referrer_actions else referrer_actions if not action: action = self.url(self.settings.function) request = current.request if URL() == action: next = '' else: next = '?_next=' + urllib.quote(URL(args=request.args, vars=request.get_vars)) href = lambda function: '%s/%s%s' % (action, function, next if referrer_actions is DEFAULT or function in referrer_actions else '') if isinstance(prefix, str): prefix = T(prefix) if prefix: prefix = prefix.strip() + ' ' def Anr(*a, **b): b['_rel'] = 'nofollow' return A(*a, **b) if self.user_id: # User is logged in logout_next = self.settings.logout_next items.append({'name': T('Log Out'), 'href': '%s/logout?_next=%s' % (action, urllib.quote( logout_next)), 'icon': 'icon-off'}) if not 'profile' in self.settings.actions_disabled: items.append({'name': T('Profile'), 'href': href('profile'), 'icon': 'icon-user'}) if not 'change_password' in self.settings.actions_disabled: items.append({'name': T('Password'), 'href': href('change_password'), 'icon': 'icon-lock'}) if user_identifier is DEFAULT: user_identifier = '%(first_name)s' if callable(user_identifier): user_identifier = user_identifier(self.user) elif ((isinstance(user_identifier, str) or type(user_identifier).__name__ == 'lazyT') and re.search(r'%\(.+\)s', user_identifier)): user_identifier = user_identifier % self.user if not user_identifier: user_identifier = '' else: # User is not logged in items.append({'name': T('Log In'), 'href': href('login'), 'icon': 'icon-off'}) if 
not 'register' in self.settings.actions_disabled: items.append({'name': T('Sign Up'), 'href': href('register'), 'icon': 'icon-user'}) if not 'request_reset_password' in self.settings.actions_disabled: items.append({'name': T('Lost password?'), 'href': href('request_reset_password'), 'icon': 'icon-lock'}) if (self.settings.use_username and not 'retrieve_username' in self.settings.actions_disabled): items.append({'name': T('Forgot username?'), 'href': href('retrieve_username'), 'icon': 'icon-edit'}) def menu(): # For inclusion in MENU self.bar = [(items[0]['name'], False, items[0]['href'], [])] del items[0] for item in items: self.bar[0][3].append((item['name'], False, item['href'])) def bootstrap3(): # Default web2py scaffolding def rename(icon): return icon+' '+icon.replace('icon', 'glyphicon') self.bar = UL(LI(Anr(I(_class=rename('icon '+items[0]['icon'])), ' ' + items[0]['name'], _href=items[0]['href'])), _class='dropdown-menu') del items[0] for item in items: self.bar.insert(-1, LI(Anr(I(_class=rename('icon '+item['icon'])), ' ' + item['name'], _href=item['href']))) self.bar.insert(-1, LI('', _class='divider')) if self.user_id: self.bar = LI(Anr(prefix, user_identifier, _href='#', _class="dropdown-toggle", data={'toggle': 'dropdown'}), self.bar, _class='dropdown') else: self.bar = LI(Anr(T('Log In'), _href='#', _class="dropdown-toggle", data={'toggle': 'dropdown'}), self.bar, _class='dropdown') def bare(): """ In order to do advanced customization we only need the prefix, the user_identifier and the href attribute of items Examples: Use as:: # in module custom_layout.py from gluon import * def navbar(auth_navbar): bar = auth_navbar user = bar["user"] if not user: btn_login = A(current.T("Login"), _href=bar["login"], _class="btn btn-success", _rel="nofollow") btn_register = A(current.T("Sign up"), _href=bar["register"], _class="btn btn-primary", _rel="nofollow") return DIV(btn_register, btn_login, _class="btn-group") else: toggletext = "%s back %s" % 
(bar["prefix"], user) toggle = A(toggletext, _href="#", _class="dropdown-toggle", _rel="nofollow", **{"_data-toggle": "dropdown"}) li_profile = LI(A(I(_class="icon-user"), ' ', current.T("Account details"), _href=bar["profile"], _rel="nofollow")) li_custom = LI(A(I(_class="icon-book"), ' ', current.T("My Agenda"), _href="#", rel="nofollow")) li_logout = LI(A(I(_class="icon-off"), ' ', current.T("logout"), _href=bar["logout"], _rel="nofollow")) dropdown = UL(li_profile, li_custom, LI('', _class="divider"), li_logout, _class="dropdown-menu", _role="menu") return LI(toggle, dropdown, _class="dropdown") # in models db.py import custom_layout as custom # in layout.html <ul id="navbar" class="nav pull-right"> {{='auth' in globals() and \ custom.navbar(auth.navbar(mode='bare')) or ''}}</ul> """ bare = {} bare['prefix'] = prefix bare['user'] = user_identifier if self.user_id else None for i in items: if i['name'] == T('Log In'): k = 'login' elif i['name'] == T('Sign Up'): k = 'register' elif i['name'] == T('Lost password?'): k = 'request_reset_password' elif i['name'] == T('Forgot username?'): k = 'retrieve_username' elif i['name'] == T('Log Out'): k = 'logout' elif i['name'] == T('Profile'): k = 'profile' elif i['name'] == T('Password'): k = 'change_password' bare[k] = i['href'] self.bar = bare options = {'asmenu': menu, 'dropdown': bootstrap3, 'bare': bare } # Define custom modes. 
if mode in options and callable(options[mode]): options[mode]() else: s1, s2, s3 = separators if self.user_id: self.bar = SPAN(prefix, user_identifier, s1, Anr(items[0]['name'], _href=items[0]['href']), s3, _class='auth_navbar') else: self.bar = SPAN(s1, Anr(items[0]['name'], _href=items[0]['href']), s3, _class='auth_navbar') for item in items[1:]: self.bar.insert(-1, s2) self.bar.insert(-1, Anr(item['name'], _href=item['href'])) return self.bar def __get_migrate(self, tablename, migrate=True): if type(migrate).__name__ == 'str': return (migrate + tablename + '.table') elif migrate == False: return False else: return True def enable_record_versioning(self, tables, archive_db=None, archive_names='%(tablename)s_archive', current_record='current_record', current_record_label=None): """ Used to enable full record versioning (including auth tables):: auth = Auth(db) auth.define_tables(signature=True) # define our own tables db.define_table('mything',Field('name'),auth.signature) auth.enable_record_versioning(tables=db) tables can be the db (all table) or a list of tables. only tables with modified_by and modified_on fiels (as created by auth.signature) will have versioning. Old record versions will be in table 'mything_archive' automatically defined. when you enable enable_record_versioning, records are never deleted but marked with is_active=False. enable_record_versioning enables a common_filter for every table that filters out records with is_active = False Note: If you use auth.enable_record_versioning, do not use auth.archive or you will end up with duplicates. auth.archive does explicitly what enable_record_versioning does automatically. 
""" current_record_label = current_record_label or current.T( current_record.replace('_', ' ').title()) for table in tables: fieldnames = table.fields() if ('id' in fieldnames and 'modified_on' in fieldnames and not current_record in fieldnames): table._enable_record_versioning( archive_db=archive_db, archive_name=archive_names, current_record=current_record, current_record_label=current_record_label) def define_signature(self): db = self.db settings = self.settings request = current.request T = current.T reference_user = 'reference %s' % settings.table_user_name def lazy_user(auth=self): return auth.user_id def represent(id, record=None, s=settings): try: user = s.table_user(id) return '%s %s' % (user.get("first_name", user.get("email")), user.get("last_name", '')) except: return id ondelete = self.settings.ondelete self.signature = Table( self.db, 'auth_signature', Field('is_active', 'boolean', default=True, readable=False, writable=False, label=T('Is Active')), Field('created_on', 'datetime', default=request.now, writable=False, readable=False, label=T('Created On')), Field('created_by', reference_user, default=lazy_user, represent=represent, writable=False, readable=False, label=T('Created By'), ondelete=ondelete), Field('modified_on', 'datetime', update=request.now, default=request.now, writable=False, readable=False, label=T('Modified On')), Field('modified_by', reference_user, represent=represent, default=lazy_user, update=lazy_user, writable=False, readable=False, label=T('Modified By'), ondelete=ondelete)) def define_tables(self, username=None, signature=None, enable_tokens=False, migrate=None, fake_migrate=None): """ To be called unless tables are defined manually Examples: Use as:: # defines all needed tables and table files # 'myprefix_auth_user.table', ... 
auth.define_tables(migrate='myprefix_') # defines all needed tables without migration/table files auth.define_tables(migrate=False) """ db = self.db if migrate is None: migrate = db._migrate if fake_migrate is None: fake_migrate = db._fake_migrate settings = self.settings if username is None: username = settings.use_username else: settings.use_username = username settings.enable_tokens = enable_tokens if not self.signature: self.define_signature() if signature == True: signature_list = [self.signature] elif not signature: signature_list = [] elif isinstance(signature, Table): signature_list = [signature] else: signature_list = signature is_not_empty = IS_NOT_EMPTY(error_message=self.messages.is_empty) is_crypted = CRYPT(key=settings.hmac_key, min_length=settings.password_min_length) is_unique_email = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_NOT_IN_DB(db, '%s.email' % settings.table_user_name, error_message=self.messages.email_taken)] if not settings.email_case_sensitive: is_unique_email.insert(1, IS_LOWER()) if not settings.table_user_name in db.tables: passfield = settings.password_field extra_fields = settings.extra_fields.get( settings.table_user_name, []) + signature_list if username or settings.cas_provider: is_unique_username = \ [IS_MATCH('[\w\.\-]+', strict=True, error_message=self.messages.invalid_username), IS_NOT_IN_DB(db, '%s.username' % settings.table_user_name, error_message=self.messages.username_taken)] if not settings.username_case_sensitive: is_unique_username.insert(1, IS_LOWER()) db.define_table( settings.table_user_name, Field('first_name', length=128, default='', label=self.messages.label_first_name, requires=is_not_empty), Field('last_name', length=128, default='', label=self.messages.label_last_name, requires=is_not_empty), Field('email', length=512, default='', label=self.messages.label_email, requires=is_unique_email), Field('username', length=128, default='', label=self.messages.label_username, 
requires=is_unique_username), Field(passfield, 'password', length=512, readable=False, label=self.messages.label_password, requires=[is_crypted]), Field('registration_key', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_key), Field('reset_password_key', length=512, writable=False, readable=False, default='', label=self.messages.label_reset_password_key), Field('registration_id', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_id), *extra_fields, **dict( migrate=self.__get_migrate(settings.table_user_name, migrate), fake_migrate=fake_migrate, format='%(username)s')) else: db.define_table( settings.table_user_name, Field('first_name', length=128, default='', label=self.messages.label_first_name, requires=is_not_empty), Field('last_name', length=128, default='', label=self.messages.label_last_name, requires=is_not_empty), Field('email', length=512, default='', label=self.messages.label_email, requires=is_unique_email), Field(passfield, 'password', length=512, readable=False, label=self.messages.label_password, requires=[is_crypted]), Field('registration_key', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_key), Field('reset_password_key', length=512, writable=False, readable=False, default='', label=self.messages.label_reset_password_key), Field('registration_id', length=512, writable=False, readable=False, default='', label=self.messages.label_registration_id), *extra_fields, **dict( migrate=self.__get_migrate(settings.table_user_name, migrate), fake_migrate=fake_migrate, format='%(first_name)s %(last_name)s (%(id)s)')) reference_table_user = 'reference %s' % settings.table_user_name if not settings.table_group_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_group_name, []) + signature_list db.define_table( settings.table_group_name, Field('role', length=512, default='', 
label=self.messages.label_role, requires=IS_NOT_IN_DB(db, '%s.role' % settings.table_group_name)), Field('description', 'text', label=self.messages.label_description), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_group_name, migrate), fake_migrate=fake_migrate, format='%(role)s (%(id)s)')) reference_table_group = 'reference %s' % settings.table_group_name if not settings.table_membership_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_membership_name, []) + signature_list db.define_table( settings.table_membership_name, Field('user_id', reference_table_user, label=self.messages.label_user_id), Field('group_id', reference_table_group, label=self.messages.label_group_id), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_membership_name, migrate), fake_migrate=fake_migrate)) if not settings.table_permission_name in db.tables: extra_fields = settings.extra_fields.get( settings.table_permission_name, []) + signature_list db.define_table( settings.table_permission_name, Field('group_id', reference_table_group, label=self.messages.label_group_id), Field('name', default='default', length=512, label=self.messages.label_name, requires=is_not_empty), Field('table_name', length=512, label=self.messages.label_table_name), Field('record_id', 'integer', default=0, label=self.messages.label_record_id, requires=IS_INT_IN_RANGE(0, 10 ** 9)), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_permission_name, migrate), fake_migrate=fake_migrate)) if not settings.table_event_name in db.tables: db.define_table( settings.table_event_name, Field('time_stamp', 'datetime', default=current.request.now, label=self.messages.label_time_stamp), Field('client_ip', default=current.request.client, label=self.messages.label_client_ip), Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('origin', default='auth', length=512, label=self.messages.label_origin, 
requires=is_not_empty), Field('description', 'text', default='', label=self.messages.label_description, requires=is_not_empty), *settings.extra_fields.get(settings.table_event_name, []), **dict( migrate=self.__get_migrate( settings.table_event_name, migrate), fake_migrate=fake_migrate)) now = current.request.now if settings.cas_domains: if not settings.table_cas_name in db.tables: db.define_table( settings.table_cas_name, Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('created_on', 'datetime', default=now), Field('service', requires=IS_URL()), Field('ticket'), Field('renew', 'boolean', default=False), *settings.extra_fields.get(settings.table_cas_name, []), **dict( migrate=self.__get_migrate( settings.table_cas_name, migrate), fake_migrate=fake_migrate)) if settings.enable_tokens: extra_fields = settings.extra_fields.get( settings.table_token_name, []) + signature_list if not settings.table_token_name in db.tables: db.define_table( settings.table_token_name, Field('user_id', reference_table_user, default=None, label=self.messages.label_user_id), Field('expires_on', 'datetime', default=datetime.datetime(2999,12,31)), Field('token',writable=False,default=web2py_uuid(),unique=True), *extra_fields, **dict( migrate=self.__get_migrate( settings.table_token_name, migrate), fake_migrate=fake_migrate)) if not db._lazy_tables: settings.table_user = db[settings.table_user_name] settings.table_group = db[settings.table_group_name] settings.table_membership = db[settings.table_membership_name] settings.table_permission = db[settings.table_permission_name] settings.table_event = db[settings.table_event_name] if settings.cas_domains: settings.table_cas = db[settings.table_cas_name] if settings.cas_provider: # THIS IS NOT LAZY settings.actions_disabled = \ ['profile', 'register', 'change_password', 'request_reset_password', 'retrieve_username'] from gluon.contrib.login_methods.cas_auth import CasAuth maps = settings.cas_maps if not 
maps: table_user = self.table_user() maps = dict((name, lambda v, n=name: v.get(n, None)) for name in table_user.fields if name != 'id' and table_user[name].readable) maps['registration_id'] = \ lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user']) actions = [settings.cas_actions['login'], settings.cas_actions['servicevalidate'], settings.cas_actions['logout']] settings.login_form = CasAuth( casversion=2, urlbase=settings.cas_provider, actions=actions, maps=maps) return self def log_event(self, description, vars=None, origin='auth'): """ Examples: Use as:: auth.log_event(description='this happened', origin='auth') """ if not self.settings.logging_enabled or not description: return elif self.is_logged_in(): user_id = self.user.id else: user_id = None # user unknown vars = vars or {} # log messages should not be translated if type(description).__name__ == 'lazyT': description = description.m self.table_event().insert( description=str(description % vars), origin=origin, user_id=user_id) def get_or_create_user(self, keys, update_fields=['email'], login=True, get=True): """ Used for alternate login methods: If the user exists already then password is updated. If the user doesn't yet exist, then they are created. """ table_user = self.table_user() user = None checks = [] # make a guess about who this user is for fieldname in ['registration_id', 'username', 'email']: if fieldname in table_user.fields() and \ keys.get(fieldname, None): checks.append(fieldname) value = keys[fieldname] user = table_user(**{fieldname: value}) if user: break if not checks: return None if not 'registration_id' in keys: keys['registration_id'] = keys[checks[0]] # if we think we found the user but registration_id does not match, # make new user if 'registration_id' in checks \ and user \ and user.registration_id \ and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])): user = None # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER? 
if user: if not get: # added for register_bare to avoid overwriting users return None update_keys = dict(registration_id=keys['registration_id']) for key in update_fields: if key in keys: update_keys[key] = keys[key] user.update_record(**update_keys) elif checks: if not 'first_name' in keys and 'first_name' in table_user.fields: guess = keys.get('email', 'anonymous').split('@')[0] keys['first_name'] = keys.get('username', guess) user_id = table_user.insert(**table_user._filter_fields(keys)) user = table_user[user_id] if self.settings.create_user_groups: group_id = self.add_group( self.settings.create_user_groups % user) self.add_membership(group_id, user_id) if self.settings.everybody_group_id: self.add_membership(self.settings.everybody_group_id, user_id) if login: self.user = user return user def basic(self, basic_auth_realm=False): """ Performs basic login. Args: basic_auth_realm: optional basic http authentication realm. Can take str or unicode or function or callable or boolean. reads current.request.env.http_authorization and returns basic_allowed,basic_accepted,user. if basic_auth_realm is defined is a callable it's return value is used to set the basic authentication realm, if it's a string its content is used instead. Otherwise basic authentication realm is set to the application name. If basic_auth_realm is None or False (the default) the behavior is to skip sending any challenge. 
""" if not self.settings.allow_basic_login: return (False, False, False) basic = current.request.env.http_authorization if basic_auth_realm: if callable(basic_auth_realm): basic_auth_realm = basic_auth_realm() elif isinstance(basic_auth_realm, (unicode, str)): basic_realm = unicode(basic_auth_realm) elif basic_auth_realm is True: basic_realm = u'' + current.request.application http_401 = HTTP(401, u'Not Authorized', **{'WWW-Authenticate': u'Basic realm="' + basic_realm + '"'}) if not basic or not basic[:6].lower() == 'basic ': if basic_auth_realm: raise http_401 return (True, False, False) (username, sep, password) = base64.b64decode(basic[6:]).partition(':') is_valid_user = sep and self.login_bare(username, password) if not is_valid_user and basic_auth_realm: raise http_401 return (True, True, is_valid_user) def login_user(self, user): """ Logins the `user = db.auth_user(id)` """ from gluon.settings import global_settings if global_settings.web2py_runtime_gae: user = Row(self.table_user()._filter_fields(user, id=True)) delattr(user, 'password') else: user = Row(user) for key, value in user.items(): if callable(value) or key == 'password': delattr(user, key) if self.settings.renew_session_onlogin: current.session.renew(clear_session=not self.settings.keep_session_onlogin) current.session.auth = Storage(user=user, last_visit=current.request.now, expiration=self.settings.expiration, hmac_key=web2py_uuid()) self.user = user self.update_groups() def _get_login_settings(self): table_user = self.table_user() userfield = self.settings.login_userfield or 'username' \ if 'username' in table_user.fields else 'email' passfield = self.settings.password_field return Storage({"table_user": table_user, "userfield": userfield, "passfield": passfield}) def login_bare(self, username, password): """ Logins user as specified by username (or email) and password """ settings = self._get_login_settings() user = settings.table_user(**{settings.userfield: \ username}) if user and 
user.get(settings.passfield, False): password = settings.table_user[ settings.passfield].validate(password)[0] if ((user.registration_key is None or not user.registration_key.strip()) and password == user[settings.passfield]): self.login_user(user) return user else: # user not in database try other login methods for login_method in self.settings.login_methods: if login_method != self and login_method(username, password): self.user = user return user return False def register_bare(self, **fields): """ Registers a user as specified by username (or email) and a raw password. """ settings = self._get_login_settings() # users can register_bare even if no password is provided, # in this case they will have to reset their password to login if fields.get(settings.passfield): fields[settings.passfield] = \ settings.table_user[settings.passfield].validate(fields[settings.passfield])[0] if not fields.get(settings.userfield): raise ValueError("register_bare: " + "userfield not provided or invalid") user = self.get_or_create_user(fields, login=False, get=False, update_fields=self.settings.update_fields) if not user: # get or create did not create a user (it ignores duplicate records) return False return user def cas_login(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, version=2, ): request = current.request response = current.response session = current.session db, table = self.db, self.table_cas() session._cas_service = request.vars.service or session._cas_service if not request.env.http_host in self.settings.cas_domains or \ not session._cas_service: raise HTTP(403, 'not authorized') def allow_access(interactivelogin=False): row = table(service=session._cas_service, user_id=self.user.id) if row: ticket = row.ticket else: ticket = 'ST-' + web2py_uuid() table.insert(service=session._cas_service, user_id=self.user.id, ticket=ticket, created_on=request.now, renew=interactivelogin) service = session._cas_service query_sep = '&' if '?' in service else '?' 
del session._cas_service if 'warn' in request.vars and not interactivelogin: response.headers[ 'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket return A("Continue to %s" % service, _href=service + query_sep + "ticket=" + ticket) else: redirect(service + query_sep + "ticket=" + ticket) if self.is_logged_in() and not 'renew' in request.vars: return allow_access() elif not self.is_logged_in() and 'gateway' in request.vars: redirect(service) def cas_onaccept(form, onaccept=onaccept): if not onaccept is DEFAULT: onaccept(form) return allow_access(interactivelogin=True) return self.login(next, onvalidation, cas_onaccept, log) def cas_validate(self, version=2, proxy=False): request = current.request db, table = self.db, self.table_cas() current.response.headers['Content-Type'] = 'text' ticket = request.vars.ticket renew = 'renew' in request.vars row = table(ticket=ticket) success = False if row: userfield = self.settings.login_userfield or 'username' \ if 'username' in table.fields else 'email' # If ticket is a service Ticket and RENEW flag respected if ticket[0:3] == 'ST-' and \ not ((row.renew and renew) ^ renew): user = self.table_user()(row.user_id) row.delete_record() success = True def build_response(body): return '<?xml version="1.0" encoding="UTF-8"?>\n' +\ TAG['cas:serviceResponse']( body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml() if success: if version == 1: message = 'yes\n%s' % user[userfield] else: # assume version 2 username = user.get('username', user[userfield]) message = build_response( TAG['cas:authenticationSuccess']( TAG['cas:user'](username), *[TAG['cas:' + field.name](user[field.name]) for field in self.table_user() if field.readable])) else: if version == 1: message = 'no\n' elif row: message = build_response(TAG['cas:authenticationFailure']()) else: message = build_response( TAG['cas:authenticationFailure']( 'Ticket %s not recognized' % ticket, _code='INVALID TICKET')) raise HTTP(200, message) def 
_reset_two_factor_auth(self, session): """When two-step authentication is enabled, this function is used to clear the session after successfully completing second challenge or when the maximum number of tries allowed has expired. """ session.auth_two_factor_user = None session.auth_two_factor = None session.auth_two_factor_enabled = False # Allow up to 4 attempts (the 1st one plus 3 more) session.auth_two_factor_tries_left = 3 def login(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a login form """ table_user = self.table_user() settings = self.settings if 'username' in table_user.fields or \ not settings.login_email_validate: tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty) if not settings.username_case_sensitive: tmpvalidator = [IS_LOWER(), tmpvalidator] else: tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email) if not settings.email_case_sensitive: tmpvalidator = [IS_LOWER(), tmpvalidator] request = current.request response = current.response session = current.session passfield = settings.password_field try: table_user[passfield].requires[-1].min_length = 0 except: pass ### use session for federated login snext = self.get_vars_next() if snext: session._auth_next = snext elif session._auth_next: snext = session._auth_next ### pass if next is DEFAULT: # important for security next = settings.login_next if callable(next): next = next() user_next = snext if user_next: external = user_next.split('://') if external[0].lower() in ['http', 'https', 'ftp']: host_next = user_next.split('//', 1)[-1].split('/')[0] if host_next in settings.cas_domains: next = user_next else: next = user_next if onvalidation is DEFAULT: onvalidation = settings.login_onvalidation if onaccept is DEFAULT: onaccept = settings.login_onaccept if log is DEFAULT: log = self.messages['login_log'] onfail = settings.login_onfail user = None # default #Setup the default field used for the form multi_login = False if 
self.settings.login_userfield: username = self.settings.login_userfield else: if 'username' in table_user.fields: username = 'username' else: username = 'email' if self.settings.multi_login: multi_login = True old_requires = table_user[username].requires table_user[username].requires = tmpvalidator # If two-factor authentication is enabled, and the maximum # number of tries allowed is used up, reset the session to # pre-login state with two-factor auth if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1: # Exceeded maximum allowed tries for this code. Require user to enter # username and password again. user = None accepted_form = False self._reset_two_factor_auth(session) # Redirect to the default 'next' page without logging # in. If that page requires login, user will be redirected # back to the main login form redirect(next, client_side=settings.client_side) # Before showing the default login form, check whether # we are already on the second step of two-step authentication. # If we are, then skip this login form and use the form for the # second challenge instead. # Note to devs: The code inside the if-block is unchanged from the # previous version of this file, other than for indentation inside # to put it inside the if-block if session.auth_two_factor_user is None: if settings.remember_me_form: extra_fields = [ Field('remember_me', 'boolean', default=False, label = self.messages.label_remember_me)] else: extra_fields = [] # do we use our own login form, or from a central source? 
            # --- first factor: username/password -------------------------
            # settings.login_form is `self` when web2py renders/validates the
            # login form itself; otherwise a CAS-style delegate is used (see
            # the else-branch below).
            if settings.login_form == self:
                form = SQLFORM(table_user,
                               fields=[username, passfield],
                               hidden=dict(_next=next),
                               showid=settings.showid,
                               submit_button=self.messages.login_button,
                               delete_label=self.messages.delete_label,
                               formstyle=settings.formstyle,
                               separator=settings.label_separator,
                               extra_fields = extra_fields,
                               )
                captcha = settings.login_captcha or \
                    (settings.login_captcha != False and settings.captcha)
                if captcha:
                    addrow(form, captcha.label, captcha, captcha.comment,
                           settings.formstyle, 'captcha__row')
                accepted_form = False
                if form.accepts(request, session if self.csrf_prevention else None,
                                formname='login', dbio=False,
                                onvalidation=onvalidation,
                                hideerror=settings.hideerror):
                    accepted_form = True
                    # check for username in db
                    entered_username = form.vars[username]
                    if multi_login and '@' in entered_username:
                        # if '@' in username check for email, not username
                        user = table_user(email = entered_username)
                    else:
                        user = table_user(**{username: entered_username})
                    if user:
                        # user in db, check if registration pending or disabled
                        temp_user = user
                        if temp_user.registration_key == 'pending':
                            response.flash = self.messages.registration_pending
                            return form
                        elif temp_user.registration_key in ('disabled', 'blocked'):
                            response.flash = self.messages.login_disabled
                            return form
                        elif (not temp_user.registration_key is None and
                              temp_user.registration_key.strip()):
                            response.flash = \
                                self.messages.registration_verifying
                            return form
                        # try alternate logins 1st as these have the
                        # current version of the password
                        user = None
                        for login_method in settings.login_methods:
                            if login_method != self and \
                                    login_method(request.vars[username],
                                                 request.vars[passfield]):
                                if not self in settings.login_methods:
                                    # do not store password in db
                                    form.vars[passfield] = None
                                user = self.get_or_create_user(
                                    form.vars, settings.update_fields)
                                break
                        if not user:
                            # alternates have failed, maybe because service inaccessible
                            if settings.login_methods[0] == self:
                                # try logging in locally using cached credentials
                                # NOTE(review): passfield validator hashes the
                                # submitted value, so this compares hash-to-hash
                                if form.vars.get(passfield, '') == temp_user[passfield]:
                                    # success
                                    user = temp_user
                    else:
                        # user not in db
                        if not settings.alternate_requires_registration:
                            # we're allowed to auto-register users from external systems
                            for login_method in settings.login_methods:
                                if login_method != self and \
                                        login_method(request.vars[username],
                                                     request.vars[passfield]):
                                    if not self in settings.login_methods:
                                        # do not store password in db
                                        form.vars[passfield] = None
                                    user = self.get_or_create_user(
                                        form.vars, settings.update_fields)
                                    break
                    # reached both when the db user failed all methods and when
                    # auto-registration was not attempted/failed
                    if not user:
                        self.log_event(self.messages['login_failed_log'],
                                       request.post_vars)
                        # invalid login
                        session.flash = self.messages.invalid_login
                        callback(onfail, None)
                        redirect(
                            self.url(args=request.args, vars=request.get_vars),
                            client_side=settings.client_side)
            else:
                # use a central authentication server
                cas = settings.login_form
                cas_user = cas.get_user()
                if cas_user:
                    cas_user[passfield] = None
                    user = self.get_or_create_user(
                        table_user._filter_fields(cas_user),
                        settings.update_fields)
                elif hasattr(cas, 'login_form'):
                    return cas.login_form()
                else:
                    # we need to pass through login again before going on
                    next = self.url(settings.function, args='login')
                    redirect(cas.login_url(next),
                             client_side=settings.client_side)

        # Extra login logic for two-factor authentication
        #################################################
        # If the 'user' variable has a value, this means that the first
        # authentication step was successful (i.e. user provided correct
        # username and password at the first challenge).
# Check if this user is signed up for two-factor authentication # Default rule is that the user must be part of a group that is called # auth.settings.two_factor_authentication_group if user and self.settings.two_factor_authentication_group: role = self.settings.two_factor_authentication_group session.auth_two_factor_enabled = self.has_membership(user_id=user.id, role=role) # challenge if session.auth_two_factor_enabled: form = SQLFORM.factory( Field('authentication_code', required=True, comment='This code was emailed to you and is required for login.'), hidden=dict(_next=next), formstyle=settings.formstyle, separator=settings.label_separator ) # accepted_form is used by some default web2py code later in the # function that handles running specified functions before redirect # Set it to False until the challenge form is accepted. accepted_form = False # Handle the case when a user has submitted the login/password # form successfully, and the password has been validated, but # the two-factor form has not been displayed or validated yet. 
if session.auth_two_factor_user is None and user is not None: session.auth_two_factor_user = user # store the validated user and associate with this session session.auth_two_factor = random.randint(100000, 999999) session.auth_two_factor_tries_left = 3 # Allow user to try up to 4 times # TODO: Add some error checking to handle cases where email cannot be sent self.settings.mailer.send( to=user.email, subject="Two-step Login Authentication Code", message="Your temporary login code is {0}".format(session.auth_two_factor)) if form.accepts(request, session if self.csrf_prevention else None, formname='login', dbio=False, onvalidation=onvalidation, hideerror=settings.hideerror): accepted_form = True if form.vars['authentication_code'] == str(session.auth_two_factor): # Handle the case when the two-factor form has been successfully validated # and the user was previously stored (the current user should be None because # in this case, the previous username/password login form should not be displayed. # This will allow the code after the 2-factor authentication block to proceed as # normal. if user is None or user == session.auth_two_factor_user: user = session.auth_two_factor_user # For security, because the username stored in the # session somehow does not match the just validated # user. Should not be possible without session stealing # which is hard with SSL. elif user != session.auth_two_factor_user: user = None # Either way, the user and code associated with this session should # be removed. This handles cases where the session login may have # expired but browser window is open, so the old session key and # session usernamem will still exist self._reset_two_factor_auth(session) else: # TODO: Limit the number of retries allowed. response.flash = 'Incorrect code. 
{0} more attempt(s) remaining.'.format(session.auth_two_factor_tries_left) session.auth_two_factor_tries_left -= 1 return form else: return form # End login logic for two-factor authentication # process authenticated users if user: user = Row(table_user._filter_fields(user, id=True)) # process authenticated users # user wants to be logged in for longer self.login_user(user) session.auth.expiration = \ request.post_vars.remember_me and \ settings.long_expiration or \ settings.expiration session.auth.remember_me = 'remember_me' in request.post_vars self.log_event(log, user) session.flash = self.messages.logged_in # how to continue if settings.login_form == self: if accepted_form: callback(onaccept, form) if next == session._auth_next: session._auth_next = None next = replace_id(next, form) redirect(next, client_side=settings.client_side) table_user[username].requires = old_requires return form elif user: callback(onaccept, None) if next == session._auth_next: del session._auth_next redirect(next, client_side=settings.client_side) def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT): """ Logouts and redirects to login """ # Clear out 2-step authentication information if user logs # out. This information is also cleared on successful login. 
        self._reset_two_factor_auth(current.session)
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.logout_next
        if onlogout is DEFAULT:
            onlogout = self.settings.logout_onlogout
        if onlogout:
            onlogout(self.user)
        if log is DEFAULT:
            log = self.messages['logout_log']
        if self.user:
            self.log_event(log, self.user)
        if self.settings.login_form != self:
            # Delegated (CAS-style) login: hand the logout round-trip to the
            # central server by redirecting to its logout URL.
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                next = cas.logout_url(next)
        current.session.auth = None
        if self.settings.renew_session_onlogout:
            # issue a fresh session id; clears session data too unless
            # keep_session_onlogout is set
            current.session.renew(clear_session=not self.settings.keep_session_onlogout)
        current.session.flash = self.messages.logged_out
        if not next is None:
            redirect(next)

    def register(self,
                 next=DEFAULT,
                 onvalidation=DEFAULT,
                 onaccept=DEFAULT,
                 log=DEFAULT,
                 ):
        """
        Returns a registration form
        """

        table_user = self.table_user()
        request = current.request
        response = current.response
        session = current.session
        if self.is_logged_in():
            # already-authenticated users cannot re-register
            redirect(self.settings.logged_url,
                     client_side=self.settings.client_side)
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.register_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.register_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.register_onaccept
        if log is DEFAULT:
            log = self.messages['register_log']

        table_user = self.table_user()
        # Resolve the identifying field: explicit setting > 'username'
        # column > 'email' column.
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        elif 'username' in table_user.fields:
            username = 'username'
        else:
            username = 'email'

        # Ensure the username field is unique.
        # Attach IS_NOT_IN_DB to the identifying field, preserving whatever
        # validators are already configured and never adding it twice.
        unique_validator = IS_NOT_IN_DB(self.db, table_user[username])
        if not table_user[username].requires:
            table_user[username].requires = unique_validator
        elif isinstance(table_user[username].requires, (list, tuple)):
            if not any([isinstance(validator, IS_NOT_IN_DB)
                        for validator in table_user[username].requires]):
                if isinstance(table_user[username].requires, list):
                    table_user[username].requires.append(unique_validator)
                else:
                    table_user[username].requires += (unique_validator, )
        elif not isinstance(table_user[username].requires, IS_NOT_IN_DB):
            table_user[username].requires = [table_user[username].requires,
                                             unique_validator]

        passfield = self.settings.password_field
        formstyle = self.settings.formstyle
        if self.settings.register_verify_password:
            # extra confirmation field compared against the *raw* posted
            # password (before the CRYPT validator hashes it)
            extra_fields = [
                Field("password_two", "password",
                      requires=IS_EQUAL_TO(request.post_vars.get(passfield, None),
                                           error_message=self.messages.mismatched_password),
                      label=current.T("Confirm Password"))]
        else:
            extra_fields = []
        form = SQLFORM(table_user,
                       fields=self.settings.register_fields,
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.register_button,
                       delete_label=self.messages.delete_label,
                       formstyle=formstyle,
                       separator=self.settings.label_separator,
                       extra_fields = extra_fields
                       )

        captcha = self.settings.register_captcha or self.settings.captcha
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment, self.settings.formstyle, 'captcha__row')

        # Add a message if specified
        if self.settings.pre_registration_div:
            addrow(form, '',
                   DIV(_id="pre-reg", *self.settings.pre_registration_div),
                   '', formstyle, '')

        # pre-assign the verification key so it is stored with the new record
        table_user.registration_key.default = key = web2py_uuid()
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='register',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            description = self.messages.group_description % form.vars
            if self.settings.create_user_groups:
                group_id = self.add_group(
                    self.settings.create_user_groups % form.vars, description)
                self.add_membership(group_id, form.vars.id)
            if self.settings.everybody_group_id:
                self.add_membership(
                    self.settings.everybody_group_id, form.vars.id)
            if self.settings.registration_requires_verification:
                # email the verification link; roll back the new user row if
                # the mail cannot be sent, so the account is not orphaned
                link = self.url(
                    self.settings.function, args=('verify_email', key), scheme=True)
                d = dict(form.vars)
                d.update(dict(key=key, link=link, username=form.vars[username]))
                if not (self.settings.mailer and
                        self.settings.mailer.send(
                            to=form.vars.email,
                            subject=self.messages.verify_email_subject,
                            message=self.messages.verify_email % d)):
                    self.db.rollback()
                    response.flash = self.messages.unable_send_email
                    return form
                session.flash = self.messages.email_sent
            if self.settings.registration_requires_approval and \
               not self.settings.registration_requires_verification:
                table_user[form.vars.id] = dict(registration_key='pending')
                session.flash = self.messages.registration_pending
            elif (not self.settings.registration_requires_verification or
                  self.settings.login_after_registration):
                if not self.settings.registration_requires_verification:
                    table_user[form.vars.id] = dict(registration_key='')
                session.flash = self.messages.registration_successful
                user = table_user(**{username: form.vars[username]})
                self.login_user(user)
                session.flash = self.messages.logged_in
            self.log_event(log, form.vars)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next, client_side=self.settings.client_side)

        return form

    def is_logged_in(self):
        """
        Checks if the user is logged in and returns True/False.
        If so user is in auth.user as well as in session.auth.user
        """
        if self.user:
            return True
        return False

    def verify_email(self,
                     next=DEFAULT,
                     onaccept=DEFAULT,
                     log=DEFAULT,
                     ):
        """
        Action used to verify the registration email
        """

        key = getarg(-1)
        table_user = self.table_user()
        user = table_user(registration_key=key)
        if not user:
            redirect(self.settings.login_url)
        if self.settings.registration_requires_approval:
            user.update_record(registration_key='pending')
            current.session.flash = self.messages.registration_pending
        else:
            user.update_record(registration_key='')
            current.session.flash = self.messages.email_verified
        # make sure session has same user.registration_key as db record
        if current.session.auth and current.session.auth.user:
            current.session.auth.user.registration_key = user.registration_key
        if log is DEFAULT:
            log = self.messages['verify_email_log']
        if next is DEFAULT:
            next = self.settings.verify_email_next
        if onaccept is DEFAULT:
            onaccept = self.settings.verify_email_onaccept
        self.log_event(log, user)
        callback(onaccept, user)
        redirect(next)

    def retrieve_username(self,
                          next=DEFAULT,
                          onvalidation=DEFAULT,
                          onaccept=DEFAULT,
                          log=DEFAULT,
                          ):
        """
        Returns a form to retrieve the user username
        (only if there is a username field)
        """

        table_user = self.table_user()
        if not 'username' in table_user.fields:
            raise HTTP(404)
        request = current.request
        response = current.response
        session = current.session
        captcha = self.settings.retrieve_username_captcha or \
                (self.settings.retrieve_username_captcha != False and self.settings.captcha)
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.retrieve_username_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_username_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_username_onaccept
        if log is DEFAULT:
            log = self.messages['retrieve_username_log']
        # temporarily swap in an existence check; restored before returning
        old_requires = table_user.email.requires
table_user.email.requires = [IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if form.accepts(request, session if self.csrf_prevention else None, formname='retrieve_username', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): users = table_user._db(table_user.email==form.vars.email).select() if not users: current.session.flash = \ self.messages.invalid_email redirect(self.url(args=request.args)) username = ', '.join(u.username for u in users) self.settings.mailer.send(to=form.vars.email, subject=self.messages.retrieve_username_subject, message=self.messages.retrieve_username % dict(username=username)) session.flash = self.messages.email_sent for user in users: self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next) table_user.email.requires = old_requires return form def random_password(self): import string import random password = '' specials = r'!#$*' for i in range(0, 3): password += random.choice(string.lowercase) password += random.choice(string.uppercase) password += random.choice(string.digits) password += random.choice(specials) return ''.join(random.sample(password, len(password))) def reset_password_deprecated(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to reset the user password (deprecated) """ table_user = self.table_user() request = current.request response = current.response session = current.session if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if 
next is DEFAULT: next = self.get_vars_next() or self.settings.retrieve_password_next if onvalidation is DEFAULT: onvalidation = self.settings.retrieve_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.retrieve_password_onaccept if log is DEFAULT: log = self.messages['retrieve_password_log'] old_requires = table_user.email.requires table_user.email.requires = [IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] form = SQLFORM(table_user, fields=['email'], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session if self.csrf_prevention else None, formname='retrieve_password', dbio=False, onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(email=form.vars.email) if not user: current.session.flash = \ self.messages.invalid_email redirect(self.url(args=request.args)) elif user.registration_key in ('pending', 'disabled', 'blocked'): current.session.flash = \ self.messages.registration_pending redirect(self.url(args=request.args)) password = self.random_password() passfield = self.settings.password_field d = { passfield: str(table_user[passfield].validate(password)[0]), 'registration_key': '' } user.update_record(**d) if self.settings.mailer and \ self.settings.mailer.send(to=form.vars.email, subject=self.messages.retrieve_password_subject, message=self.messages.retrieve_password % dict(password=password)): session.flash = self.messages.email_sent else: session.flash = self.messages.unable_to_send_email self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next) table_user.email.requires = old_requires return form def confirm_registration( self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ 
Returns a form to confirm user registration """ table_user = self.table_user() request = current.request # response = current.response session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.reset_password_next if self.settings.prevent_password_reset_attacks: key = request.vars.key if not key and len(request.args)>1: key = request.args[-1] if key: session._reset_password_key = key redirect(self.url(args='confirm_registration')) else: key = session._reset_password_key else: key = request.vars.key or getarg(-1) try: t0 = int(key.split('-')[0]) if time.time() - t0 > 60 * 60 * 24: raise Exception user = table_user(reset_password_key=key) if not user: raise Exception except Exception as e: session.flash = self.messages.invalid_reset_password redirect(self.url('login', vars=dict(test=e))) redirect(next, client_side=self.settings.client_side) passfield = self.settings.password_field form = SQLFORM.factory( Field('first_name', label='First Name', required=True), Field('last_name', label='Last Name', required=True), Field('new_password', 'password', label=self.messages.new_password, requires=self.table_user()[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button='Confirm Registration', hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.process().accepted: user.update_record( **{passfield: str(form.vars.new_password), 'first_name': str(form.vars.first_name), 'last_name': str(form.vars.last_name), 'registration_key': '', 'reset_password_key': ''}) session.flash = self.messages.password_changed if self.settings.login_after_password_change: self.login_user(user) redirect(next, client_side=self.settings.client_side) return form def email_registration(self, subject, body, user): """ Sends and email invitation to a user informing 
they have been registered with the application """ reset_password_key = str(int(time.time())) + '-' + web2py_uuid() link = self.url(self.settings.function, args=('confirm_registration',), vars={'key': reset_password_key}, scheme=True) d = dict(user) d.update(dict(key=reset_password_key, link=link, site=current.request.env.http_host)) if self.settings.mailer and self.settings.mailer.send( to=user.email, subject=subject % d, message=body % d): user.update_record(reset_password_key=reset_password_key) return True return False def bulk_register(self, max_emails=100): """ Creates a form for ther user to send invites to other users to join """ if not self.user: redirect(self.settings.login_url) if not self.setting.bulk_register_enabled: return HTTP(404) form = SQLFORM.factory( Field('subject','string',default=self.messages.bulk_invite_subject,requires=IS_NOT_EMPTY()), Field('emails','text',requires=IS_NOT_EMPTY()), Field('message','text',default=self.messages.bulk_invite_body,requires=IS_NOT_EMPTY()), formstyle=self.settings.formstyle) if form.process().accepted: emails = re.compile('[^\s\'"@<>,;:]+\@[^\s\'"@<>,;:]+').findall(form.vars.emails) # send the invitations emails_sent = [] emails_fail = [] emails_exist = [] for email in emails[:max_emails]: if self.table_user()(email=email): emails_exist.append(email) else: user = self.register_bare(email=email) if self.email_registration(form.vars.subject, form.vars.message, user): emails_sent.append(email) else: emails_fail.append(email) emails_fail += emails[max_emails:] form = DIV(H4('Emails sent'),UL(*[A(x,_href='mailto:'+x) for x in emails_sent]), H4('Emails failed'),UL(*[A(x,_href='mailto:'+x) for x in emails_fail]), H4('Emails existing'),UL(*[A(x,_href='mailto:'+x) for x in emails_exist])) return form def manage_tokens(self): if not self.user: redirect(self.settings.login_url) table_token =self.table_token() table_token.user_id.writable = False table_token.user_id.default = self.user.id table_token.token.writable = 
False if current.request.args(1) == 'new': table_token.token.readable = False form = SQLFORM.grid(table_token, args=['manage_tokens']) return form def reset_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to reset the user password """ table_user = self.table_user() request = current.request # response = current.response session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.reset_password_next if self.settings.prevent_password_reset_attacks: key = request.vars.key if key: session._reset_password_key = key redirect(self.url(args='reset_password')) else: key = session._reset_password_key else: key = request.vars.key try: t0 = int(key.split('-')[0]) if time.time() - t0 > 60 * 60 * 24: raise Exception user = table_user(reset_password_key=key) if not user: raise Exception except Exception: session.flash = self.messages.invalid_reset_password redirect(next, client_side=self.settings.client_side) if onvalidation is DEFAULT: onvalidation = self.settings.reset_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.reset_password_onaccept passfield = self.settings.password_field form = SQLFORM.factory( Field('new_password', 'password', label=self.messages.new_password, requires=self.table_user()[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_reset_button, hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, onvalidation=onvalidation, hideerror=self.settings.hideerror): user.update_record( **{passfield: str(form.vars.new_password), 'registration_key': '', 'reset_password_key': ''}) session.flash = self.messages.password_changed if self.settings.login_after_password_change: self.login_user(user) 
callback(onaccept, form) redirect(next, client_side=self.settings.client_side) return form def request_reset_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form to reset the user password """ table_user = self.table_user() request = current.request response = current.response session = current.session captcha = self.settings.retrieve_password_captcha or \ (self.settings.retrieve_password_captcha != False and self.settings.captcha) if next is DEFAULT: next = self.get_vars_next() or self.settings.request_reset_password_next if not self.settings.mailer: response.flash = self.messages.function_disabled return '' if onvalidation is DEFAULT: onvalidation = self.settings.request_reset_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.request_reset_password_onaccept if log is DEFAULT: log = self.messages['reset_password_log'] userfield = self.settings.login_userfield or 'username' \ if 'username' in table_user.fields else 'email' if userfield == 'email': table_user.email.requires = [ IS_EMAIL(error_message=self.messages.invalid_email), IS_IN_DB(self.db, table_user.email, error_message=self.messages.invalid_email)] if not self.settings.email_case_sensitive: table_user.email.requires.insert(0, IS_LOWER()) else: table_user.username.requires = [ IS_IN_DB(self.db, table_user.username, error_message=self.messages.invalid_username)] if not self.settings.username_case_sensitive: table_user.username.requires.insert(0, IS_LOWER()) form = SQLFORM(table_user, fields=[userfield], hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.password_reset_button, delete_label=self.messages.delete_label, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if form.accepts(request, session if self.csrf_prevention else None, formname='reset_password', dbio=False, 
onvalidation=onvalidation, hideerror=self.settings.hideerror): user = table_user(**{userfield:form.vars.get(userfield)}) if not user: session.flash = self.messages['invalid_%s' % userfield] redirect(self.url(args=request.args), client_side=self.settings.client_side) elif user.registration_key in ('pending', 'disabled', 'blocked'): session.flash = self.messages.registration_pending redirect(self.url(args=request.args), client_side=self.settings.client_side) if self.email_reset_password(user): session.flash = self.messages.email_sent else: session.flash = self.messages.unable_to_send_email self.log_event(log, user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) # old_requires = table_user.email.requires return form def email_reset_password(self, user): reset_password_key = str(int(time.time())) + '-' + web2py_uuid() link = self.url(self.settings.function, args=('reset_password',), vars={'key': reset_password_key}, scheme=True) d = dict(user) d.update(dict(key=reset_password_key, link=link)) if self.settings.mailer and self.settings.mailer.send( to=user.email, subject=self.messages.reset_password_subject, message=self.messages.reset_password % d): user.update_record(reset_password_key=reset_password_key) return True return False def retrieve_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): if self.settings.reset_password_requires_verification: return self.request_reset_password(next, onvalidation, onaccept, log) else: return self.reset_password_deprecated(next, onvalidation, onaccept, log) def change_password(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form that lets the user change password """ if not self.is_logged_in(): redirect(self.settings.login_url, client_side=self.settings.client_side) db = self.db table_user = self.table_user() s = db(table_user.id == 
self.user.id) request = current.request session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.change_password_next if onvalidation is DEFAULT: onvalidation = self.settings.change_password_onvalidation if onaccept is DEFAULT: onaccept = self.settings.change_password_onaccept if log is DEFAULT: log = self.messages['change_password_log'] passfield = self.settings.password_field requires = table_user[passfield].requires if not isinstance(requires, (list, tuple)): requires = [requires] requires = filter(lambda t: isinstance(t, CRYPT), requires) if requires: requires[0].min_length = 0 form = SQLFORM.factory( Field('old_password', 'password', requires=requires, label=self.messages.old_password), Field('new_password', 'password', label=self.messages.new_password, requires=table_user[passfield].requires), Field('new_password2', 'password', label=self.messages.verify_password, requires=[IS_EXPR( 'value==%s' % repr(request.vars.new_password), self.messages.mismatched_password)]), submit_button=self.messages.password_change_button, hidden=dict(_next=next), formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if form.accepts(request, session, formname='change_password', onvalidation=onvalidation, hideerror=self.settings.hideerror): current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first() if not form.vars['old_password'] == current_user[passfield]: form.errors['old_password'] = self.messages.invalid_password else: d = {passfield: str(form.vars.new_password)} s.update(**d) session.flash = self.messages.password_changed self.log_event(log, self.user) callback(onaccept, form) if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def profile(self, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, ): """ Returns a form that lets the user change his/her profile """ table_user = self.table_user() 
if not self.is_logged_in(): redirect(self.settings.login_url, client_side=self.settings.client_side) passfield = self.settings.password_field table_user[passfield].writable = False request = current.request session = current.session if next is DEFAULT: next = self.get_vars_next() or self.settings.profile_next if onvalidation is DEFAULT: onvalidation = self.settings.profile_onvalidation if onaccept is DEFAULT: onaccept = self.settings.profile_onaccept if log is DEFAULT: log = self.messages['profile_log'] form = SQLFORM( table_user, self.user.id, fields=self.settings.profile_fields, hidden=dict(_next=next), showid=self.settings.showid, submit_button=self.messages.profile_save_button, delete_label=self.messages.delete_label, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator, deletable=self.settings.allow_delete_accounts, ) if form.accepts(request, session, formname='profile', onvalidation=onvalidation, hideerror=self.settings.hideerror): self.user.update(table_user._filter_fields(form.vars)) session.flash = self.messages.profile_updated self.log_event(log, self.user) callback(onaccept, form) if form.deleted: return self.logout() if not next: next = self.url(args=request.args) else: next = replace_id(next, form) redirect(next, client_side=self.settings.client_side) return form def run_login_onaccept(self): onaccept = self.settings.login_onaccept if onaccept: form = Storage(dict(vars=self.user)) if not isinstance(onaccept, (list, tuple)): onaccept = [onaccept] for callback in onaccept: callback(form) def is_impersonating(self): return self.is_logged_in() and 'impersonator' in current.session.auth def impersonate(self, user_id=DEFAULT): """ To use this make a POST to `http://..../impersonate request.post_vars.user_id=<id>` Set request.post_vars.user_id to 0 to restore original user. 
requires impersonator is logged in and:: has_permission('impersonate', 'auth_user', user_id) """ request = current.request session = current.session auth = session.auth table_user = self.table_user() if not self.is_logged_in(): raise HTTP(401, "Not Authorized") current_id = auth.user.id requested_id = user_id if user_id is DEFAULT: user_id = current.request.post_vars.user_id if user_id and user_id != self.user.id and user_id != '0': if not self.has_permission('impersonate', self.table_user(), user_id): raise HTTP(403, "Forbidden") user = table_user(user_id) if not user: raise HTTP(401, "Not Authorized") auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL) auth.user.update( table_user._filter_fields(user, True)) self.user = auth.user self.update_groups() log = self.messages['impersonate_log'] self.log_event(log, dict(id=current_id, other_id=auth.user.id)) self.run_login_onaccept() elif user_id in (0, '0'): if self.is_impersonating(): session.clear() session.update(pickle.loads(auth.impersonator)) self.user = session.auth.user self.update_groups() self.run_login_onaccept() return None if requested_id is DEFAULT and not request.post_vars: return SQLFORM.factory(Field('user_id', 'integer')) return SQLFORM(table_user, user.id, readonly=True) def update_groups(self): if not self.user: return user_groups = self.user_groups = {} if current.session.auth: current.session.auth.user_groups = self.user_groups table_group = self.table_group() table_membership = self.table_membership() memberships = self.db( table_membership.user_id == self.user.id).select() for membership in memberships: group = table_group(membership.group_id) if group: user_groups[membership.group_id] = group.role def groups(self): """ Displays the groups and their roles for the logged in user """ if not self.is_logged_in(): redirect(self.settings.login_url) table_membership = self.table_membership() memberships = self.db( table_membership.user_id == self.user.id).select() table = TABLE() for 
membership in memberships: table_group = self.table_group() groups = self.db(table_group.id == membership.group_id).select() if groups: group = groups[0] table.append(TR(H3(group.role, '(%s)' % group.id))) table.append(TR(P(group.description))) if not memberships: return None return table def not_authorized(self): """ You can change the view for this page to make it look as you like """ if current.request.ajax: raise HTTP(403, 'ACCESS DENIED') return self.messages.access_denied def requires(self, condition, requires_login=True, otherwise=None): """ Decorator that prevents access to action if not logged in """ def decorator(action): def f(*a, **b): basic_allowed, basic_accepted, user = self.basic() user = user or self.user if requires_login: if not user: if current.request.ajax: raise HTTP(401, self.messages.ajax_failed_authentication) elif not otherwise is None: if callable(otherwise): return otherwise() redirect(otherwise) elif self.settings.allow_basic_login_only or \ basic_accepted or current.request.is_restful: raise HTTP(403, "Not authorized") else: next = self.here() current.session.flash = current.response.flash return call_or_redirect( self.settings.on_failed_authentication, self.settings.login_url + '?_next=' + urllib.quote(next)) if callable(condition): flag = condition() else: flag = condition if not flag: current.session.flash = self.messages.access_denied return call_or_redirect( self.settings.on_failed_authorization) return action(*a, **b) f.__doc__ = action.__doc__ f.__name__ = action.__name__ f.__dict__.update(action.__dict__) return f return decorator def requires_login(self, otherwise=None): """ Decorator that prevents access to action if not logged in """ return self.requires(True, otherwise=otherwise) def requires_login_or_token(self, otherwise=None): if self.settings.enable_tokens == True: user = None request = current.request token = request.env.http_web2py_user_token or request.vars._token table_token = self.table_token() table_user = 
self.table_user() from gluon.settings import global_settings if global_settings.web2py_runtime_gae: row = table_token(token=token) if row: user = table_user(row.user_id) else: row = self.db(table_token.token==token)(table_user.id==table_token.user_id).select().first() if row: user = row[table_user._tablename] if user: self.login_user(user) return self.requires(True, otherwise=otherwise) def requires_membership(self, role=None, group_id=None, otherwise=None): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. """ def has_membership(self=self, group_id=group_id, role=role): return self.has_membership(group_id=group_id, role=role) return self.requires(has_membership, otherwise=otherwise) def requires_permission(self, name, table_name='', record_id=0, otherwise=None): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of any group (role) that has 'name' access to 'table_name', 'record_id'. """ def has_permission(self=self, name=name, table_name=table_name, record_id=record_id): return self.has_permission(name, table_name, record_id) return self.requires(has_permission, otherwise=otherwise) def requires_signature(self, otherwise=None, hash_vars=True): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. 
""" def verify(): return URL.verify(current.request, user_signature=True, hash_vars=hash_vars) return self.requires(verify, otherwise) def add_group(self, role, description=''): """ Creates a group associated to a role """ group_id = self.table_group().insert( role=role, description=description) self.log_event(self.messages['add_group_log'], dict(group_id=group_id, role=role)) return group_id def del_group(self, group_id): """ Deletes a group """ self.db(self.table_group().id == group_id).delete() self.db(self.table_membership().group_id == group_id).delete() self.db(self.table_permission().group_id == group_id).delete() if group_id in self.user_groups: del self.user_groups[group_id] self.log_event(self.messages.del_group_log, dict(group_id=group_id)) def id_group(self, role): """ Returns the group_id of the group specified by the role """ rows = self.db(self.table_group().role == role).select() if not rows: return None return rows[0].id def user_group(self, user_id=None): """ Returns the group_id of the group uniquely associated to this user i.e. 
`role=user:[user_id]` """ return self.id_group(self.user_group_role(user_id)) def user_group_role(self, user_id=None): if not self.settings.create_user_groups: return None if user_id: user = self.table_user()[user_id] else: user = self.user return self.settings.create_user_groups % user def has_membership(self, group_id=None, user_id=None, role=None): """ Checks if user is member of group_id or role """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.table_membership() if group_id and user_id and self.db((membership.user_id == user_id) & (membership.group_id == group_id)).select(): r = True else: r = False self.log_event(self.messages['has_membership_log'], dict(user_id=user_id, group_id=group_id, check=r)) return r def add_membership(self, group_id=None, user_id=None, role=None): """ Gives user_id membership of group_id or role if user is None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) try: group_id = int(group_id) except: group_id = self.id_group(group_id) # interpret group_id as a role if not user_id and self.user: user_id = self.user.id membership = self.table_membership() record = membership(user_id=user_id, group_id=group_id) if record: return record.id else: id = membership.insert(group_id=group_id, user_id=user_id) if role: self.user_groups[group_id] = role else: self.update_groups() self.log_event(self.messages['add_membership_log'], dict(user_id=user_id, group_id=group_id)) return id def del_membership(self, group_id=None, user_id=None, role=None): """ Revokes membership from group_id to user_id if user_id is None than user_id is that of current logged in user """ group_id = group_id or self.id_group(role) if not user_id and self.user: user_id = self.user.id membership = self.table_membership() 
self.log_event(self.messages['del_membership_log'], dict(user_id=user_id, group_id=group_id)) ret = self.db(membership.user_id == user_id)(membership.group_id == group_id).delete() if group_id in self.user_groups: del self.user_groups[group_id] return ret def has_permission(self, name='any', table_name='', record_id=0, user_id=None, group_id=None, ): """ Checks if user_id or current logged in user is member of a group that has 'name' permission on 'table_name' and 'record_id' if group_id is passed, it checks whether the group has the permission """ if not group_id and self.settings.everybody_group_id and \ self.has_permission( name, table_name, record_id, user_id=None, group_id=self.settings.everybody_group_id): return True if not user_id and not group_id and self.user: user_id = self.user.id if user_id: membership = self.table_membership() rows = self.db(membership.user_id == user_id).select(membership.group_id) groups = set([row.group_id for row in rows]) if group_id and not group_id in groups: return False else: groups = set([group_id]) permission = self.table_permission() rows = self.db(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == record_id).select(permission.group_id) groups_required = set([row.group_id for row in rows]) if record_id: rows = self.db(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == 0).select(permission.group_id) groups_required = groups_required.union(set([row.group_id for row in rows])) if groups.intersection(groups_required): r = True else: r = False if user_id: self.log_event(self.messages['has_permission_log'], dict(user_id=user_id, name=name, table_name=table_name, record_id=record_id)) return r def add_permission(self, group_id, name='any', table_name='', record_id=0, ): """ Gives group_id 'name' access to 'table_name' and 'record_id' """ permission = self.table_permission() if group_id == 0: group_id = self.user_group() record = 
self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))( permission.record_id == long(record_id)).select(limitby=(0, 1), orderby_on_limitby=False).first() if record: id = record.id else: id = permission.insert(group_id=group_id, name=name, table_name=str(table_name), record_id=long(record_id)) self.log_event(self.messages['add_permission_log'], dict(permission_id=id, group_id=group_id, name=name, table_name=table_name, record_id=record_id)) return id def del_permission(self, group_id, name='any', table_name='', record_id=0, ): """ Revokes group_id 'name' access to 'table_name' and 'record_id' """ permission = self.table_permission() self.log_event(self.messages['del_permission_log'], dict(group_id=group_id, name=name, table_name=table_name, record_id=record_id)) return self.db(permission.group_id == group_id)(permission.name == name)(permission.table_name == str(table_name))(permission.record_id == long(record_id)).delete() def accessible_query(self, name, table, user_id=None): """ Returns a query with all accessible records for user_id or the current logged in user this method does not work on GAE because uses JOIN and IN Example: Use as:: db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL) """ if not user_id: user_id = self.user_id db = self.db if isinstance(table, str) and table in self.db.tables(): table = self.db[table] elif isinstance(table, (Set, Query)): # experimental: build a chained query for all tables if isinstance(table, Set): cquery = table.query else: cquery = table tablenames = db._adapter.tables(cquery) for tablename in tablenames: cquery &= self.accessible_query(name, tablename, user_id=user_id) return cquery if not isinstance(table, str) and\ self.has_permission(name, table, 0, user_id): return table.id > 0 membership = self.table_membership() permission = self.table_permission() query = table.id.belongs( db(membership.user_id == user_id) (membership.group_id == 
permission.group_id) (permission.name == name) (permission.table_name == table) ._select(permission.record_id)) if self.settings.everybody_group_id: query |= table.id.belongs( db(permission.group_id == self.settings.everybody_group_id) (permission.name == name) (permission.table_name == table) ._select(permission.record_id)) return query @staticmethod def archive(form, archive_table=None, current_record='current_record', archive_current=False, fields=None): """ If you have a table (db.mytable) that needs full revision history you can just do:: form=crud.update(db.mytable,myrecord,onaccept=auth.archive) or:: form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive) crud.archive will define a new table "mytable_archive" and store a copy of the current record (if archive_current=True) or a copy of the previous record (if archive_current=False) in the newly created table including a reference to the current record. fields allows to specify extra fields that need to be archived. If you want to access such table you need to define it yourself in a model:: db.define_table('mytable_archive', Field('current_record',db.mytable), db.mytable) Notice such table includes all fields of db.mytable plus one: current_record. crud.archive does not timestamp the stored record unless your original table has a fields like:: db.define_table(..., Field('saved_on','datetime', default=request.now,update=request.now,writable=False), Field('saved_by',auth.user, default=auth.user_id,update=auth.user_id,writable=False), there is nothing special about these fields since they are filled before the record is archived. 
If you want to change the archive table name and the name of the reference field you can do, for example:: db.define_table('myhistory', Field('parent_record',db.mytable), db.mytable) and use it as:: form=crud.update(db.mytable,myrecord, onaccept=lambda form:crud.archive(form, archive_table=db.myhistory, current_record='parent_record')) """ if not archive_current and not form.record: return None table = form.table if not archive_table: archive_table_name = '%s_archive' % table if not archive_table_name in table._db: table._db.define_table( archive_table_name, Field(current_record, table), *[field.clone(unique=False) for field in table]) archive_table = table._db[archive_table_name] new_record = {current_record: form.vars.id} for fieldname in archive_table.fields: if not fieldname in ['id', current_record]: if archive_current and fieldname in form.vars: new_record[fieldname] = form.vars[fieldname] elif form.record and fieldname in form.record: new_record[fieldname] = form.record[fieldname] if fields: new_record.update(fields) id = archive_table.insert(**new_record) return id def wiki(self, slug=None, env=None, render='markmin', manage_permissions=False, force_prefix='', restrict_search=False, resolve=True, extra=None, menu_groups=None, templates=None, migrate=True, controller=None, function=None, force_render=False, groups=None): if controller and function: resolve = False if not hasattr(self, '_wiki'): self._wiki = Wiki(self, render=render, manage_permissions=manage_permissions, force_prefix=force_prefix, restrict_search=restrict_search, env=env, extra=extra or {}, menu_groups=menu_groups, templates=templates, migrate=migrate, controller=controller, function=function, groups=groups) else: self._wiki.env.update(env or {}) # if resolve is set to True, process request as wiki call # resolve=False allows initial setup without wiki redirection wiki = None if resolve: if slug: wiki = self._wiki.read(slug, force_render) if isinstance(wiki, dict) and 
wiki.has_key('content'): # FIXME: .has_key() is deprecated # We don't want to return a dict object, just the wiki wiki = wiki['content'] else: wiki = self._wiki() if isinstance(wiki, basestring): wiki = XML(wiki) return wiki def wikimenu(self): """To be used in menu.py for app wide wiki menus""" if (hasattr(self, "_wiki") and self._wiki.settings.controller and self._wiki.settings.function): self._wiki.automenu() class Crud(object): def url(self, f=None, args=None, vars=None): """ This should point to the controller that exposes download and crud """ if args is None: args = [] if vars is None: vars = {} return URL(c=self.settings.controller, f=f, args=args, vars=vars) def __init__(self, environment, db=None, controller='default'): self.db = db if not db and environment and isinstance(environment, DAL): self.db = environment elif not db: raise SyntaxError("must pass db as first or second argument") self.environment = current settings = self.settings = Settings() settings.auth = None settings.logger = None settings.create_next = None settings.update_next = None settings.controller = controller settings.delete_next = self.url() settings.download_url = self.url('download') settings.create_onvalidation = StorageList() settings.update_onvalidation = StorageList() settings.delete_onvalidation = StorageList() settings.create_onaccept = StorageList() settings.update_onaccept = StorageList() settings.update_ondelete = StorageList() settings.delete_onaccept = StorageList() settings.update_deletable = True settings.showid = False settings.keepvalues = False settings.create_captcha = None settings.update_captcha = None settings.captcha = None settings.formstyle = 'table3cols' settings.label_separator = ': ' settings.hideerror = False settings.detect_record_change = True settings.hmac_key = None settings.lock_keys = True messages = self.messages = Messages(current.T) messages.submit_button = 'Submit' messages.delete_label = 'Check to delete' messages.record_created = 'Record 
Created' messages.record_updated = 'Record Updated' messages.record_deleted = 'Record Deleted' messages.update_log = 'Record %(id)s updated' messages.create_log = 'Record %(id)s created' messages.read_log = 'Record %(id)s read' messages.delete_log = 'Record %(id)s deleted' messages.lock_keys = True def __call__(self): args = current.request.args if len(args) < 1: raise HTTP(404) elif args[0] == 'tables': return self.tables() elif len(args) > 1 and not args(1) in self.db.tables: raise HTTP(404) table = self.db[args(1)] if args[0] == 'create': return self.create(table) elif args[0] == 'select': return self.select(table, linkto=self.url(args='read')) elif args[0] == 'search': form, rows = self.search(table, linkto=self.url(args='read')) return DIV(form, SQLTABLE(rows)) elif args[0] == 'read': return self.read(table, args(2)) elif args[0] == 'update': return self.update(table, args(2)) elif args[0] == 'delete': return self.delete(table, args(2)) else: raise HTTP(404) def log_event(self, message, vars): if self.settings.logger: self.settings.logger.log_event(message, vars, origin='crud') def has_permission(self, name, table, record=0): if not self.settings.auth: return True try: record_id = record.id except: record_id = record return self.settings.auth.has_permission(name, str(table), record_id) def tables(self): return TABLE(*[TR(A(name, _href=self.url(args=('select', name)))) for name in self.db.tables]) @staticmethod def archive(form, archive_table=None, current_record='current_record'): return Auth.archive(form, archive_table=archive_table, current_record=current_record) def update(self, table, record, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, ondelete=DEFAULT, log=DEFAULT, message=DEFAULT, deletable=DEFAULT, formname=DEFAULT, **attributes ): if not (isinstance(table, Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, Table): table = self.db[table] try: record_id = 
record.id except: record_id = record or 0 if record_id and not self.has_permission('update', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) if not record_id and not self.has_permission('create', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request response = current.response session = current.session if request.extension == 'json' and request.vars.json: request.vars.update(json_parser.loads(request.vars.json)) if next is DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.update_next if onvalidation is DEFAULT: onvalidation = self.settings.update_onvalidation if onaccept is DEFAULT: onaccept = self.settings.update_onaccept if ondelete is DEFAULT: ondelete = self.settings.update_ondelete if log is DEFAULT: log = self.messages['update_log'] if deletable is DEFAULT: deletable = self.settings.update_deletable if message is DEFAULT: message = self.messages.record_updated if not 'hidden' in attributes: attributes['hidden'] = {} attributes['hidden']['_next'] = next form = SQLFORM( table, record, showid=self.settings.showid, submit_button=self.messages.submit_button, delete_label=self.messages.delete_label, deletable=deletable, upload=self.settings.download_url, formstyle=self.settings.formstyle, separator=self.settings.label_separator, **attributes # contains hidden ) self.accepted = False self.deleted = False captcha = self.settings.update_captcha or self.settings.captcha if record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') captcha = self.settings.create_captcha or self.settings.captcha if not record and captcha: addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row') if not request.extension in ('html', 'load'): (_session, _formname) = (None, None) else: (_session, _formname) = ( session, '%s/%s' % (table._tablename, form.record_id)) if not 
formname is DEFAULT: _formname = formname keepvalues = self.settings.keepvalues if request.vars.delete_this_record: keepvalues = False if isinstance(onvalidation, StorageList): onvalidation = onvalidation.get(table._tablename, []) if form.accepts(request, _session, formname=_formname, onvalidation=onvalidation, keepvalues=keepvalues, hideerror=self.settings.hideerror, detect_record_change=self.settings.detect_record_change): self.accepted = True response.flash = message if log: self.log_event(log, form.vars) if request.vars.delete_this_record: self.deleted = True message = self.messages.record_deleted callback(ondelete, form, table._tablename) response.flash = message callback(onaccept, form, table._tablename) if not request.extension in ('html', 'load'): raise HTTP(200, 'RECORD CREATED/UPDATED') if isinstance(next, (list, tuple)): # fix issue with 2.6 next = next[0] if next: # Only redirect when explicit next = replace_id(next, form) session.flash = response.flash redirect(next) elif not request.extension in ('html', 'load'): raise HTTP(401, serializers.json(dict(errors=form.errors))) return form def create(self, table, next=DEFAULT, onvalidation=DEFAULT, onaccept=DEFAULT, log=DEFAULT, message=DEFAULT, formname=DEFAULT, **attributes ): if next is DEFAULT: next = self.settings.create_next if onvalidation is DEFAULT: onvalidation = self.settings.create_onvalidation if onaccept is DEFAULT: onaccept = self.settings.create_onaccept if log is DEFAULT: log = self.messages['create_log'] if message is DEFAULT: message = self.messages.record_created return self.update( table, None, next=next, onvalidation=onvalidation, onaccept=onaccept, log=log, message=message, deletable=False, formname=formname, **attributes ) def read(self, table, record): if not (isinstance(table, Table) or table in self.db.tables) \ or (isinstance(record, str) and not str(record).isdigit()): raise HTTP(404) if not isinstance(table, Table): table = self.db[table] if not self.has_permission('read', 
table, record): redirect(self.settings.auth.settings.on_failed_authorization) form = SQLFORM( table, record, readonly=True, comments=False, upload=self.settings.download_url, showid=self.settings.showid, formstyle=self.settings.formstyle, separator=self.settings.label_separator ) if not current.request.extension in ('html', 'load'): return table._filter_fields(form.record, id=True) return form def delete(self, table, record_id, next=DEFAULT, message=DEFAULT, ): if not (isinstance(table, Table) or table in self.db.tables): raise HTTP(404) if not isinstance(table, Table): table = self.db[table] if not self.has_permission('delete', table, record_id): redirect(self.settings.auth.settings.on_failed_authorization) request = current.request session = current.session if next is DEFAULT: next = request.get_vars._next \ or request.post_vars._next \ or self.settings.delete_next if message is DEFAULT: message = self.messages.record_deleted record = table[record_id] if record: callback(self.settings.delete_onvalidation, record) del table[record_id] callback(self.settings.delete_onaccept, record, table._tablename) session.flash = message redirect(next) def rows( self, table, query=None, fields=None, orderby=None, limitby=None, ): if not (isinstance(table, Table) or table in self.db.tables): raise HTTP(404) if not self.has_permission('select', table): redirect(self.settings.auth.settings.on_failed_authorization) #if record_id and not self.has_permission('select', table): # redirect(self.settings.auth.settings.on_failed_authorization) if not isinstance(table, Table): table = self.db[table] if not query: query = table.id > 0 if not fields: fields = [field for field in table if field.readable] else: fields = [table[f] if isinstance(f, str) else f for f in fields] rows = self.db(query).select(*fields, **dict(orderby=orderby, limitby=limitby)) return rows def select(self, table, query=None, fields=None, orderby=None, limitby=None, headers=None, **attr ): headers = headers or {} rows = 
self.rows(table, query, fields, orderby, limitby) if not rows: return None # Nicer than an empty table. if not 'upload' in attr: attr['upload'] = self.url('download') if not current.request.extension in ('html', 'load'): return rows.as_list() if not headers: if isinstance(table, str): table = self.db[table] headers = dict((str(k), k.label) for k in table) return SQLTABLE(rows, headers=headers, **attr) def get_format(self, field): rtable = field._db[field.type[10:]] format = rtable.get('_format', None) if format and isinstance(format, str): return format[2:-2] return field.name def get_query(self, field, op, value, refsearch=False): try: if refsearch: format = self.get_format(field) if op == 'equals': if not refsearch: return field == value else: return lambda row: row[field.name][format] == value elif op == 'not equal': if not refsearch: return field != value else: return lambda row: row[field.name][format] != value elif op == 'greater than': if not refsearch: return field > value else: return lambda row: row[field.name][format] > value elif op == 'less than': if not refsearch: return field < value else: return lambda row: row[field.name][format] < value elif op == 'starts with': if not refsearch: return field.like(value + '%') else: return lambda row: str(row[field.name][format]).startswith(value) elif op == 'ends with': if not refsearch: return field.like('%' + value) else: return lambda row: str(row[field.name][format]).endswith(value) elif op == 'contains': if not refsearch: return field.like('%' + value + '%') else: return lambda row: value in row[field.name][format] except: return None def search(self, *tables, **args): """ Creates a search form and its results for a table Examples: Use as:: form, results = crud.search(db.test, queries = ['equals', 'not equal', 'contains'], query_labels={'equals':'Equals', 'not equal':'Not equal'}, fields = ['id','children'], field_labels = { 'id':'ID','children':'Children'}, zero='Please choose', query = (db.test.id > 
0)&(db.test.id != 3) ) """ table = tables[0] fields = args.get('fields', table.fields) validate = args.get('validate', True) request = current.request db = self.db if not (isinstance(table, Table) or table in db.tables): raise HTTP(404) attributes = {} for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'): if key in args: attributes[key] = args[key] tbl = TABLE() selected = [] refsearch = [] results = [] showall = args.get('showall', False) if showall: selected = fields chkall = args.get('chkall', False) if chkall: for f in fields: request.vars['chk%s' % f] = 'on' ops = args.get('queries', []) zero = args.get('zero', '') if not ops: ops = ['equals', 'not equal', 'greater than', 'less than', 'starts with', 'ends with', 'contains'] ops.insert(0, zero) query_labels = args.get('query_labels', {}) query = args.get('query', table.id > 0) field_labels = args.get('field_labels', {}) for field in fields: field = table[field] if not field.readable: continue fieldname = field.name chkval = request.vars.get('chk' + fieldname, None) txtval = request.vars.get('txt' + fieldname, None) opval = request.vars.get('op' + fieldname, None) row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname, _disabled=(field.type == 'id'), value=(field.type == 'id' or chkval == 'on'))), TD(field_labels.get(fieldname, field.label)), TD(SELECT([OPTION(query_labels.get(op, op), _value=op) for op in ops], _name="op" + fieldname, value=opval)), TD(INPUT(_type="text", _name="txt" + fieldname, _value=txtval, _id='txt' + fieldname, _class=str(field.type)))) tbl.append(row) if request.post_vars and (chkval or field.type == 'id'): if txtval and opval != '': if field.type[0:10] == 'reference ': refsearch.append(self.get_query(field, opval, txtval, refsearch=True)) elif validate: value, error = field.validate(txtval) if not error: ### TODO deal with 'starts with', 'ends with', 'contains' on GAE query &= self.get_query(field, opval, value) else: row[3].append(DIV(error, _class='error')) 
else: query &= self.get_query(field, opval, txtval) selected.append(field) form = FORM(tbl, INPUT(_type="submit")) if selected: try: results = db(query).select(*selected, **attributes) for r in refsearch: results = results.find(r) except: # hmmm, we should do better here results = None return form, results urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor())) def fetch(url, data=None, headers=None, cookie=Cookie.SimpleCookie(), user_agent='Mozilla/5.0'): headers = headers or {} if not data is None: data = urllib.urlencode(data) if user_agent: headers['User-agent'] = user_agent headers['Cookie'] = ' '.join( ['%s=%s;' % (c.key, c.value) for c in cookie.values()]) try: from google.appengine.api import urlfetch except ImportError: req = urllib2.Request(url, data, headers) html = urllib2.urlopen(req).read() else: method = ((data is None) and urlfetch.GET) or urlfetch.POST while url is not None: response = urlfetch.fetch(url=url, payload=data, method=method, headers=headers, allow_truncated=False, follow_redirects=False, deadline=10) # next request will be a get, so no need to send the data again data = None method = urlfetch.GET # load cookies from the response cookie.load(response.headers.get('set-cookie', '')) url = response.headers.get('location') html = response.content return html regex_geocode = \ re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""") def geocode(address): try: a = urllib.quote(address) txt = fetch('http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s' % a) item = regex_geocode.search(txt) (la, lo) = (float(item.group('la')), float(item.group('lo'))) return (la, lo) except: return (0.0, 0.0) def reverse_geocode(lat, lng, lang=None): """ Try to get an approximate address for a given latitude, longitude. 
""" if not lang: lang = current.T.accepted_language try: return json_parser.loads(fetch('http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s' % locals()))['results'][0]['formatted_address'] except: return '' def universal_caller(f, *a, **b): c = f.func_code.co_argcount n = f.func_code.co_varnames[:c] defaults = f.func_defaults or [] pos_args = n[0:-len(defaults)] named_args = n[-len(defaults):] arg_dict = {} # Fill the arg_dict with name and value for the submitted, positional values for pos_index, pos_val in enumerate(a[:c]): arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument # There might be pos_args left, that are sent as named_values. Gather them as well. # If a argument already is populated with values we simply replaces them. for arg_name in pos_args[len(arg_dict):]: if arg_name in b: arg_dict[arg_name] = b[arg_name] if len(arg_dict) >= len(pos_args): # All the positional arguments is found. The function may now be called. # However, we need to update the arg_dict with the values from the named arguments as well. for arg_name in named_args: if arg_name in b: arg_dict[arg_name] = b[arg_name] return f(**arg_dict) # Raise an error, the function cannot be called. 
raise HTTP(404, "Object does not exist") class Service(object): def __init__(self, environment=None): self.run_procedures = {} self.csv_procedures = {} self.xml_procedures = {} self.rss_procedures = {} self.json_procedures = {} self.jsonrpc_procedures = {} self.jsonrpc2_procedures = {} self.xmlrpc_procedures = {} self.amfrpc_procedures = {} self.amfrpc3_procedures = {} self.soap_procedures = {} def run(self, f): """ Example: Use as:: service = Service() @service.run def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/run/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def csv(self, f): """ Example: Use as:: service = Service() @service.csv def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/csv/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def xml(self, f): """ Example: Use as:: service = Service() @service.xml def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/xml/myfunction?a=3&b=4 """ self.run_procedures[f.__name__] = f return f def rss(self, f): """ Example: Use as:: service = Service() @service.rss def myfunction(): return dict(title=..., link=..., description=..., created_on=..., entries=[dict(title=..., link=..., description=..., created_on=...]) def call(): return service() Then call it with: wget http://..../app/default/call/rss/myfunction """ self.rss_procedures[f.__name__] = f return f def json(self, f): """ Example: Use as:: service = Service() @service.json def myfunction(a, b): return [{a: b}] def call(): return service() Then call it with:; wget http://..../app/default/call/json/myfunction?a=hello&b=world """ self.json_procedures[f.__name__] = f return f def jsonrpc(self, f): """ Example: Use as:: service = Service() @service.jsonrpc def myfunction(a, b): return a + b def call(): return service() Then call it 
with: wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world """ self.jsonrpc_procedures[f.__name__] = f return f def jsonrpc2(self, f): """ Example: Use as:: service = Service() @service.jsonrpc2 def myfunction(a, b): return a + b def call(): return service() Then call it with: wget --post-data '{"jsonrpc": "2.0", "id": 1, "method": "myfunction", "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2 """ self.jsonrpc2_procedures[f.__name__] = f return f def xmlrpc(self, f): """ Example: Use as:: service = Service() @service.xmlrpc def myfunction(a, b): return a + b def call(): return service() The call it with: wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world """ self.xmlrpc_procedures[f.__name__] = f return f def amfrpc(self, f): """ Example: Use as:: service = Service() @service.amfrpc def myfunction(a, b): return a + b def call(): return service() Then call it with:: wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world """ self.amfrpc_procedures[f.__name__] = f return f def amfrpc3(self, domain='default'): """ Example: Use as:: service = Service() @service.amfrpc3('domain') def myfunction(a, b): return a + b def call(): return service() Then call it with: wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world """ if not isinstance(domain, str): raise SyntaxError("AMF3 requires a domain for function") def _amfrpc3(f): if domain: self.amfrpc3_procedures[domain + '.' 
+ f.__name__] = f else: self.amfrpc3_procedures[f.__name__] = f return f return _amfrpc3 def soap(self, name=None, returns=None, args=None, doc=None): """ Example: Use as:: service = Service() @service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,}) def myfunction(a, b): return a + b def call(): return service() Then call it with:: from gluon.contrib.pysimplesoap.client import SoapClient client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL") response = client.MyFunction(a=1,b=2) return response['result'] It also exposes online generated documentation and xml example messages at `http://..../app/default/call/soap` """ def _soap(f): self.soap_procedures[name or f.__name__] = f, returns, args, doc return f return _soap def serve_run(self, args=None): request = current.request if not args: args = request.args if args and args[0] in self.run_procedures: return str(universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars))) self.error() def serve_csv(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'text/x-csv' if not args: args = request.args def none_exception(value): if isinstance(value, unicode): return value.encode('utf8') if hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') if value is None: return '<NULL>' return value if args and args[0] in self.run_procedures: import types r = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) s = cStringIO.StringIO() if hasattr(r, 'export_to_csv_file'): r.export_to_csv_file(s) elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)): import csv writer = csv.writer(s) writer.writerow(r[0].keys()) for line in r: writer.writerow([none_exception(v) for v in line.values()]) else: import csv writer = csv.writer(s) for line in r: writer.writerow(line) return s.getvalue() self.error() def serve_xml(self, args=None): request = current.request 
response = current.response response.headers['Content-Type'] = 'text/xml' if not args: args = request.args if args and args[0] in self.run_procedures: s = universal_caller(self.run_procedures[args[0]], *args[1:], **dict(request.vars)) if hasattr(s, 'as_list'): s = s.as_list() return serializers.xml(s, quote=False) self.error() def serve_rss(self, args=None): request = current.request response = current.response if not args: args = request.args if args and args[0] in self.rss_procedures: feed = universal_caller(self.rss_procedures[args[0]], *args[1:], **dict(request.vars)) else: self.error() response.headers['Content-Type'] = 'application/rss+xml' return serializers.rss(feed) def serve_json(self, args=None): request = current.request response = current.response response.headers['Content-Type'] = 'application/json; charset=utf-8' if not args: args = request.args d = dict(request.vars) if args and args[0] in self.json_procedures: s = universal_caller(self.json_procedures[args[0]], *args[1:], **d) if hasattr(s, 'as_list'): s = s.as_list() return response.json(s) self.error() class JsonRpcException(Exception): def __init__(self, code, info): jrpc_error = Service.jsonrpc_errors.get(code) if jrpc_error: self.message, self.description = jrpc_error self.code, self.info = code, info # jsonrpc 2.0 error types. records the following structure {code: (message,meaning)} jsonrpc_errors = { -32700: ("Parse error. 
Invalid JSON was received by the server.", "An error occurred on the server while parsing the JSON text."), -32600: ("Invalid Request", "The JSON sent is not a valid Request object."), -32601: ("Method not found", "The method does not exist / is not available."), -32602: ("Invalid params", "Invalid method parameter(s)."), -32603: ("Internal error", "Internal JSON-RPC error."), -32099: ("Server error", "Reserved for implementation-defined server-errors.")} def serve_jsonrpc(self): def return_response(id, result): return serializers.json({'version': '1.1', 'id': id, 'result': result, 'error': None}) def return_error(id, code, message, data=None): error = {'name': 'JSONRPCError', 'code': code, 'message': message} if data is not None: error['data'] = data return serializers.json({'id': id, 'version': '1.1', 'error': error, }) request = current.request response = current.response response.headers['Content-Type'] = 'application/json; charset=utf-8' methods = self.jsonrpc_procedures data = json_parser.loads(request.body.read()) jsonrpc_2 = data.get('jsonrpc') if jsonrpc_2: #hand over to version 2 of the protocol return self.serve_jsonrpc2(data) id, method, params = data.get('id'), data.get('method'), data.get('params', []) if id is None: return return_error(0, 100, 'missing id') if not method in methods: return return_error(id, 100, 'method "%s" does not exist' % method) try: if isinstance(params, dict): s = methods[method](**params) else: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() return return_response(id, s) except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except: etype, eval, etb = sys.exc_info() message = '%s: %s' % (etype.__name__, eval) data = request.is_local and traceback.format_tb(etb) logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb))) return return_error(id, 100, message, data) def serve_jsonrpc2(self, data=None, batch_element=False): def return_response(id, result): if not 
must_respond: return None return serializers.json({'jsonrpc': '2.0', 'id': id, 'result': result}) def return_error(id, code, message=None, data=None): error = {'code': code} if Service.jsonrpc_errors.has_key(code): error['message'] = Service.jsonrpc_errors[code][0] error['data'] = Service.jsonrpc_errors[code][1] if message is not None: error['message'] = message if data is not None: error['data'] = data return serializers.json({'jsonrpc': '2.0', 'id': id, 'error': error}) def validate(data): """ Validate request as defined in: http://www.jsonrpc.org/specification#request_object. Args: data(str): The json object. Returns: - True -- if successful - False -- if no error should be reported (i.e. data is missing 'id' member) Raises: JsonRPCException """ iparms = set(data.keys()) mandatory_args = set(['jsonrpc', 'method']) missing_args = mandatory_args - iparms if missing_args: raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args)) if data['jsonrpc'] != '2.0': raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc']) if 'id' not in iparms: return False return True request = current.request response = current.response if not data: response.headers['Content-Type'] = 'application/json; charset=utf-8' try: data = json_parser.loads(request.body.read()) except ValueError: # decoding error in json lib return return_error(None, -32700) # Batch handling if isinstance(data, list) and not batch_element: retlist = [] for c in data: retstr = self.serve_jsonrpc2(c, batch_element=True) if retstr: # do not add empty responses retlist.append(retstr) if len(retlist) == 0: # return nothing return '' else: return "[" + ','.join(retlist) + "]" methods = self.jsonrpc2_procedures methods.update(self.jsonrpc_procedures) try: must_respond = validate(data) except Service.JsonRpcException, e: return return_error(None, e.code, e.info) id, method, params = data.get('id'), data['method'], data.get('params', '') if not method in methods: 
return return_error(id, -32601, data='Method "%s" does not exist' % method) try: if isinstance(params, dict): s = methods[method](**params) else: s = methods[method](*params) if hasattr(s, 'as_list'): s = s.as_list() if must_respond: return return_response(id, s) else: return '' except HTTP, e: raise e except Service.JsonRpcException, e: return return_error(id, e.code, e.info) except: etype, eval, etb = sys.exc_info() data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb)) logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb))) return return_error(id, -32099, data=data) def serve_xmlrpc(self): request = current.request response = current.response services = self.xmlrpc_procedures.values() return response.xmlrpc(request, services) def serve_amfrpc(self, version=0): try: import pyamf import pyamf.remoting.gateway except: return "pyamf not installed or not in Python sys.path" request = current.request response = current.response if version == 3: services = self.amfrpc3_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) pyamf_request = pyamf.remoting.decode(request.body) else: services = self.amfrpc_procedures base_gateway = pyamf.remoting.gateway.BaseGateway(services) context = pyamf.get_context(pyamf.AMF0) pyamf_request = pyamf.remoting.decode(request.body, context) pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion) for name, message in pyamf_request: pyamf_response[name] = base_gateway.getProcessor(message)(message) response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE if version == 3: return pyamf.remoting.encode(pyamf_response).getvalue() else: return pyamf.remoting.encode(pyamf_response, context).getvalue() def serve_soap(self, version="1.1"): try: from gluon.contrib.pysimplesoap.server import SoapDispatcher except: return "pysimplesoap not installed in contrib" request = current.request response = current.response procedures = self.soap_procedures 
location = "%s://%s%s" % ( request.env.wsgi_url_scheme, request.env.http_host, URL(r=request, f="call/soap", vars={})) namespace = 'namespace' in response and response.namespace or location documentation = response.description or '' dispatcher = SoapDispatcher( name=response.title, location=location, action=location, # SOAPAction namespace=namespace, prefix='pys', documentation=documentation, ns=True) for method, (function, returns, args, doc) in procedures.iteritems(): dispatcher.register_function(method, function, returns, args, doc) if request.env.request_method == 'POST': fault = {} # Process normal Soap Operation response.headers['Content-Type'] = 'text/xml' xml = dispatcher.dispatch(request.body.read(), fault=fault) if fault: # May want to consider populating a ticket here... response.status = 500 # return the soap response return xml elif 'WSDL' in request.vars: # Return Web Service Description response.headers['Content-Type'] = 'text/xml' return dispatcher.wsdl() elif 'op' in request.vars: # Return method help webpage response.headers['Content-Type'] = 'text/html' method = request.vars['op'] sample_req_xml, sample_res_xml, doc = dispatcher.help(method) body = [H1("Welcome to Web2Py SOAP webservice gateway"), A("See all webservice operations", _href=URL(r=request, f="call/soap", vars={})), H2(method), P(doc), UL(LI("Location: %s" % dispatcher.location), LI("Namespace: %s" % dispatcher.namespace), LI("SoapAction: %s" % dispatcher.action), ), H3("Sample SOAP XML Request Message:"), CODE(sample_req_xml, language="xml"), H3("Sample SOAP XML Response Message:"), CODE(sample_res_xml, language="xml"), ] return {'body': body} else: # Return general help and method list webpage response.headers['Content-Type'] = 'text/html' body = [H1("Welcome to Web2Py SOAP webservice gateway"), P(response.description), P("The following operations are available"), A("See WSDL for webservice description", _href=URL(r=request, f="call/soap", vars={"WSDL":None})), UL([LI(A("%s: %s" % 
(method, doc or ''), _href=URL(r=request, f="call/soap", vars={'op': method}))) for method, doc in dispatcher.list_methods()]), ] return {'body': body} def __call__(self): """ Registers services with:: service = Service() @service.run @service.rss @service.json @service.jsonrpc @service.xmlrpc @service.amfrpc @service.amfrpc3('domain') @service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,}) Exposes services with:: def call(): return service() You can call services with:: http://..../app/default/call/run?[parameters] http://..../app/default/call/rss?[parameters] http://..../app/default/call/json?[parameters] http://..../app/default/call/jsonrpc http://..../app/default/call/xmlrpc http://..../app/default/call/amfrpc http://..../app/default/call/amfrpc3 http://..../app/default/call/soap """ request = current.request if len(request.args) < 1: raise HTTP(404, "Not Found") arg0 = request.args(0) if arg0 == 'run': return self.serve_run(request.args[1:]) elif arg0 == 'rss': return self.serve_rss(request.args[1:]) elif arg0 == 'csv': return self.serve_csv(request.args[1:]) elif arg0 == 'xml': return self.serve_xml(request.args[1:]) elif arg0 == 'json': return self.serve_json(request.args[1:]) elif arg0 == 'jsonrpc': return self.serve_jsonrpc() elif arg0 == 'jsonrpc2': return self.serve_jsonrpc2() elif arg0 == 'xmlrpc': return self.serve_xmlrpc() elif arg0 == 'amfrpc': return self.serve_amfrpc() elif arg0 == 'amfrpc3': return self.serve_amfrpc(3) elif arg0 == 'soap': return self.serve_soap() else: self.error() def error(self): raise HTTP(404, "Object does not exist") def completion(callback): """ Executes a task on completion of the called action. Example: Use as:: from gluon.tools import completion @completion(lambda d: logging.info(repr(d))) def index(): return dict(message='hello') It logs the output of the function every time input is called. The argument of completion is executed in a new thread. 
""" def _completion(f): def __completion(*a, **b): d = None try: d = f(*a, **b) return d finally: thread.start_new_thread(callback, (d,)) return __completion return _completion def prettydate(d, T=lambda x: x): if isinstance(d, datetime.datetime): dt = datetime.datetime.now() - d elif isinstance(d, datetime.date): dt = datetime.date.today() - d elif not d: return '' else: return '[invalid date]' if dt.days < 0: suffix = ' from now' dt = -dt else: suffix = ' ago' if dt.days >= 2 * 365: return T('%d years' + suffix) % int(dt.days / 365) elif dt.days >= 365: return T('1 year' + suffix) elif dt.days >= 60: return T('%d months' + suffix) % int(dt.days / 30) elif dt.days > 21: return T('1 month' + suffix) elif dt.days >= 14: return T('%d weeks' + suffix) % int(dt.days / 7) elif dt.days >= 7: return T('1 week' + suffix) elif dt.days > 1: return T('%d days' + suffix) % dt.days elif dt.days == 1: return T('1 day' + suffix) elif dt.seconds >= 2 * 60 * 60: return T('%d hours' + suffix) % int(dt.seconds / 3600) elif dt.seconds >= 60 * 60: return T('1 hour' + suffix) elif dt.seconds >= 2 * 60: return T('%d minutes' + suffix) % int(dt.seconds / 60) elif dt.seconds >= 60: return T('1 minute' + suffix) elif dt.seconds > 1: return T('%d seconds' + suffix) % dt.seconds elif dt.seconds == 1: return T('1 second' + suffix) else: return T('now') def test_thread_separation(): def f(): c = PluginManager() lock1.acquire() lock2.acquire() c.x = 7 lock1.release() lock2.release() lock1 = thread.allocate_lock() lock2 = thread.allocate_lock() lock1.acquire() thread.start_new_thread(f, ()) a = PluginManager() a.x = 5 lock1.release() lock2.acquire() return a.x class PluginManager(object): """ Plugin Manager is similar to a storage object but it is a single level singleton. This means that multiple instances within the same thread share the same attributes. Its constructor is also special. The first argument is the name of the plugin you are defining. 
class Expose(object):
    """Directory listing / file download helper for controller actions."""

    def __init__(self, base=None, basename=None, extensions=None,
                 allow_download=True):
        """
        Examples:
            Use as::

                def static():
                    return dict(files=Expose())

            or::

                def static():
                    path = os.path.join(request.folder,'static','public')
                    return dict(files=Expose(path,basename='public'))

        Args:
            base: filesystem root to expose (defaults to the app's
                static/ folder)
            basename: label for the breadcrumb root (defaults to the
                current function name)
            extensions: an optional list of file extensions for filtering
                displayed files: e.g. `['.py', '.jpg']`
            allow_download: whether to allow downloading selected files

        Raises:
            HTTP(401): when the requested path escapes `base`
            HTTP(404): when the requested path does not exist
        """
        current.session.forget()
        base = base or os.path.join(current.request.folder, 'static')
        # normalize once so the containment check below compares
        # canonical paths
        base = os.path.normpath(base)
        basename = basename or current.request.function
        self.basename = basename
        if current.request.raw_args:
            self.args = [arg for arg in
                         current.request.raw_args.split('/') if arg]
        else:
            self.args = [arg for arg in current.request.args if arg]
        filename = os.path.join(base, *self.args)
        # SECURITY FIX: the old check was
        #     normpath(filename).startswith(base)
        # which a *sibling* directory such as /path/staticX satisfies for
        # base /path/static, allowing reads outside the exposed tree.
        # Require either the base itself or a path under base + os.sep.
        # Authorization is checked before existence so probing outside the
        # tree does not reveal which files exist.
        normfilename = os.path.normpath(filename)
        if normfilename != base and \
                not normfilename.startswith(base + os.sep):
            raise HTTP(401, "NOT AUTHORIZED")
        if not os.path.exists(filename):
            raise HTTP(404, "FILE NOT FOUND")
        # NOTE(review): symlinks inside `base` may still point outside of
        # it — confirm whether symlinked content should be served.
        if allow_download and not os.path.isdir(filename):
            current.response.headers['Content-Type'] = contenttype(filename)
            raise HTTP(200, open(filename, 'rb'),
                       **current.response.headers)
        self.path = path = os.path.join(filename, '*')
        self.folders = [f[len(path) - 1:] for f in sorted(glob.glob(path))
                        if os.path.isdir(f) and not self.isprivate(f)]
        self.filenames = [f[len(path) - 1:] for f in sorted(glob.glob(path))
                          if not os.path.isdir(f) and not self.isprivate(f)]
        if 'README' in self.filenames:
            readme = open(os.path.join(filename, 'README')).read()
            self.paragraph = MARKMIN(readme)
        else:
            self.paragraph = None
        if extensions:
            self.filenames = [f for f in self.filenames
                              if os.path.splitext(f)[-1] in extensions]

    def breadcrumbs(self, basename):
        # clickable path from the exposed root down to the current folder
        path = []
        span = SPAN()
        span.append(A(basename, _href=URL()))
        for arg in self.args:
            span.append('/')
            path.append(arg)
            span.append(A(arg, _href=URL(args='/'.join(path))))
        return span

    def table_folders(self):
        # one-column table of subfolder links ('' when there are none)
        if self.folders:
            return SPAN(H3('Folders'), TABLE(
                *[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
                  for folder in self.folders],
                **dict(_class="table")))
        return ''

    @staticmethod
    def isprivate(f):
        # hide 'private' paths, dotfiles and editor backup files
        return 'private' in f or f.startswith('.') or f.endswith('~')

    @staticmethod
    def isimage(f):
        return os.path.splitext(f)[-1].lower() in (
            '.png', '.jpg', '.jpeg', '.gif', '.tiff')

    def table_files(self, width=160):
        # table of file links, with an inline thumbnail for images
        # (capped at `width` pixels); '' when there are no files
        if self.filenames:
            return SPAN(H3('Files'),
                        TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
                                   TD(IMG(_src=URL(args=self.args + [f]),
                                          _style='max-width:%spx' % width)
                                      if width and self.isimage(f) else ''))
                                for f in self.filenames],
                              **dict(_class="table")))
        return ''

    def xml(self):
        # full widget: breadcrumbs + optional README + folders + files
        return DIV(
            H2(self.breadcrumbs(self.basename)),
            self.paragraph or '',
            self.table_folders(),
            self.table_files()).xml()
SPAN(H3('Files'), TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))), TD(IMG(_src=URL(args=self.args + [f]), _style='max-width:%spx' % width) if width and self.isimage(f) else '')) for f in self.filenames], **dict(_class="table"))) return '' def xml(self): return DIV( H2(self.breadcrumbs(self.basename)), self.paragraph or '', self.table_folders(), self.table_files()).xml() class Wiki(object): everybody = 'everybody' rows_page = 25 def markmin_base(self, body): return MARKMIN(body, extra=self.settings.extra, url=True, environment=self.env, autolinks=lambda link: expand_one(link, {})).xml() def render_tags(self, tags): return DIV( _class='w2p_wiki_tags', *[A(t.strip(), _href=URL(args='_search', vars=dict(q=t))) for t in tags or [] if t.strip()]) def markmin_render(self, page): return self.markmin_base(page.body) + self.render_tags(page.tags).xml() def html_render(self, page): html = page.body # @///function -> http://..../function html = replace_at_urls(html, URL) # http://...jpg -> <img src="http://...jpg/> or embed html = replace_autolinks(html, lambda link: expand_one(link, {})) # @{component:name} -> <script>embed component name</script> html = replace_components(html, self.env) html = html + self.render_tags(page.tags).xml() return html @staticmethod def component(text): """ In wiki docs allows `@{component:controller/function/args}` which renders as a `LOAD(..., ajax=True)` """ items = text.split('/') controller, function, args = items[0], items[1], items[2:] return LOAD(controller, function, args=args, ajax=True).xml() def get_renderer(self): if isinstance(self.settings.render, basestring): r = getattr(self, "%s_render" % self.settings.render) elif callable(self.settings.render): r = self.settings.render elif isinstance(self.settings.render, dict): def custom_render(page): if page.render: if page.render in self.settings.render.keys(): my_render = self.settings.render[page.render] else: my_render = getattr(self, "%s_render" % page.render) else: my_render = 
self.markmin_render return my_render(page) r = custom_render else: raise ValueError( "Invalid render type %s" % type(self.settings.render)) return r def __init__(self, auth, env=None, render='markmin', manage_permissions=False, force_prefix='', restrict_search=False, extra=None, menu_groups=None, templates=None, migrate=True, controller=None, function=None, groups=None): settings = self.settings = auth.settings.wiki """ Args: render: - "markmin" - "html" - `<function>` : Sets a custom render function - `dict(html=<function>, markmin=...)`: dict(...) allows multiple custom render functions - "multiple" : Is the same as `{}`. It enables per-record formats using builtins """ engines = set(['markmin', 'html']) show_engine = False if render == "multiple": render = {} if isinstance(render, dict): [engines.add(key) for key in render] show_engine = True settings.render = render perms = settings.manage_permissions = manage_permissions settings.force_prefix = force_prefix settings.restrict_search = restrict_search settings.extra = extra or {} settings.menu_groups = menu_groups settings.templates = templates settings.controller = controller settings.function = function settings.groups = auth.user_groups.values() \ if groups is None else groups db = auth.db self.env = env or {} self.env['component'] = Wiki.component self.auth = auth self.wiki_menu_items = None if self.auth.user: self.settings.force_prefix = force_prefix % self.auth.user else: self.settings.force_prefix = force_prefix self.host = current.request.env.http_host table_definitions = [ ('wiki_page', { 'args': [ Field('slug', requires=[IS_SLUG(), IS_NOT_IN_DB(db, 'wiki_page.slug')], writable=False), Field('title', length=255, unique=True), Field('body', 'text', notnull=True), Field('tags', 'list:string'), Field('can_read', 'list:string', writable=perms, readable=perms, default=[Wiki.everybody]), Field('can_edit', 'list:string', writable=perms, readable=perms, default=[Wiki.everybody]), Field('changelog'), 
Field('html', 'text', compute=self.get_renderer(), readable=False, writable=False), Field('render', default="markmin", readable=show_engine, writable=show_engine, requires=IS_EMPTY_OR( IS_IN_SET(engines))), auth.signature], 'vars': {'format': '%(title)s', 'migrate': migrate}}), ('wiki_tag', { 'args': [ Field('name'), Field('wiki_page', 'reference wiki_page'), auth.signature], 'vars':{'format': '%(title)s', 'migrate': migrate}}), ('wiki_media', { 'args': [ Field('wiki_page', 'reference wiki_page'), Field('title', required=True), Field('filename', 'upload', required=True), auth.signature], 'vars': {'format': '%(title)s', 'migrate': migrate}}), ] # define only non-existent tables for key, value in table_definitions: args = [] if not key in db.tables(): # look for wiki_ extra fields in auth.settings extra_fields = auth.settings.extra_fields if extra_fields: if key in extra_fields: if extra_fields[key]: for field in extra_fields[key]: args.append(field) args += value['args'] db.define_table(key, *args, **value['vars']) if self.settings.templates is None and not \ self.settings.manage_permissions: self.settings.templates = db.wiki_page.tags.contains('template') & \ db.wiki_page.can_read.contains('everybody') def update_tags_insert(page, id, db=db): for tag in page.tags or []: tag = tag.strip().lower() if tag: db.wiki_tag.insert(name=tag, wiki_page=id) def update_tags_update(dbset, page, db=db): page = dbset.select(limitby=(0, 1)).first() db(db.wiki_tag.wiki_page == page.id).delete() for tag in page.tags or []: tag = tag.strip().lower() if tag: db.wiki_tag.insert(name=tag, wiki_page=page.id) db.wiki_page._after_insert.append(update_tags_insert) db.wiki_page._after_update.append(update_tags_update) if (auth.user and check_credentials(current.request, gae_login=False) and not 'wiki_editor' in auth.user_groups.values() and self.settings.groups == auth.user_groups.values()): group = db.auth_group(role='wiki_editor') gid = group.id if group else db.auth_group.insert( 
role='wiki_editor') auth.add_membership(gid) settings.lock_keys = True # WIKI ACCESS POLICY def not_authorized(self, page=None): raise HTTP(401) def can_read(self, page): if 'everybody' in page.can_read or not \ self.settings.manage_permissions: return True elif self.auth.user: groups = self.settings.groups if ('wiki_editor' in groups or set(groups).intersection(set(page.can_read + page.can_edit)) or page.created_by == self.auth.user.id): return True return False def can_edit(self, page=None): if not self.auth.user: redirect(self.auth.settings.login_url) groups = self.settings.groups return ('wiki_editor' in groups or (page is None and 'wiki_author' in groups) or not page is None and ( set(groups).intersection(set(page.can_edit)) or page.created_by == self.auth.user.id)) def can_manage(self): if not self.auth.user: return False groups = self.settings.groups return 'wiki_editor' in groups def can_search(self): return True def can_see_menu(self): if self.auth.user: if self.settings.menu_groups is None: return True else: groups = self.settings.groups if any(t in self.settings.menu_groups for t in groups): return True return False ### END POLICY def automenu(self): """adds the menu if not present""" if (not self.wiki_menu_items and self.settings.controller and self.settings.function): self.wiki_menu_items = self.menu(self.settings.controller, self.settings.function) current.response.menu += self.wiki_menu_items def __call__(self): request = current.request settings = self.settings settings.controller = settings.controller or request.controller settings.function = settings.function or request.function self.automenu() zero = request.args(0) or 'index' if zero and zero.isdigit(): return self.media(int(zero)) elif not zero or not zero.startswith('_'): return self.read(zero) elif zero == '_edit': return self.edit(request.args(1) or 'index', request.args(2) or 0) elif zero == '_editmedia': return self.editmedia(request.args(1) or 'index') elif zero == '_create': return 
self.create() elif zero == '_pages': return self.pages() elif zero == '_search': return self.search() elif zero == '_recent': ipage = int(request.vars.page or 0) query = self.auth.db.wiki_page.created_by == request.args( 1, cast=int) return self.search(query=query, orderby=~self.auth.db.wiki_page.created_on, limitby=(ipage * self.rows_page, (ipage + 1) * self.rows_page), ) elif zero == '_cloud': return self.cloud() elif zero == '_preview': return self.preview(self.get_renderer()) def first_paragraph(self, page): if not self.can_read(page): mm = (page.body or '').replace('\r', '') ps = [p for p in mm.split('\n\n') if not p.startswith('#') and p.strip()] if ps: return ps[0] return '' def fix_hostname(self, body): return (body or '').replace('://HOSTNAME', '://%s' % self.host) def read(self, slug, force_render=False): if slug in '_cloud': return self.cloud() elif slug in '_search': return self.search() page = self.auth.db.wiki_page(slug=slug) if page and (not self.can_read(page)): return self.not_authorized(page) if current.request.extension == 'html': if not page: url = URL(args=('_create', slug)) return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn")) else: html = page.html if not force_render else self.get_renderer()(page) content = XML(self.fix_hostname(html)) return dict(title=page.title, slug=page.slug, page=page, content=content, tags=page.tags, created_on=page.created_on, modified_on=page.modified_on) elif current.request.extension == 'load': return self.fix_hostname(page.html) if page else '' else: if not page: raise HTTP(404) else: return dict(title=page.title, slug=page.slug, page=page, content=page.body, tags=page.tags, created_on=page.created_on, modified_on=page.modified_on) def edit(self, slug, from_template=0): auth = self.auth db = auth.db page = db.wiki_page(slug=slug) if not self.can_edit(page): return self.not_authorized(page) title_guess = ' '.join(c.capitalize() for c in slug.split('-')) if not page: if not (self.can_manage() 
or slug.startswith(self.settings.force_prefix)): current.session.flash = 'slug must have "%s" prefix' \ % self.settings.force_prefix redirect(URL(args=('_create'))) db.wiki_page.can_read.default = [Wiki.everybody] db.wiki_page.can_edit.default = [auth.user_group_role()] db.wiki_page.title.default = title_guess db.wiki_page.slug.default = slug if slug == 'wiki-menu': db.wiki_page.body.default = \ '- Menu Item > @////index\n- - Submenu > http://web2py.com' else: db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \ if int(from_template) > 0 else '## %s\n\npage content' % title_guess vars = current.request.post_vars if vars.body: vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME') form = SQLFORM(db.wiki_page, page, deletable=True, formstyle='table2cols', showid=False).process() if form.deleted: current.session.flash = 'page deleted' redirect(URL()) elif form.accepted: current.session.flash = 'page created' redirect(URL(args=slug)) script = """ jQuery(function() { if (!jQuery('#wiki_page_body').length) return; var pagecontent = jQuery('#wiki_page_body'); pagecontent.css('font-family', 'Monaco,Menlo,Consolas,"Courier New",monospace'); var prevbutton = jQuery('<button class="btn nopreview">Preview</button>'); var preview = jQuery('<div id="preview"></div>').hide(); var previewmedia = jQuery('<div id="previewmedia"></div>'); var form = pagecontent.closest('form'); preview.insertBefore(form); prevbutton.insertBefore(form); if(%(link_media)s) { var mediabutton = jQuery('<button class="btn nopreview">Media</button>'); mediabutton.insertBefore(form); previewmedia.insertBefore(form); mediabutton.click(function() { if (mediabutton.hasClass('nopreview')) { web2py_component('%(urlmedia)s', 'previewmedia'); } else { previewmedia.empty(); } mediabutton.toggleClass('nopreview'); }); } prevbutton.click(function(e) { e.preventDefault(); if (prevbutton.hasClass('nopreview')) { prevbutton.addClass('preview').removeClass( 
'nopreview').html('Edit Source'); try{var wiki_render = jQuery('#wiki_page_render').val()} catch(e){var wiki_render = null;} web2py_ajax_page('post', \ '%(url)s', {body: jQuery('#wiki_page_body').val(), \ render: wiki_render}, 'preview'); form.fadeOut('fast', function() {preview.fadeIn()}); } else { prevbutton.addClass( 'nopreview').removeClass('preview').html('Preview'); preview.fadeOut('fast', function() {form.fadeIn()}); } }) }) """ % dict(url=URL(args=('_preview', slug)), link_media=('true' if page else 'false'), urlmedia=URL(extension='load', args=('_editmedia', slug), vars=dict(embedded=1))) return dict(content=TAG[''](form, SCRIPT(script))) def editmedia(self, slug): auth = self.auth db = auth.db page = db.wiki_page(slug=slug) if not (page and self.can_edit(page)): return self.not_authorized(page) self.auth.db.wiki_media.id.represent = lambda id, row: \ id if not row.filename else \ SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1])) self.auth.db.wiki_media.wiki_page.default = page.id self.auth.db.wiki_media.wiki_page.writable = False links = [] csv = True create = True if current.request.vars.embedded: script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;" fragment = self.auth.db.wiki_media.id.represent csv = False create = False links= [ lambda row: A('copy into source', _href='#', _onclick=script % (fragment(row.id, row))) ] content = SQLFORM.grid( self.auth.db.wiki_media.wiki_page == page.id, orderby=self.auth.db.wiki_media.title, links=links, csv=csv, create=create, args=['_editmedia', slug], user_signature=False) return dict(content=content) def create(self): if not self.can_edit(): return self.not_authorized() db = self.auth.db slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id, db.wiki_page.slug) options = [OPTION(row.slug, _value=row.id) for row in slugs] options.insert(0, OPTION('', _value='')) fields = [Field("slug", default=current.request.args(1) or 
self.settings.force_prefix, requires=(IS_SLUG(), IS_NOT_IN_DB(db, db.wiki_page.slug))),] if self.settings.templates: fields.append( Field("from_template", "reference wiki_page", requires=IS_EMPTY_OR( IS_IN_DB(db(self.settings.templates), db.wiki_page._id, '%(slug)s')), comment=current.T( "Choose Template or empty for new Page"))) form = SQLFORM.factory(*fields, **dict(_class="well")) form.element("[type=submit]").attributes["_value"] = \ current.T("Create Page from Slug") if form.process().accepted: form.vars.from_template = 0 if not form.vars.from_template \ else form.vars.from_template redirect(URL(args=('_edit', form.vars.slug, form.vars.from_template or 0))) # added param return dict(content=form) def pages(self): if not self.can_manage(): return self.not_authorized() self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN( '@////%s' % slug) self.auth.db.wiki_page.title.represent = lambda title, row: \ A(title, _href=URL(args=row.slug)) wiki_table = self.auth.db.wiki_page content = SQLFORM.grid( wiki_table, fields=[wiki_table.slug, wiki_table.title, wiki_table.tags, wiki_table.can_read, wiki_table.can_edit], links=[ lambda row: A('edit', _href=URL(args=('_edit', row.slug)), _class='btn'), lambda row: A('media', _href=URL(args=('_editmedia', row.slug)), _class='btn')], details=False, editable=False, deletable=False, create=False, orderby=self.auth.db.wiki_page.title, args=['_pages'], user_signature=False) return dict(content=content) def media(self, id): request, response, db = current.request, current.response, self.auth.db media = db.wiki_media(id) if media: if self.settings.manage_permissions: page = db.wiki_page(media.wiki_page) if not self.can_read(page): return self.not_authorized(page) request.args = [media.filename] m = response.download(request, db) current.session.forget() # get rid of the cookie response.headers['Last-Modified'] = \ request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT") if 'Content-Disposition' in response.headers: del 
response.headers['Content-Disposition'] response.headers['Pragma'] = 'cache' response.headers['Cache-Control'] = 'private' return m else: raise HTTP(404) def menu(self, controller='default', function='index'): db = self.auth.db request = current.request menu_page = db.wiki_page(slug='wiki-menu') menu = [] if menu_page: tree = {'': menu} regex = re.compile('[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)') for match in regex.finditer(self.fix_hostname(menu_page.body)): base = match.group('base').replace(' ', '') title = match.group('title') link = match.group('link') title_page = None if link.startswith('@'): items = link[2:].split('/') if len(items) > 3: title_page = items[3] link = URL(a=items[0] or None, c=items[1] or controller, f=items[2] or function, args=items[3:]) parent = tree.get(base[1:], tree['']) subtree = [] tree[base] = subtree parent.append((current.T(title), request.args(0) == title_page, link, subtree)) if self.can_see_menu(): submenu = [] menu.append((current.T('[Wiki]'), None, None, submenu)) if URL() == URL(controller, function): if not str(request.args(0)).startswith('_'): slug = request.args(0) or 'index' mode = 1 elif request.args(0) == '_edit': slug = request.args(1) or 'index' mode = 2 elif request.args(0) == '_editmedia': slug = request.args(1) or 'index' mode = 3 else: mode = 0 if mode in (2, 3): submenu.append((current.T('View Page'), None, URL(controller, function, args=slug))) if mode in (1, 3): submenu.append((current.T('Edit Page'), None, URL(controller, function, args=('_edit', slug)))) if mode in (1, 2): submenu.append((current.T('Edit Page Media'), None, URL(controller, function, args=('_editmedia', slug)))) submenu.append((current.T('Create New Page'), None, URL(controller, function, args=('_create')))) # Moved next if to inside self.auth.user check if self.can_manage(): submenu.append((current.T('Manage Pages'), None, URL(controller, function, args=('_pages')))) submenu.append((current.T('Edit Menu'), None, 
URL(controller, function, args=('_edit', 'wiki-menu')))) # Also moved inside self.auth.user check submenu.append((current.T('Search Pages'), None, URL(controller, function, args=('_search')))) return menu def search(self, tags=None, query=None, cloud=True, preview=True, limitby=(0, 100), orderby=None): if not self.can_search(): return self.not_authorized() request = current.request content = CAT() if tags is None and query is None: form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(), value=request.vars.q), INPUT(_type="submit", _value=current.T('Search')), _method='GET') content.append(DIV(form, _class='w2p_wiki_form')) if request.vars.q: tags = [v.strip() for v in request.vars.q.split(',')] tags = [v.lower() for v in tags if v] if tags or not query is None: db = self.auth.db count = db.wiki_tag.wiki_page.count() fields = [db.wiki_page.id, db.wiki_page.slug, db.wiki_page.title, db.wiki_page.tags, db.wiki_page.can_read] if preview: fields.append(db.wiki_page.body) if query is None: query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\ (db.wiki_tag.name.belongs(tags)) query = query | db.wiki_page.title.contains(request.vars.q) if self.settings.restrict_search and not self.manage(): query = query & (db.wiki_page.created_by == self.auth.user_id) pages = db(query).select(count, *fields, **dict(orderby=orderby or ~count, groupby=reduce(lambda a, b: a | b, fields), distinct=True, limitby=limitby)) if request.extension in ('html', 'load'): if not pages: content.append(DIV(current.T("No results"), _class='w2p_wiki_form')) def link(t): return A(t, _href=URL(args='_search', vars=dict(q=t))) items = [DIV(H3(A(p.wiki_page.title, _href=URL( args=p.wiki_page.slug))), MARKMIN(self.first_paragraph(p.wiki_page)) if preview else '', DIV(_class='w2p_wiki_tags', *[link(t.strip()) for t in p.wiki_page.tags or [] if t.strip()]), _class='w2p_wiki_search_item') for p in pages] content.append(DIV(_class='w2p_wiki_pages', *items)) else: cloud = False content = [p.wiki_page.as_dict() for p 
in pages] elif cloud: content.append(self.cloud()['content']) if request.extension == 'load': return content return dict(content=content) def cloud(self): db = self.auth.db count = db.wiki_tag.wiki_page.count(distinct=True) ids = db(db.wiki_tag).select( db.wiki_tag.name, count, distinct=True, groupby=db.wiki_tag.name, orderby=~count, limitby=(0, 20)) if ids: a, b = ids[0](count), ids[-1](count) def style(c): STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem' size = (1.5 * (c - b) / max(a - b, 1) + 1.3) return STYLE % (1.3, size) items = [] for item in ids: items.append(A(item.wiki_tag.name, _style=style(item(count)), _href=URL(args='_search', vars=dict(q=item.wiki_tag.name)))) items.append(' ') return dict(content=DIV(_class='w2p_cloud', *items)) def preview(self, render): request = current.request # FIXME: This is an ugly hack to ensure a default render # engine if not specified (with multiple render engines) if not "render" in request.post_vars: request.post_vars.render = None return render(request.post_vars) class Config(object): def __init__( self, filename, section, default_values={} ): self.config = ConfigParser.ConfigParser(default_values) self.config.read(filename) if not self.config.has_section(section): self.config.add_section(section) self.section = section self.filename = filename def read(self): if not(isinstance(current.session['settings_%s' % self.section], dict)): settings = dict(self.config.items(self.section)) else: settings = current.session['settings_%s' % self.section] return settings def save(self, options): for option, value in options: self.config.set(self.section, option, value) try: self.config.write(open(self.filename, 'w')) result = True except: current.session['settings_%s' % self.section] = dict(self.config.items(self.section)) result = False return result if __name__ == '__main__': import doctest doctest.testmod()
./CrossVul/dataset_final_sorted/CWE-601/py/good_1731_1
crossvul-python_data_good_1915_3
# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from netaddr import IPSet from synapse.config._base import Config, ConfigError from synapse.config._util import validate_config class FederationConfig(Config): section = "federation" def read_config(self, config, **kwargs): # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist = None # type: Optional[dict] federation_domain_whitelist = config.get("federation_domain_whitelist", None) if federation_domain_whitelist is not None: # turn the whitelist into a hash for speed of lookup self.federation_domain_whitelist = {} for domain in federation_domain_whitelist: self.federation_domain_whitelist[domain] = True ip_range_blacklist = config.get("ip_range_blacklist", []) # Attempt to create an IPSet from the given ranges try: self.ip_range_blacklist = IPSet(ip_range_blacklist) except Exception as e: raise ConfigError("Invalid range(s) provided in ip_range_blacklist: %s" % e) # Always blacklist 0.0.0.0, :: self.ip_range_blacklist.update(["0.0.0.0", "::"]) # The federation_ip_range_blacklist is used for backwards-compatibility # and only applies to federation and identity servers. If it is not given, # default to ip_range_blacklist. 
federation_ip_range_blacklist = config.get( "federation_ip_range_blacklist", ip_range_blacklist ) try: self.federation_ip_range_blacklist = IPSet(federation_ip_range_blacklist) except Exception as e: raise ConfigError( "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e ) # Always blacklist 0.0.0.0, :: self.federation_ip_range_blacklist.update(["0.0.0.0", "::"]) federation_metrics_domains = config.get("federation_metrics_domains") or [] validate_config( _METRICS_FOR_DOMAINS_SCHEMA, federation_metrics_domains, ("federation_metrics_domains",), ) self.federation_metrics_domains = set(federation_metrics_domains) def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Federation ## # Restrict federation to the following whitelist of domains. # N.B. we recommend also firewalling your federation listener to limit # inbound federation traffic as early as possible, rather than relying # purely on this application-layer restriction. If not specified, the # default is to whitelist everything. # #federation_domain_whitelist: # - lon.example.com # - nyc.example.com # - syd.example.com # Prevent outgoing requests from being sent to the following blacklisted IP address # CIDR ranges. If this option is not specified, or specified with an empty list, # no IP range blacklist will be enforced. # # The blacklist applies to the outbound requests for federation, identity servers, # push servers, and for checking key validitity for third-party invite events. # # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly # listed here, since they correspond to unroutable addresses.) # # This option replaces federation_ip_range_blacklist in Synapse v1.24.0. # ip_range_blacklist: - '127.0.0.0/8' - '10.0.0.0/8' - '172.16.0.0/12' - '192.168.0.0/16' - '100.64.0.0/10' - '169.254.0.0/16' - '::1/128' - 'fe80::/64' - 'fc00::/7' # Report prometheus metrics on the age of PDUs being sent to and received from # the following domains. 
This can be used to give an idea of "delay" on inbound # and outbound federation, though be aware that any delay can be due to problems # at either end or with the intermediate network. # # By default, no domains are monitored in this way. # #federation_metrics_domains: # - matrix.org # - example.com """ _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_3
crossvul-python_data_bad_1915_11
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cgi import logging import random import sys import urllib.parse from io import BytesIO from typing import Callable, Dict, List, Optional, Tuple, Union import attr import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json from zope.interface import implementer from twisted.internet import defer from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import IReactorPluggableNameResolver, IReactorTime from twisted.internet.task import _EPSILON, Cooperator from twisted.web.http_headers import Headers from twisted.web.iweb import IBodyProducer, IResponse import synapse.metrics import synapse.util.retryutils from synapse.api.errors import ( FederationDeniedError, HttpResponseException, RequestSendFailed, ) from synapse.http import QuieterFileBodyProducer from synapse.http.client import ( BlacklistingAgentWrapper, IPBlacklistingResolver, encode_query_args, readBodyToFile, ) from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.logging.context import make_deferred_yieldable from synapse.logging.opentracing import ( inject_active_span_byte_dict, set_tag, start_active_span, tags, ) from synapse.types import JsonDict from synapse.util import json_decoder from synapse.util.async_helpers import 
timeout_deferred from synapse.util.metrics import Measure logger = logging.getLogger(__name__) outgoing_requests_counter = Counter( "synapse_http_matrixfederationclient_requests", "", ["method"] ) incoming_responses_counter = Counter( "synapse_http_matrixfederationclient_responses", "", ["method", "code"] ) MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 MAXINT = sys.maxsize _next_id = 1 QueryArgs = Dict[str, Union[str, List[str]]] @attr.s(slots=True, frozen=True) class MatrixFederationRequest: method = attr.ib(type=str) """HTTP method """ path = attr.ib(type=str) """HTTP path """ destination = attr.ib(type=str) """The remote server to send the HTTP request to. """ json = attr.ib(default=None, type=Optional[JsonDict]) """JSON to send in the body. """ json_callback = attr.ib(default=None, type=Optional[Callable[[], JsonDict]]) """A callback to generate the JSON. """ query = attr.ib(default=None, type=Optional[dict]) """Query arguments. """ txn_id = attr.ib(default=None, type=Optional[str]) """Unique ID for this request (for logging) """ uri = attr.ib(init=False, type=bytes) """The URI of this request """ def __attrs_post_init__(self) -> None: global _next_id txn_id = "%s-O-%s" % (self.method, _next_id) _next_id = (_next_id + 1) % (MAXINT - 1) object.__setattr__(self, "txn_id", txn_id) destination_bytes = self.destination.encode("ascii") path_bytes = self.path.encode("ascii") if self.query: query_bytes = encode_query_args(self.query) else: query_bytes = b"" # The object is frozen so we can pre-compute this. 
uri = urllib.parse.urlunparse( (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"") ) object.__setattr__(self, "uri", uri) def get_json(self) -> Optional[JsonDict]: if self.json_callback: return self.json_callback() return self.json async def _handle_json_response( reactor: IReactorTime, timeout_sec: float, request: MatrixFederationRequest, response: IResponse, start_ms: int, ) -> JsonDict: """ Reads the JSON body of a response, with a timeout Args: reactor: twisted reactor, for the timeout timeout_sec: number of seconds to wait for response to complete request: the request that triggered the response response: response to the request start_ms: Timestamp when request was made Returns: The parsed JSON response """ try: check_content_type_is_json(response.headers) # Use the custom JSON decoder (partially re-implements treq.json_content). d = treq.text_content(response, encoding="utf-8") d.addCallback(json_decoder.decode) d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) body = await make_deferred_yieldable(d) except defer.TimeoutError as e: logger.warning( "{%s} [%s] Timed out reading response - %s %s", request.txn_id, request.destination, request.method, request.uri.decode("ascii"), ) raise RequestSendFailed(e, can_retry=True) from e except Exception as e: logger.warning( "{%s} [%s] Error reading response %s %s: %s", request.txn_id, request.destination, request.method, request.uri.decode("ascii"), e, ) raise time_taken_secs = reactor.seconds() - start_ms / 1000 logger.info( "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s", request.txn_id, request.destination, response.code, response.phrase.decode("ascii", errors="replace"), time_taken_secs, request.method, request.uri.decode("ascii"), ) return body class MatrixFederationHttpClient: """HTTP client used to talk to other homeservers over the federation protocol. Send client certificates and signs requests. 
Attributes: agent (twisted.web.client.Agent): The twisted Agent used to send the requests. """ def __init__(self, hs, tls_client_options_factory): self.hs = hs self.signing_key = hs.signing_key self.server_name = hs.hostname real_reactor = hs.get_reactor() # We need to use a DNS resolver which filters out blacklisted IP # addresses, to prevent DNS rebinding. nameResolver = IPBlacklistingResolver( real_reactor, None, hs.config.federation_ip_range_blacklist ) @implementer(IReactorPluggableNameResolver) class Reactor: def __getattr__(_self, attr): if attr == "nameResolver": return nameResolver else: return getattr(real_reactor, attr) self.reactor = Reactor() user_agent = hs.version_string if hs.config.user_agent_suffix: user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix) user_agent = user_agent.encode("ascii") self.agent = MatrixFederationAgent( self.reactor, tls_client_options_factory, user_agent ) # Use a BlacklistingAgentWrapper to prevent circumventing the IP # blacklist via IP literals in server names self.agent = BlacklistingAgentWrapper( self.agent, ip_blacklist=hs.config.federation_ip_range_blacklist, ) self.clock = hs.get_clock() self._store = hs.get_datastore() self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout = 60 def schedule(x): self.reactor.callLater(_EPSILON, x) self._cooperator = Cooperator(scheduler=schedule) async def _send_request_with_optional_trailing_slash( self, request: MatrixFederationRequest, try_trailing_slash_on_400: bool = False, **send_request_args ) -> IResponse: """Wrapper for _send_request which can optionally retry the request upon receiving a combination of a 400 HTTP response code and a 'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3 due to #3622. Args: request: details of request to be sent try_trailing_slash_on_400: Whether on receiving a 400 'M_UNRECOGNIZED' from the server to retry the request with a trailing slash appended to the request path. 
send_request_args: A dictionary of arguments to pass to `_send_request()`. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). Returns: Parsed JSON response body. """ try: response = await self._send_request(request, **send_request_args) except HttpResponseException as e: # Received an HTTP error > 300. Check if it meets the requirements # to retry with a trailing slash if not try_trailing_slash_on_400: raise if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED": raise # Retry with a trailing slash if we received a 400 with # 'M_UNRECOGNIZED' which some endpoints can return when omitting a # trailing slash on Synapse <= v0.99.3. logger.info("Retrying request with trailing slash") # Request is frozen so we create a new instance request = attr.evolve(request, path=request.path + "/") response = await self._send_request(request, **send_request_args) return response async def _send_request( self, request: MatrixFederationRequest, retry_on_dns_fail: bool = True, timeout: Optional[int] = None, long_retries: bool = False, ignore_backoff: bool = False, backoff_on_404: bool = False, ) -> IResponse: """ Sends a request to the given server. Args: request: details of request to be sent retry_on_dns_fail: true if the request should be retied on DNS failures timeout: number of milliseconds to wait for the response headers (including connecting to the server), *for each attempt*. 60s by default. long_retries: whether to use the long retry algorithm. The regular retry algorithm makes 4 attempts, with intervals [0.5s, 1s, 2s]. The long retry algorithm makes 11 attempts, with intervals [4s, 16s, 60s, 60s, ...] Both algorithms add -20%/+40% jitter to the retry intervals. Note that the above intervals are *in addition* to the time spent waiting for the request to complete (up to `timeout` ms). NB: the long retry algorithm takes over 20 minutes to complete, with a default timeout of 60s! 
ignore_backoff: true to ignore the historical backoff data and try the request anyway. backoff_on_404: Back off if we get a 404 Returns: Resolves with the HTTP response object on success. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ if timeout: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout if ( self.hs.config.federation_domain_whitelist is not None and request.destination not in self.hs.config.federation_domain_whitelist ): raise FederationDeniedError(request.destination) limiter = await synapse.util.retryutils.get_retry_limiter( request.destination, self.clock, self._store, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, ) method_bytes = request.method.encode("ascii") destination_bytes = request.destination.encode("ascii") path_bytes = request.path.encode("ascii") if request.query: query_bytes = encode_query_args(request.query) else: query_bytes = b"" scope = start_active_span( "outgoing-federation-request", tags={ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, tags.PEER_ADDRESS: request.destination, tags.HTTP_METHOD: request.method, tags.HTTP_URL: request.path, }, finish_on_close=True, ) # Inject the span into the headers headers_dict = {} # type: Dict[bytes, List[bytes]] inject_active_span_byte_dict(headers_dict, request.destination) headers_dict[b"User-Agent"] = [self.version_string_bytes] with limiter, scope: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: retries_left = MAX_LONG_RETRIES else: retries_left = MAX_SHORT_RETRIES url_bytes = request.uri url_str = url_bytes.decode("ascii") url_to_sign_bytes = urllib.parse.urlunparse( (b"", b"", 
path_bytes, None, query_bytes, b"") ) while True: try: json = request.get_json() if json: headers_dict[b"Content-Type"] = [b"application/json"] auth_headers = self.build_auth_headers( destination_bytes, method_bytes, url_to_sign_bytes, json ) data = encode_canonical_json(json) producer = QuieterFileBodyProducer( BytesIO(data), cooperator=self._cooperator ) # type: Optional[IBodyProducer] else: producer = None auth_headers = self.build_auth_headers( destination_bytes, method_bytes, url_to_sign_bytes ) headers_dict[b"Authorization"] = auth_headers logger.debug( "{%s} [%s] Sending request: %s %s; timeout %fs", request.txn_id, request.destination, request.method, url_str, _sec_timeout, ) outgoing_requests_counter.labels(request.method).inc() try: with Measure(self.clock, "outbound_request"): # we don't want all the fancy cookie and redirect handling # that treq.request gives: just use the raw Agent. request_deferred = self.agent.request( method_bytes, url_bytes, headers=Headers(headers_dict), bodyProducer=producer, ) request_deferred = timeout_deferred( request_deferred, timeout=_sec_timeout, reactor=self.reactor, ) response = await request_deferred except DNSLookupError as e: raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e except Exception as e: raise RequestSendFailed(e, can_retry=True) from e incoming_responses_counter.labels( request.method, response.code ).inc() set_tag(tags.HTTP_STATUS_CODE, response.code) response_phrase = response.phrase.decode("ascii", errors="replace") if 200 <= response.code < 300: logger.debug( "{%s} [%s] Got response headers: %d %s", request.txn_id, request.destination, response.code, response_phrase, ) pass else: logger.info( "{%s} [%s] Got response headers: %d %s", request.txn_id, request.destination, response.code, response_phrase, ) # :'( # Update transactions table? 
d = treq.content(response) d = timeout_deferred( d, timeout=_sec_timeout, reactor=self.reactor ) try: body = await make_deferred_yieldable(d) except Exception as e: # Eh, we're already going to raise an exception so lets # ignore if this fails. logger.warning( "{%s} [%s] Failed to get error response: %s %s: %s", request.txn_id, request.destination, request.method, url_str, _flatten_response_never_received(e), ) body = None exc = HttpResponseException( response.code, response_phrase, body ) # Retry if the error is a 429 (Too Many Requests), # otherwise just raise a standard HttpResponseException if response.code == 429: raise RequestSendFailed(exc, can_retry=True) from exc else: raise exc break except RequestSendFailed as e: logger.info( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, request.destination, request.method, url_str, _flatten_response_never_received(e.inner_exception), ) if not e.can_retry: raise if retries_left and not timeout: if long_retries: delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) delay = min(delay, 60) delay *= random.uniform(0.8, 1.4) else: delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) delay = min(delay, 2) delay *= random.uniform(0.8, 1.4) logger.debug( "{%s} [%s] Waiting %ss before re-sending...", request.txn_id, request.destination, delay, ) await self.clock.sleep(delay) retries_left -= 1 else: raise except Exception as e: logger.warning( "{%s} [%s] Request failed: %s %s: %s", request.txn_id, request.destination, request.method, url_str, _flatten_response_never_received(e), ) raise return response def build_auth_headers( self, destination: Optional[bytes], method: bytes, url_bytes: bytes, content: Optional[JsonDict] = None, destination_is: Optional[bytes] = None, ) -> List[bytes]: """ Builds the Authorization headers for a federation request Args: destination: The destination homeserver of the request. May be None if the destination is an identity server, in which case destination_is must be non-None. 
method: The HTTP method of the request url_bytes: The URI path of the request content: The body of the request destination_is: As 'destination', but if the destination is an identity server Returns: A list of headers to be added as "Authorization:" headers """ request = { "method": method.decode("ascii"), "uri": url_bytes.decode("ascii"), "origin": self.server_name, } if destination is not None: request["destination"] = destination.decode("ascii") if destination_is is not None: request["destination_is"] = destination_is.decode("ascii") if content is not None: request["content"] = content request = sign_json(request, self.server_name, self.signing_key) auth_headers = [] for key, sig in request["signatures"][self.server_name].items(): auth_headers.append( ( 'X-Matrix origin=%s,key="%s",sig="%s"' % (self.server_name, key, sig) ).encode("ascii") ) return auth_headers async def put_json( self, destination: str, path: str, args: Optional[QueryArgs] = None, data: Optional[JsonDict] = None, json_data_callback: Optional[Callable[[], JsonDict]] = None, long_retries: bool = False, timeout: Optional[int] = None, ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, ) -> Union[JsonDict, list]: """ Sends the specified json data using PUT Args: destination: The remote server to send the HTTP request to. path: The HTTP path. args: query params data: A dict containing the data that will be used as the request body. This will be encoded as JSON. json_data_callback: A callable returning the dict to use as the request body. long_retries: whether to use the long retry algorithm. See docs on _send_request for details. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. 
Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. backoff_on_404: True if we should count a 404 response as a failure of the server (and should therefore back off future requests). try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. This will be attempted before backing off if backing off has been enabled. Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" request = MatrixFederationRequest( method="PUT", destination=destination, path=path, query=args, json_callback=json_data_callback, json=data, ) start_ms = self.clock.time_msec() response = await self._send_request_with_optional_trailing_slash( request, try_trailing_slash_on_400, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, long_retries=long_retries, timeout=timeout, ) if timeout is not None: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms ) return body async def post_json( self, destination: str, path: str, data: Optional[JsonDict] = None, long_retries: bool = False, timeout: Optional[int] = None, ignore_backoff: bool = False, args: Optional[QueryArgs] = None, ) -> Union[JsonDict, list]: """ Sends the specified json data using POST Args: destination: The remote server to send the HTTP request to. path: The HTTP path. data: A dict containing the data that will be used as the request body. This will be encoded as JSON. long_retries: whether to use the long retry algorithm. See docs on _send_request for details. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. args: query params Returns: dict|list: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ request = MatrixFederationRequest( method="POST", destination=destination, path=path, query=args, json=data ) start_ms = self.clock.time_msec() response = await self._send_request( request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) if timeout: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms, ) return body async def get_json( self, destination: str, path: str, args: Optional[QueryArgs] = None, retry_on_dns_fail: bool = True, timeout: Optional[int] = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, ) -> Union[JsonDict, list]: """ GETs some json from the given host homeserver and path Args: destination: The remote server to send the HTTP request to. path: The HTTP path. args: A dictionary used to create query strings, defaults to None. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ request = MatrixFederationRequest( method="GET", destination=destination, path=path, query=args ) start_ms = self.clock.time_msec() response = await self._send_request_with_optional_trailing_slash( request, try_trailing_slash_on_400, backoff_on_404=False, ignore_backoff=ignore_backoff, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ) if timeout is not None: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms ) return body async def delete_json( self, destination: str, path: str, long_retries: bool = False, timeout: Optional[int] = None, ignore_backoff: bool = False, args: Optional[QueryArgs] = None, ) -> Union[JsonDict, list]: """Send a DELETE request to the remote expecting some json response Args: destination: The remote server to send the HTTP request to. path: The HTTP path. long_retries: whether to use the long retry algorithm. See docs on _send_request for details. timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. Note that we may make several attempts to send the request; this timeout applies to the time spent waiting for response headers for *each* attempt (including connection time) as well as the time spent reading the response body after a 200 response. ignore_backoff: true to ignore the historical backoff data and try the request anyway. args: query params Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. """ request = MatrixFederationRequest( method="DELETE", destination=destination, path=path, query=args ) start_ms = self.clock.time_msec() response = await self._send_request( request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) if timeout is not None: _sec_timeout = timeout / 1000 else: _sec_timeout = self.default_timeout body = await _handle_json_response( self.reactor, _sec_timeout, request, response, start_ms ) return body async def get_file( self, destination: str, path: str, output_stream, args: Optional[QueryArgs] = None, retry_on_dns_fail: bool = True, max_size: Optional[int] = None, ignore_backoff: bool = False, ) -> Tuple[int, Dict[bytes, List[bytes]]]: """GETs a file from a given homeserver Args: destination: The remote server to send the HTTP request to. path: The HTTP path to GET. output_stream: File to write the response body to. args: Optional dictionary used to create the query string. ignore_backoff: true to ignore the historical backoff data and try the request anyway. Returns: Resolves with an (int,dict) tuple of the file length and a dict of the response headers. Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). NotRetryingDestination: If we are not yet ready to retry this server. FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" request = MatrixFederationRequest( method="GET", destination=destination, path=path, query=args ) response = await self._send_request( request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff ) headers = dict(response.headers.getAllRawHeaders()) try: d = readBodyToFile(response, output_stream, max_size) d.addTimeout(self.default_timeout, self.reactor) length = await make_deferred_yieldable(d) except Exception as e: logger.warning( "{%s} [%s] Error reading response: %s", request.txn_id, request.destination, e, ) raise logger.info( "{%s} [%s] Completed: %d %s [%d bytes] %s %s", request.txn_id, request.destination, response.code, response.phrase.decode("ascii", errors="replace"), length, request.method, request.uri.decode("ascii"), ) return (length, headers) def _flatten_response_never_received(e): if hasattr(e, "reasons"): reasons = ", ".join( _flatten_response_never_received(f.value) for f in e.reasons ) return "%s:[%s]" % (type(e).__name__, reasons) else: return repr(e) def check_content_type_is_json(headers: Headers) -> None: """ Check that a set of HTTP headers have a Content-Type header, and that it is application/json. Args: headers: headers to check Raises: RequestSendFailed: if the Content-Type header is missing or isn't JSON """ c_type = headers.getRawHeaders(b"Content-Type") if c_type is None: raise RequestSendFailed( RuntimeError("No Content-Type header received from remote server"), can_retry=False, ) c_type = c_type[0].decode("ascii") # only the first header val, options = cgi.parse_header(c_type) if val != "application/json": raise RequestSendFailed( RuntimeError( "Remote server sent Content-Type header of '%s', not 'application/json'" % c_type, ), can_retry=False, )
./CrossVul/dataset_final_sorted/CWE-601/py/bad_1915_11
crossvul-python_data_good_1915_8
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for interacting with Identity Servers""" import logging import urllib.parse from typing import Awaitable, Callable, Dict, List, Optional, Tuple from synapse.api.errors import ( CodeMessageException, Codes, HttpResponseException, SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, Requester from synapse.util import json_decoder from synapse.util.hash import sha256_and_url_safe_base64 from synapse.util.stringutils import assert_valid_client_secret, random_string from ._base import BaseHandler logger = logging.getLogger(__name__) id_server_scheme = "https://" class IdentityHandler(BaseHandler): def __init__(self, hs): super().__init__(hs) # An HTTP client for contacting trusted URLs. self.http_client = SimpleHttpClient(hs) # An HTTP client for contacting identity servers specified by clients. 
        # An HTTP client that is blacklisted from contacting internal IP ranges;
        # used for URLs supplied by clients rather than by server config.
        self.blacklisting_http_client = SimpleHttpClient(
            hs, ip_blacklist=hs.config.federation_ip_range_blacklist
        )
        self.federation_http_client = hs.get_federation_http_client()
        self.hs = hs

    async def threepid_from_creds(
        self, id_server: str, creds: Dict[str, str]
    ) -> Optional[JsonDict]:
        """
        Retrieve and validate a threepid identifier from a "credentials" dictionary against a
        given identity server

        Args:
            id_server: The identity server to validate 3PIDs against. Must be a
                complete URL including the protocol (http(s)://).
                NOTE(review): unlike most other id_server arguments in this
                class, this one is used as-is as a URL prefix — confirm callers
                always pass a trusted, fully-qualified URL.
            creds: Dictionary containing the following keys:
                * client_secret|clientSecret: A unique secret str provided by the client
                * sid: The ID of the validation session

        Returns:
            A dictionary consisting of response params to the /getValidated3pid
            endpoint of the Identity Service API, or None if the threepid was not found

        Raises:
            SynapseError: if client_secret/sid are missing from creds, or the
                identity server times out.
        """
        # Accept both the snake_case and camelCase spellings of the secret key.
        client_secret = creds.get("client_secret") or creds.get("clientSecret")
        if not client_secret:
            raise SynapseError(
                400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
            )
        assert_valid_client_secret(client_secret)

        session_id = creds.get("sid")
        if not session_id:
            raise SynapseError(
                400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM
            )

        query_params = {"sid": session_id, "client_secret": client_secret}

        url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid"

        try:
            # This uses the *trusted* (non-blacklisting) client: id_server here
            # comes from server configuration, not from an arbitrary client.
            data = await self.http_client.get_json(url, query_params)
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except HttpResponseException as e:
            # A non-2xx response means the validation could not be confirmed;
            # report failure to the caller rather than raising.
            logger.info(
                "%s returned %i for threepid validation for: %s",
                id_server,
                e.code,
                creds,
            )
            return None

        # Old versions of Sydent return a 200 http code even on a failed validation
        # check.
Thus, in addition to the HttpResponseException check above (which # checks for non-200 errors), we need to make sure validation_session isn't # actually an error, identified by the absence of a "medium" key # See https://github.com/matrix-org/sydent/issues/215 for details if "medium" in data: return data logger.info("%s reported non-validated threepid: %s", id_server, creds) return None async def bind_threepid( self, client_secret: str, sid: str, mxid: str, id_server: str, id_access_token: Optional[str] = None, use_v2: bool = True, ) -> JsonDict: """Bind a 3PID to an identity server Args: client_secret: A unique secret provided by the client sid: The ID of the validation session mxid: The MXID to bind the 3PID to id_server: The domain of the identity server to query id_access_token: The access token to authenticate to the identity server with, if necessary. Required if use_v2 is true use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True Returns: The response from the identity server """ logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server) # If an id_access_token is not supplied, force usage of v1 if id_access_token is None: use_v2 = False # Decide which API endpoint URLs to use headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} if use_v2: bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) headers["Authorization"] = create_id_access_token_header(id_access_token) # type: ignore else: bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) try: # Use the blacklisting http client as this call is only to identity servers # provided by a client data = await self.blacklisting_http_client.post_json_get_json( bind_url, bind_data, headers=headers ) # Remember where we bound the threepid await self.store.add_user_bound_threepid( user_id=mxid, medium=data["medium"], address=data["address"], id_server=id_server, ) return data except HttpResponseException as e: if 
e.code != 404 or not use_v2: logger.error("3PID bind failed with Matrix error: %r", e) raise e.to_synapse_error() except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except CodeMessageException as e: data = json_decoder.decode(e.msg) # XXX WAT? return data logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url) res = await self.bind_threepid( client_secret, sid, mxid, id_server, id_access_token, use_v2=False ) return res async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: """Attempt to remove a 3PID from an identity server, or if one is not provided, all identity servers we're aware the binding is present on Args: mxid: Matrix user ID of binding to be removed threepid: Dict with medium & address of binding to be removed, and an optional id_server. Raises: SynapseError: If we failed to contact the identity server Returns: True on success, otherwise False if the identity server doesn't support unbinding (or no identity server found to contact). 
""" if threepid.get("id_server"): id_servers = [threepid["id_server"]] else: id_servers = await self.store.get_id_servers_user_bound( user_id=mxid, medium=threepid["medium"], address=threepid["address"] ) # We don't know where to unbind, so we don't have a choice but to return if not id_servers: return False changed = True for id_server in id_servers: changed &= await self.try_unbind_threepid_with_id_server( mxid, threepid, id_server ) return changed async def try_unbind_threepid_with_id_server( self, mxid: str, threepid: dict, id_server: str ) -> bool: """Removes a binding from an identity server Args: mxid: Matrix user ID of binding to be removed threepid: Dict with medium & address of binding to be removed id_server: Identity server to unbind from Raises: SynapseError: If we failed to contact the identity server Returns: True on success, otherwise False if the identity server doesn't support unbinding """ url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii") content = { "mxid": mxid, "threepid": {"medium": threepid["medium"], "address": threepid["address"]}, } # we abuse the federation http client to sign the request, but we have to send it # using the normal http client since we don't want the SRV lookup and want normal # 'browser-like' HTTPS. 
auth_headers = self.federation_http_client.build_auth_headers( destination=None, method=b"POST", url_bytes=url_bytes, content=content, destination_is=id_server.encode("ascii"), ) headers = {b"Authorization": auth_headers} try: # Use the blacklisting http client as this call is only to identity servers # provided by a client await self.blacklisting_http_client.post_json_get_json( url, content, headers ) changed = True except HttpResponseException as e: changed = False if e.code in (400, 404, 501): # The remote server probably doesn't support unbinding (yet) logger.warning("Received %d response while unbinding threepid", e.code) else: logger.error("Failed to unbind threepid on identity server: %s", e) raise SynapseError(500, "Failed to contact identity server") except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") await self.store.remove_user_bound_threepid( user_id=mxid, medium=threepid["medium"], address=threepid["address"], id_server=id_server, ) return changed async def send_threepid_validation( self, email_address: str, client_secret: str, send_attempt: int, send_email_func: Callable[[str, str, str, str], Awaitable], next_link: Optional[str] = None, ) -> str: """Send a threepid validation email for password reset or registration purposes Args: email_address: The user's email address client_secret: The provided client secret send_attempt: Which send attempt this is send_email_func: A function that takes an email address, token, client_secret and session_id, sends an email and returns an Awaitable. 
next_link: The URL to redirect the user to after validation Returns: The new session_id upon success Raises: SynapseError is an error occurred when sending the email """ # Check that this email/client_secret/send_attempt combo is new or # greater than what we've seen previously session = await self.store.get_threepid_validation_session( "email", client_secret, address=email_address, validated=False ) # Check to see if a session already exists and that it is not yet # marked as validated if session and session.get("validated_at") is None: session_id = session["session_id"] last_send_attempt = session["last_send_attempt"] # Check that the send_attempt is higher than previous attempts if send_attempt <= last_send_attempt: # If not, just return a success without sending an email return session_id else: # An non-validated session does not exist yet. # Generate a session id session_id = random_string(16) if next_link: # Manipulate the next_link to add the sid, because the caller won't get # it until we send a response, by which time we've sent the mail. if "?" in next_link: next_link += "&" else: next_link += "?" 
next_link += "sid=" + urllib.parse.quote(session_id) # Generate a new validation token token = random_string(32) # Send the mail with the link containing the token, client_secret # and session_id try: await send_email_func(email_address, token, client_secret, session_id) except Exception: logger.exception( "Error sending threepid validation email to %s", email_address ) raise SynapseError(500, "An error was encountered when sending the email") token_expires = ( self.hs.get_clock().time_msec() + self.hs.config.email_validation_token_lifetime ) await self.store.start_or_continue_validation_session( "email", email_address, session_id, client_secret, send_attempt, next_link, token, token_expires, ) return session_id async def requestEmailToken( self, id_server: str, email: str, client_secret: str, send_attempt: int, next_link: Optional[str] = None, ) -> JsonDict: """ Request an external server send an email on our behalf for the purposes of threepid validation. Args: id_server: The identity server to proxy to email: The email to send the message to client_secret: The unique client_secret sends by the user send_attempt: Which attempt this is next_link: A link to redirect the user to once they submit the token Returns: The json response body from the server """ params = { "email": email, "client_secret": client_secret, "send_attempt": send_attempt, } if next_link: params["next_link"] = next_link if self.hs.config.using_identity_server_from_trusted_list: # Warn that a deprecated config option is in use logger.warning( 'The config option "trust_identity_server_for_password_resets" ' 'has been replaced by "account_threepid_delegate". ' "Please consult the sample config at docs/sample_config.yaml for " "details and update your config file." 
) try: data = await self.http_client.post_json_get_json( id_server + "/_matrix/identity/api/v1/validate/email/requestToken", params, ) return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") async def requestMsisdnToken( self, id_server: str, country: str, phone_number: str, client_secret: str, send_attempt: int, next_link: Optional[str] = None, ) -> JsonDict: """ Request an external server send an SMS message on our behalf for the purposes of threepid validation. Args: id_server: The identity server to proxy to country: The country code of the phone number phone_number: The number to send the message to client_secret: The unique client_secret sends by the user send_attempt: Which attempt this is next_link: A link to redirect the user to once they submit the token Returns: The json response body from the server """ params = { "country": country, "phone_number": phone_number, "client_secret": client_secret, "send_attempt": send_attempt, } if next_link: params["next_link"] = next_link if self.hs.config.using_identity_server_from_trusted_list: # Warn that a deprecated config option is in use logger.warning( 'The config option "trust_identity_server_for_password_resets" ' 'has been replaced by "account_threepid_delegate". ' "Please consult the sample config at docs/sample_config.yaml for " "details and update your config file." 
) try: data = await self.http_client.post_json_get_json( id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken", params, ) except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") assert self.hs.config.public_baseurl # we need to tell the client to send the token back to us, since it doesn't # otherwise know where to send it, so add submit_url response parameter # (see also MSC2078) data["submit_url"] = ( self.hs.config.public_baseurl + "_matrix/client/unstable/add_threepid/msisdn/submit_token" ) return data async def validate_threepid_session( self, client_secret: str, sid: str ) -> Optional[JsonDict]: """Validates a threepid session with only the client secret and session ID Tries validating against any configured account_threepid_delegates as well as locally. Args: client_secret: A secret provided by the client sid: The ID of the session Returns: The json response if validation was successful, otherwise None """ # XXX: We shouldn't need to keep wrapping and unwrapping this value threepid_creds = {"client_secret": client_secret, "sid": sid} # We don't actually know which medium this 3PID is. 
Thus we first assume it's email, # and if validation fails we try msisdn validation_session = None # Try to validate as email if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE: # Ask our delegated email identity server validation_session = await self.threepid_from_creds( self.hs.config.account_threepid_delegate_email, threepid_creds ) elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: # Get a validated session matching these details validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True ) if validation_session: return validation_session # Try to validate as msisdn if self.hs.config.account_threepid_delegate_msisdn: # Ask our delegated msisdn identity server validation_session = await self.threepid_from_creds( self.hs.config.account_threepid_delegate_msisdn, threepid_creds ) return validation_session async def proxy_msisdn_submit_token( self, id_server: str, client_secret: str, sid: str, token: str ) -> JsonDict: """Proxy a POST submitToken request to an identity server for verification purposes Args: id_server: The identity server URL to contact client_secret: Secret provided by the client sid: The ID of the session token: The verification token Raises: SynapseError: If we failed to contact the identity server Returns: The response dict from the identity server """ body = {"client_secret": client_secret, "sid": sid, "token": token} try: return await self.http_client.post_json_get_json( id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken", body, ) except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) raise SynapseError(400, "Error contacting the identity server") async def lookup_3pid( self, id_server: str, medium: str, address: str, id_access_token: Optional[str] = None, ) -> Optional[str]: """Looks 
up a 3pid in the passed identity server. Args: id_server: The server name (including port, if required) of the identity server to use. medium: The type of the third party identifier (e.g. "email"). address: The third party identifier (e.g. "foo@example.com"). id_access_token: The access token to authenticate to the identity server with Returns: the matrix ID of the 3pid, or None if it is not recognized. """ if id_access_token is not None: try: results = await self._lookup_3pid_v2( id_server, id_access_token, medium, address ) return results except Exception as e: # Catch HttpResponseExcept for a non-200 response code # Check if this identity server does not know about v2 lookups if isinstance(e, HttpResponseException) and e.code == 404: # This is an old identity server that does not yet support v2 lookups logger.warning( "Attempted v2 lookup on v1 identity server %s. Falling " "back to v1", id_server, ) else: logger.warning("Error when looking up hashing details: %s", e) return None return await self._lookup_3pid_v1(id_server, medium, address) async def _lookup_3pid_v1( self, id_server: str, medium: str, address: str ) -> Optional[str]: """Looks up a 3pid in the passed identity server using v1 lookup. Args: id_server: The server name (including port, if required) of the identity server to use. medium: The type of the third party identifier (e.g. "email"). address: The third party identifier (e.g. "foo@example.com"). Returns: the matrix ID of the 3pid, or None if it is not recognized. """ try: data = await self.blacklisting_http_client.get_json( "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), {"medium": medium, "address": address}, ) if "mxid" in data: # note: we used to verify the identity server's signature here, but no longer # require or validate it. 
See the following for context:
# https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
            return data["mxid"]
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except IOError as e:
            # Best-effort: a v1 lookup failure is reported as "not found"
            # rather than propagated to the caller.
            logger.warning("Error from v1 identity server lookup: %s" % (e,))

        return None

    async def _lookup_3pid_v2(
        self, id_server: str, id_access_token: str, medium: str, address: str
    ) -> Optional[str]:
        """Looks up a 3pid in the passed identity server using v2 lookup.

        Args:
            id_server: The server name (including port, if required)
                of the identity server to use.
            id_access_token: The access token to authenticate to the identity
                server with.
            medium: The type of the third party identifier (e.g. "email").
            address: The third party identifier (e.g. "foo@example.com").

        Returns:
            the matrix ID of the 3pid, or None if it is not recognised.

        Raises:
            SynapseError: on timeout, on a malformed /hash_details response,
                or if the identity server supports no lookup algorithm that
                this homeserver also supports.
        """
        # Check what hashing details are supported by this identity server
        try:
            hash_details = await self.blacklisting_http_client.get_json(
                "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
                {"access_token": id_access_token},
            )
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")

        if not isinstance(hash_details, dict):
            logger.warning(
                "Got non-dict object when checking hash details of %s%s: %s",
                id_server_scheme,
                id_server,
                hash_details,
            )
            raise SynapseError(
                400,
                "Non-dict object from %s%s during v2 hash_details request: %s"
                % (id_server_scheme, id_server, hash_details),
            )

        # Extract information from hash_details
        supported_lookup_algorithms = hash_details.get("algorithms")
        lookup_pepper = hash_details.get("lookup_pepper")
        if (
            not supported_lookup_algorithms
            or not isinstance(supported_lookup_algorithms, list)
            or not lookup_pepper
            or not isinstance(lookup_pepper, str)
        ):
            raise SynapseError(
                400,
                "Invalid hash details received from identity server %s%s: %s"
                % (id_server_scheme, id_server, hash_details),
            )

        # Check if any of the supported lookup algorithms are present.
        # Hashed lookup is preferred over plaintext when both are offered.
        if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
            # Perform a hashed lookup
            lookup_algorithm = LookupAlgorithm.SHA256

            # Hash address, medium and the pepper with sha256
            to_hash = "%s %s %s" % (address, medium, lookup_pepper)
            lookup_value = sha256_and_url_safe_base64(to_hash)
        elif LookupAlgorithm.NONE in supported_lookup_algorithms:
            # Perform a non-hashed lookup
            lookup_algorithm = LookupAlgorithm.NONE

            # Combine together plaintext address and medium
            lookup_value = "%s %s" % (address, medium)
        else:
            logger.warning(
                "None of the provided lookup algorithms of %s are supported: %s",
                id_server,
                supported_lookup_algorithms,
            )
            raise SynapseError(
                400,
                "Provided identity server does not support any v2 lookup "
                "algorithms that this homeserver supports.",
            )

        # Authenticate with identity server given the access token from the client
        headers = {"Authorization": create_id_access_token_header(id_access_token)}

        try:
            lookup_results = await self.blacklisting_http_client.post_json_get_json(
                "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
                {
                    "addresses": [lookup_value],
                    "algorithm": lookup_algorithm,
                    "pepper": lookup_pepper,
                },
                headers=headers,
            )
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
        except Exception as e:
            logger.warning("Error when performing a v2 3pid lookup: %s", e)
            raise SynapseError(
                500, "Unknown error occurred during identity server lookup"
            )

        # Check for a mapping from what we looked up to an MXID
        if "mappings" not in lookup_results or not isinstance(
            lookup_results["mappings"], dict
        ):
            logger.warning("No results from 3pid lookup")
            return None

        # Return the MXID if it's available, or None otherwise
        mxid = lookup_results["mappings"].get(lookup_value)
        return mxid

    async def ask_id_server_for_third_party_invite(
        self,
        requester: Requester,
        id_server: str,
        medium: str,
        address: str,
        room_id: str,
        inviter_user_id: str,
        room_alias: str,
        room_avatar_url: str,
        room_join_rules: str,
        room_name: str,
        inviter_display_name: str,
        inviter_avatar_url: str,
        id_access_token: Optional[str] = None,
    ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
        """
        Asks an identity server for a third party invite.

        Args:
            requester
            id_server: hostname + optional port for the identity server.
            medium: The literal string "email".
            address: The third party address being invited.
            room_id: The ID of the room to which the user is invited.
            inviter_user_id: The user ID of the inviter.
            room_alias: An alias for the room, for cosmetic notifications.
            room_avatar_url: The URL of the room's avatar, for cosmetic
                notifications.
            room_join_rules: The join rules of the email (e.g. "public").
            room_name: The m.room.name of the room.
            inviter_display_name: The current display name of the
                inviter.
            inviter_avatar_url: The URL of the inviter's avatar.
            id_access_token (str|None): The access token to authenticate to the identity
                server with

        Returns:
            A tuple containing:
                token: The token which must be signed to prove authenticity.
                public_keys ([{"public_key": str, "key_validity_url": str}]):
                    public_key is a base64-encoded ed25519 public key.
                fallback_public_key: One element from public_keys.
                display_name: A user-friendly name to represent the invited user.
        """
        invite_config = {
            "medium": medium,
            "address": address,
            "room_id": room_id,
            "room_alias": room_alias,
            "room_avatar_url": room_avatar_url,
            "room_join_rules": room_join_rules,
            "room_name": room_name,
            "sender": inviter_user_id,
            "sender_display_name": inviter_display_name,
            "sender_avatar_url": inviter_avatar_url,
        }

        # Add the identity service access token to the JSON body and use the v2
        # Identity Service endpoints if id_access_token is present
        data = None
        base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)

        if id_access_token:
            key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
                id_server_scheme,
                id_server,
            )

            # Attempt a v2 lookup
            url = base_url + "/v2/store-invite"
            try:
                data = await self.blacklisting_http_client.post_json_get_json(
                    url,
                    invite_config,
                    {"Authorization": create_id_access_token_header(id_access_token)},
                )
            except RequestTimedOutError:
                raise SynapseError(500, "Timed out contacting identity server")
            except HttpResponseException as e:
                # A 404 means the server has no v2 endpoint; fall through to
                # the deprecated v1 endpoint below. Anything else is fatal.
                if e.code != 404:
                    logger.info("Failed to POST %s with JSON: %s", url, e)
                    raise e

        if data is None:
            # v1 fallback: this uses the deprecated API path; see the v2
            # branch above for the preferred flow.
            key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
                id_server_scheme,
                id_server,
            )
            url = base_url + "/api/v1/store-invite"

            try:
                data = await self.blacklisting_http_client.post_json_get_json(
                    url, invite_config
                )
            except RequestTimedOutError:
                raise SynapseError(500, "Timed out contacting identity server")
            except HttpResponseException as e:
                logger.warning(
                    "Error trying to call /store-invite on %s%s: %s",
                    id_server_scheme,
                    id_server,
                    e,
                )

            if data is None:
                # Some identity servers may only support application/x-www-form-urlencoded
                # types. This is especially true with old instances of Sydent, see
                # https://github.com/matrix-org/sydent/pull/170
                try:
                    data = await self.blacklisting_http_client.post_urlencoded_get_json(
                        url, invite_config
                    )
                except HttpResponseException as e:
                    logger.warning(
                        "Error calling /store-invite on %s%s with fallback "
                        "encoding: %s",
                        id_server_scheme,
                        id_server,
                        e,
                    )
                    raise e

        # TODO: Check for success
        token = data["token"]
        public_keys = data.get("public_keys", [])
        if "public_key" in data:
            fallback_public_key = {
                "public_key": data["public_key"],
                "key_validity_url": key_validity_url,
            }
        else:
            fallback_public_key = public_keys[0]

        if not public_keys:
            public_keys.append(fallback_public_key)
        display_name = data["display_name"]
        return token, public_keys, fallback_public_key, display_name


def create_id_access_token_header(id_access_token: str) -> List[str]:
    """Create an Authorization header for passing to SimpleHttpClient as the header value
    of an HTTP request.

    Args:
        id_access_token: An identity server access token.

    Returns:
        The ascii-encoded bearer token encased in a list.
    """
    # Prefix with Bearer
    bearer_token = "Bearer %s" % id_access_token

    # Encode headers to standard ascii
    # NOTE(review): the encoded bytes are discarded — this call only serves to
    # raise UnicodeEncodeError early if the token is not pure ASCII. Confirm
    # whether the caller actually expects a str (as returned) or bytes.
    bearer_token.encode("ascii")

    # Return as a list as that's how SimpleHttpClient takes header values
    return [bearer_token]


class LookupAlgorithm:
    """
    Supported hashing algorithms when performing a 3PID lookup.

    SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64
        encoding
    NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext
    """

    SHA256 = "sha256"
    NONE = "none"
./CrossVul/dataset_final_sorted/CWE-601/py/good_1915_8
crossvul-python_data_good_1315_0
# Zulip's main markdown implementation. See docs/subsystems/markdown.md for # detailed documentation on our markdown syntax. from typing import (Any, Callable, Dict, Iterable, List, NamedTuple, Optional, Set, Tuple, TypeVar, Union, cast) from mypy_extensions import TypedDict from typing.re import Match, Pattern import markdown import logging import traceback import urllib import urllib.parse import re import os import html import time import functools import ujson import xml.etree.cElementTree as etree from xml.etree.cElementTree import Element from collections import deque, defaultdict import requests from django.conf import settings from django.db.models import Q from markdown.extensions import codehilite, nl2br, tables from zerver.lib.bugdown import fenced_code from zerver.lib.bugdown.fenced_code import FENCE_RE from zerver.lib.camo import get_camo_url from zerver.lib.emoji import translate_emoticons, emoticon_regex from zerver.lib.mention import possible_mentions, \ possible_user_group_mentions, extract_user_group from zerver.lib.url_encoding import encode_stream from zerver.lib.thumbnail import user_uploads_or_external from zerver.lib.timeout import timeout, TimeoutExpired from zerver.lib.cache import cache_with_key, NotFoundInCache from zerver.lib.url_preview import preview as link_preview from zerver.models import ( all_realm_filters, get_active_streams, MAX_MESSAGE_LENGTH, Message, Realm, realm_filters_for_realm, UserProfile, UserGroup, UserGroupMembership, ) import zerver.lib.mention as mention from zerver.lib.tex import render_tex from zerver.lib.exceptions import BugdownRenderingException ReturnT = TypeVar('ReturnT') def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]: ''' Use this decorator with extreme caution. The function you wrap should have no dependency on any arguments (no args, no kwargs) nor should it depend on any global state. 
    '''
    val = None

    def cache_wrapper() -> ReturnT:
        nonlocal val
        if val is None:
            val = method()
        return val

    return cache_wrapper

FullNameInfo = TypedDict('FullNameInfo', {
    'id': int,
    'email': str,
    'full_name': str,
})

DbData = Dict[str, Any]

# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1

_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]

AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'

def verbose_compile(pattern: str) -> Pattern:
    # Wrap the pattern so that a match captures the text before and after it;
    # compiled in VERBOSE mode so patterns may contain comments/whitespace.
    return re.compile(
        "^(.*?)%s(.*?)$" % pattern,
        re.DOTALL | re.UNICODE | re.VERBOSE
    )

def normal_compile(pattern: str) -> Pattern:
    # Like verbose_compile, but without VERBOSE and with a greedy tail group.
    return re.compile(
        r"^(.*?)%s(.*)$" % pattern,
        re.DOTALL | re.UNICODE
    )

STREAM_LINK_REGEX = r"""
    (?<![^\s'"\(,:<])            # Start after whitespace or specified chars
    \#\*\*                       # and after hash sign followed by double asterisks
        (?P<stream_name>[^\*]+)  # stream name can contain anything
    \*\*                         # ends by double asterisks
"""

@one_time
def get_compiled_stream_link_regex() -> Pattern:
    return verbose_compile(STREAM_LINK_REGEX)

LINK_REGEX = None  # type: Pattern

def get_web_link_regex() -> Pattern:
    # We create this one time, but not at startup.  So the
    # first message rendered in any process will have some
    # extra costs.  It's roughly 75ms to run this code, so
    # caching the value in LINK_REGEX is super important here.
    global LINK_REGEX
    if LINK_REGEX is not None:
        return LINK_REGEX

    tlds = '|'.join(list_of_tlds())

    # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
    #
    # We detect a url either by the `https?://` or by building around the TLD.

    # In lieu of having a recursive regex (which python doesn't support) to match
    # arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six
    # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
    # and the paren_group matches text with, optionally, a matching set of parens
    inner_paren_contents = r"[^\s()\"]*"
    paren_group = r"""
                [^\s()\"]*?            # Containing characters that won't end the URL
                (?: \( %s \)           # and more characters in matched parens
                    [^\s()\"]*?        # followed by more characters
                )*                     # zero-or-more sets of paired parens
            """
    nested_paren_chunk = paren_group
    for i in range(6):
        nested_paren_chunk = nested_paren_chunk % (paren_group,)
    nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)

    file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
    REGEX = r"""
        (?<![^\s'"\(,:<])    # Start after whitespace or specified chars
                             # (Double-negative lookbehind to allow start-of-string)
        (?P<url>             # Main group
            (?:(?:           # Domain part
                https?://[\w.:@-]+?   # If it has a protocol, anything goes.
               |(?:                   # Or, if not, be more strict to avoid false-positives
                    (?:[\w-]+\.)+     # One or more domain components, separated by dots
                    (?:%s)            # TLDs (filled in via format from tlds-alpha-by-domain.txt)
                )
            )
            (?:/             # A path, beginning with /
                %s           # zero-to-6 sets of paired parens
            )?)              # Path is optional
            |(?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
            %s               # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
            |(?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34})  # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
        )
        (?=                            # URL must be followed by (not included in group)
            [!:;\?\),\.\'\"\>]*         # Optional punctuation characters
            (?:\Z|\s)                  # followed by whitespace or end of string
        )
        """ % (tlds, nested_paren_chunk, file_links)
    LINK_REGEX = verbose_compile(REGEX)
    return LINK_REGEX

def clear_state_for_testing() -> None:
    # The link regex never changes in production, but our tests
    # try out both sides of ENABLE_FILE_LINKS, so we need
    # a way to clear it.
    global LINK_REGEX
    LINK_REGEX = None

# NOTE(review): this binds the *root* logger; a module-level logger
# (logging.getLogger(__name__)) may have been intended — confirm.
bugdown_logger = logging.getLogger()

def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
    """ If the link points to a local destination we can just switch to that
    instead of opening a new tab. """

    if db_data:
        realm_uri_prefix = db_data['realm_uri'] + "/"
        if link.startswith(realm_uri_prefix):
            # +1 to skip the `/` before the hash link.
            # NOTE(review): the comment above looks stale — there is no +1 in
            # the slice; the prefix (which already ends in '/') is stripped.
            return link[len(realm_uri_prefix):]

    return link

def url_embed_preview_enabled(message: Optional[Message]=None,
                              realm: Optional[Realm]=None,
                              no_previews: Optional[bool]=False) -> bool:
    # Global and per-call switches take priority over realm configuration.
    if not settings.INLINE_URL_EMBED_PREVIEW:
        return False

    if no_previews:
        return False

    if realm is None:
        if message is not None:
            realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_url_embed_preview

def image_preview_enabled(message: Optional[Message]=None,
                          realm: Optional[Realm]=None,
                          no_previews: Optional[bool]=False) -> bool:
    # Mirrors url_embed_preview_enabled, but for inline image previews.
    if not settings.INLINE_IMAGE_PREVIEW:
        return False

    if no_previews:
        return False

    if realm is None:
        if message is not None:
            realm = message.get_realm()

    if realm is None:
        # realm can be None for odd use cases
        # like generating documentation or running
        # test code
        return True

    return realm.inline_image_preview

def list_of_tlds() -> List[str]:
    """Return all known TLDs, longest first, for building the link regex."""
    # HACK we manually blacklist a few domains
    # NOTE(review): entries carry '\n' because they are compared against raw
    # (unstripped, uppercase) lines of the IANA file below.
    blacklist = ['PY\n', "MD\n"]

    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')
            if tld not in blacklist and not tld[0].startswith('#')]
    tlds.sort(key=len, reverse=True)
    return tlds

def walk_tree(root: Element,
              processor: Callable[[Element], Optional[_T]],
              stop_after_first: bool=False) -> List[_T]:
    """Breadth-first walk of an ElementTree, collecting non-None processor results."""
    results = []
    queue = deque([root])

    while queue:
        currElement = queue.popleft()
        for child in currElement.getchildren():
            if child.getchildren():
                queue.append(child)

            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results

    return results

ElementFamily = NamedTuple('ElementFamily', [
    ('grandparent', Optional[Element]),
    ('parent', Element),
    ('child', Element)
])

ResultWithFamily = NamedTuple('ResultWithFamily', [
    ('family', ElementFamily),
    ('result', Any)
])
ElementPair = NamedTuple('ElementPair', [
    ('parent', Optional[Element]),
    ('value', Element)
])

def walk_tree_with_family(root: Element,
                          processor: Callable[[Element], Optional[_T]]
                          ) -> List[ResultWithFamily]:
    """Like walk_tree, but each result also records the matched element's
    parent and grandparent (as an ElementFamily)."""
    results = []

    queue = deque([ElementPair(parent=None, value=root)])
    while queue:
        currElementPair = queue.popleft()
        for child in currElementPair.value.getchildren():
            if child.getchildren():
                queue.append(ElementPair(parent=currElementPair, value=child))  # type: ignore # Lack of Deque support in typing module for Python 3.4.3
            result = processor(child)
            if result is not None:
                if currElementPair.parent is not None:
                    grandparent_element = cast(ElementPair, currElementPair.parent)
                    grandparent = grandparent_element.value
                else:
                    grandparent = None
                family = ElementFamily(
                    grandparent=grandparent,
                    parent=currElementPair.value,
                    child=child
                )

                results.append(ResultWithFamily(
                    family=family,
                    result=result
                ))

    return results

# height is not actually used
def add_a(
        root: Element,
        url: str,
        link: str,
        title: Optional[str]=None,
        desc: Optional[str]=None,
        class_attr: str="message_inline_image",
        data_id: Optional[str]=None,
        insertion_index: Optional[int]=None,
        already_thumbnailed: Optional[bool]=False
) -> None:
    """Append (or insert at insertion_index) an inline-image preview div
    containing an <a><img></a> for `url`, linking to `link`."""
    title = title if title is not None else url_filename(link)
    title = title if title else ""
    desc = desc if desc is not None else ""

    if insertion_index is not None:
        div = markdown.util.etree.Element("div")
        root.insert(insertion_index, div)
    else:
        div = markdown.util.etree.SubElement(root, "div")

    div.set("class", class_attr)
    a = markdown.util.etree.SubElement(div, "a")
    a.set("href", link)
    a.set("target", "_blank")
    a.set("title", title)
    if data_id is not None:
        a.set("data-id", data_id)
    img = markdown.util.etree.SubElement(a, "img")
    if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url):
        # See docs/thumbnailing.md for some high-level documentation.
        #
        # We strip leading '/' from relative URLs here to ensure
        # consistency in what gets passed to /thumbnail
        url = url.lstrip('/')
        img.set("src", "/thumbnail?url={0}&size=thumbnail".format(
            urllib.parse.quote(url, safe='')
        ))
        img.set('data-src-fullsize', "/thumbnail?url={0}&size=full".format(
            urllib.parse.quote(url, safe='')
        ))
    else:
        img.set("src", url)

    if class_attr == "message_inline_ref":
        summary_div = markdown.util.etree.SubElement(div, "div")
        title_div = markdown.util.etree.SubElement(summary_div, "div")
        title_div.set("class", "message_inline_image_title")
        title_div.text = title
        desc_div = markdown.util.etree.SubElement(summary_div, "desc")
        desc_div.set("class", "message_inline_image_desc")

def add_embed(root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
    """Append a link-preview "message_embed" card built from open-graph style
    extracted_data (keys used: 'image', 'title', 'description')."""
    container = markdown.util.etree.SubElement(root, "div")
    container.set("class", "message_embed")

    img_link = extracted_data.get('image')
    if img_link:
        parsed_img_link = urllib.parse.urlparse(img_link)
        # Append domain where relative img_link url is given
        if not parsed_img_link.netloc:
            parsed_url = urllib.parse.urlparse(link)
            domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
            img_link = urllib.parse.urljoin(domain, img_link)
        img = markdown.util.etree.SubElement(container, "a")
        # NOTE(review): img_link is interpolated into an inline style
        # attribute; confirm upstream sanitization of extracted_data['image'].
        img.set("style", "background-image: url(" + img_link + ")")
        img.set("href", link)
        img.set("target", "_blank")
        img.set("class", "message_embed_image")

    data_container = markdown.util.etree.SubElement(container, "div")
    data_container.set("class", "data-container")

    title = extracted_data.get('title')
    if title:
        title_elm = markdown.util.etree.SubElement(data_container, "div")
        title_elm.set("class", "message_embed_title")
        a = markdown.util.etree.SubElement(title_elm, "a")
        a.set("href", link)
        a.set("target", "_blank")
        a.set("title", title)
        a.text = title
    description = extracted_data.get('description')
    if description:
        description_elm = markdown.util.etree.SubElement(data_container, "div")
        description_elm.set("class", "message_embed_description")
        description_elm.text = description

@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
    """Fetch a tweet's data from the Twitter API (or the test mock), returning
    None on permanent failures so the negative result gets cached."""
    if settings.TEST_SUITE:
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
        creds = {
            'consumer_key': settings.TWITTER_CONSUMER_KEY,
            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
        }
        if not all(creds.values()):
            return None

        # We lazily import twitter here because its import process is
        # surprisingly slow, and doing so has a significant impact on
        # the startup performance of `manage.py` commands.
        import twitter

        try:
            api = twitter.Api(tweet_mode='extended', **creds)
            # Sometimes Twitter hangs on responses.  Timing out here
            # will cause the Tweet to go through as-is with no inline
            # preview, rather than having the message be rejected
            # entirely. This timeout needs to be less than our overall
            # formatting timeout.
            tweet = timeout(3, api.GetStatus, tweet_id)
            res = tweet.AsDict()
        except AttributeError:
            bugdown_logger.error('Unable to load twitter api, you may have the wrong '
                                 'library installed, see https://github.com/zulip/zulip/issues/86')
            return None
        except TimeoutExpired:
            # We'd like to try again later and not cache the bad result,
            # so we need to re-raise the exception (just as though
            # we were being rate-limited)
            raise
        except twitter.TwitterError as e:
            t = e.args[0]
            if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
                # Code 34 means that the message doesn't exist; return
                # None so that we will cache the error
                return None
            elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
                                                       t[0]['code'] == 130):
                # Code 88 means that we were rate-limited and 130
                # means Twitter is having capacity issues; either way
                # just raise the error so we don't cache None and will
                # try again later.
                raise
            else:
                # It's not clear what to do in cases of other errors,
                # but for now it seems reasonable to log at error
                # level (so that we get notified), but then cache the
                # failure to proceed with our usual work
                bugdown_logger.error(traceback.format_exc())
                return None
    return res

HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')

def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
    """Fetch `url` and scrape its <head> for open-graph image/title/description.
    Returns None unless an og:image is present."""
    in_head = False
    # HTML will auto close meta tags, when we start the next tag add
    # a closing tag if it has not been closed yet.
    last_closed = True
    head = []
    # TODO: What if response content is huge? Should we get headers first?
    try:
        content = requests.get(url, timeout=1).text
    except Exception:
        return None
    # Extract the head and meta tags
    # All meta tags are self closing, have no children or are closed
    # automatically.
    for part in content.split('<'):
        if not in_head and HEAD_START_RE.match(part):
            # Started the head node output it to have a document root
            in_head = True
            head.append('<head>')
        elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head close any remaining tag then stop
            # processing
            in_head = False
            if not last_closed:
                last_closed = True
                head.append('</meta>')
            head.append('</head>')
            break
        elif in_head and META_START_RE.match(part):
            # Found a meta node copy it
            if not last_closed:
                head.append('</meta>')
                last_closed = True
            head.append('<')
            head.append(part)
            if '/>' not in part:
                last_closed = False
        elif in_head and META_END_RE.match(part):
            # End of a meta node just copy it to close the tag
            head.append('<')
            head.append(part)
            last_closed = True
    try:
        doc = etree.fromstring(''.join(head))
    except etree.ParseError:
        return None
    og_image = doc.find('meta[@property="og:image"]')
    og_title = doc.find('meta[@property="og:title"]')
    og_desc = doc.find('meta[@property="og:description"]')
    title = None
    desc = None
    if og_image is not None:
        image = og_image.get('content')
    else:
        return None
    if og_title is not None:
        title = og_title.get('content')
    if og_desc is not None:
        desc = og_desc.get('content')
    return {'image': image, 'title': title, 'desc': desc}

def get_tweet_id(url: str) -> Optional[str]:
    """Extract the numeric tweet id from a twitter.com status URL, or None."""
    parsed_url = urllib.parse.urlparse(url)
    if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):
        return None
    to_match = parsed_url.path
    # In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
    # we need to look at the fragment instead
    if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
        to_match = parsed_url.fragment

    tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match)
    if not tweet_id_match:
        return None
    return tweet_id_match.group("tweetid")

class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
    # Rewrites plain-http <img> sources to go through the camo proxy.
    def run(self, root: Element) -> None:
        # Get all URLs from the blob
        found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
        for img in found_imgs:
            url = img.get("src")
            if urllib.parse.urlsplit(url).scheme != "http":
                # Don't rewrite images on our own site (e.g. emoji).
                continue
            img.set("src", get_camo_url(url))

class BacktickPattern(markdown.inlinepatterns.Pattern):
    """
    Return a `<code>` element containing the matching text.
""" def __init__(self, pattern: str) -> None: markdown.inlinepatterns.Pattern.__init__(self, pattern) self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\'), markdown.util.ETX) self.tag = 'code' def handleMatch(self, m: Match[str]) -> Union[str, Element]: if m.group(4): el = markdown.util.etree.Element(self.tag) # Modified to not strip whitespace el.text = markdown.util.AtomicString(m.group(4)) return el else: return m.group(2).replace('\\\\', self.ESCAPED_BSLASH) class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor): TWITTER_MAX_IMAGE_HEIGHT = 400 TWITTER_MAX_TO_PREVIEW = 3 INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5 def __init__(self, md: markdown.Markdown) -> None: markdown.treeprocessors.Treeprocessor.__init__(self, md) def get_actual_image_url(self, url: str) -> str: # Add specific per-site cases to convert image-preview urls to image urls. # See https://github.com/zulip/zulip/issues/4658 for more information parsed_url = urllib.parse.urlparse(url) if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')): # https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png -> # https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png split_path = parsed_url.path.split('/') if len(split_path) > 3 and split_path[3] == "blob": return urllib.parse.urljoin('https://raw.githubusercontent.com', '/'.join(split_path[0:3] + split_path[4:])) return url def is_image(self, url: str) -> bool: if not self.markdown.image_preview_enabled: return False parsed_url = urllib.parse.urlparse(url) # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093 for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]: if parsed_url.path.lower().endswith(ext): return True return False def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]: # TODO: The returned Dict could possibly be a TypedDict in future. 
parsed_url = urllib.parse.urlparse(url) if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')): is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/') # Only allow preview Dropbox shared links if not (parsed_url.path.startswith('/s/') or parsed_url.path.startswith('/sh/') or is_album): return None # Try to retrieve open graph protocol info for a preview # This might be redundant right now for shared links for images. # However, we might want to make use of title and description # in the future. If the actual image is too big, we might also # want to use the open graph image. image_info = fetch_open_graph_image(url) is_image = is_album or self.is_image(url) # If it is from an album or not an actual image file, # just use open graph image. if is_album or not is_image: # Failed to follow link to find an image preview so # use placeholder image and guess filename if image_info is None: return None image_info["is_image"] = is_image return image_info # Otherwise, try to retrieve the actual image. # This is because open graph image from Dropbox may have padding # and gifs do not work. # TODO: What if image is huge? Should we get headers first? if image_info is None: image_info = dict() image_info['is_image'] = True parsed_url_list = list(parsed_url) parsed_url_list[4] = "dl=1" # Replaces query image_info["image"] = urllib.parse.urlunparse(parsed_url_list) return image_info return None def youtube_id(self, url: str) -> Optional[str]: if not self.markdown.image_preview_enabled: return None # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s # Slightly modified to support URLs of the form youtu.be/<id> # If it matches, match.group(2) is the video id. 
schema_re = r'(?:https?://)' host_re = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)' param_re = r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))' id_re = r'([0-9A-Za-z_-]+)' youtube_re = r'^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$' youtube_re = youtube_re.format(schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re) match = re.match(youtube_re, url) if match is None: return None return match.group(2) def youtube_image(self, url: str) -> Optional[str]: yt_id = self.youtube_id(url) if yt_id is not None: return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,) return None def vimeo_id(self, url: str) -> Optional[str]: if not self.markdown.image_preview_enabled: return None #(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?) # If it matches, match.group('id') is the video id. vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \ r'(?:channels\/(?:\w+\/)?|groups\/' + \ r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$' match = re.match(vimeo_re, url) if match is None: return None return match.group(5) def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]: title = extracted_data.get("title") if title is not None: return "Vimeo - {}".format(title) return None def twitter_text(self, text: str, urls: List[Dict[str, str]], user_mentions: List[Dict[str, Any]], media: List[Dict[str, Any]]) -> Element: """ Use data from the twitter API to turn links, mentions and media into A tags. Also convert unicode emojis to images. This works by using the urls, user_mentions and media data from the twitter API and searching for unicode emojis in the text using `unicode_emoji_regex`. The first step is finding the locations of the URLs, mentions, media and emoji in the text. 
For each match we build a dictionary with type, the start location, end location, the URL to link to, and the text(codepoint and title in case of emojis) to be used in the link(image in case of emojis). Next we sort the matches by start location. And for each we add the text from the end of the last link to the start of the current link to the output. The text needs to added to the text attribute of the first node (the P tag) or the tail the last link created. Finally we add any remaining text to the last node. """ to_process = [] # type: List[Dict[str, Any]] # Build dicts for URLs for url_data in urls: short_url = url_data["url"] full_url = url_data["expanded_url"] for match in re.finditer(re.escape(short_url), text, re.IGNORECASE): to_process.append({ 'type': 'url', 'start': match.start(), 'end': match.end(), 'url': short_url, 'text': full_url, }) # Build dicts for mentions for user_mention in user_mentions: screen_name = user_mention['screen_name'] mention_string = '@' + screen_name for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE): to_process.append({ 'type': 'mention', 'start': match.start(), 'end': match.end(), 'url': 'https://twitter.com/' + urllib.parse.quote(screen_name), 'text': mention_string, }) # Build dicts for media for media_item in media: short_url = media_item['url'] expanded_url = media_item['expanded_url'] for match in re.finditer(re.escape(short_url), text, re.IGNORECASE): to_process.append({ 'type': 'media', 'start': match.start(), 'end': match.end(), 'url': short_url, 'text': expanded_url, }) # Build dicts for emojis for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE): orig_syntax = match.group('syntax') codepoint = unicode_emoji_to_codepoint(orig_syntax) if codepoint in codepoint_to_name: display_string = ':' + codepoint_to_name[codepoint] + ':' to_process.append({ 'type': 'emoji', 'start': match.start(), 'end': match.end(), 'codepoint': codepoint, 'title': display_string, }) to_process.sort(key=lambda 
x: x['start']) p = current_node = markdown.util.etree.Element('p') def set_text(text: str) -> None: """ Helper to set the text or the tail of the current_node """ if current_node == p: current_node.text = text else: current_node.tail = text db_data = self.markdown.zulip_db_data current_index = 0 for item in to_process: # The text we want to link starts in already linked text skip it if item['start'] < current_index: continue # Add text from the end of last link to the start of the current # link set_text(text[current_index:item['start']]) current_index = item['end'] if item['type'] != 'emoji': current_node = elem = url_to_a(db_data, item['url'], item['text']) else: current_node = elem = make_emoji(item['codepoint'], item['title']) p.append(elem) # Add any unused text set_text(text[current_index:]) return p def twitter_link(self, url: str) -> Optional[Element]: tweet_id = get_tweet_id(url) if tweet_id is None: return None try: res = fetch_tweet_data(tweet_id) if res is None: return None user = res['user'] # type: Dict[str, Any] tweet = markdown.util.etree.Element("div") tweet.set("class", "twitter-tweet") img_a = markdown.util.etree.SubElement(tweet, 'a') img_a.set("href", url) img_a.set("target", "_blank") profile_img = markdown.util.etree.SubElement(img_a, 'img') profile_img.set('class', 'twitter-avatar') # For some reason, for, e.g. tweet 285072525413724161, # python-twitter does not give us a # profile_image_url_https, but instead puts that URL in # profile_image_url. So use _https if available, but fall # back gracefully. 
image_url = user.get('profile_image_url_https', user['profile_image_url']) profile_img.set('src', image_url) text = html.unescape(res['full_text']) urls = res.get('urls', []) user_mentions = res.get('user_mentions', []) media = res.get('media', []) # type: List[Dict[str, Any]] p = self.twitter_text(text, urls, user_mentions, media) tweet.append(p) span = markdown.util.etree.SubElement(tweet, 'span') span.text = "- %s (@%s)" % (user['name'], user['screen_name']) # Add image previews for media_item in media: # Only photos have a preview image if media_item['type'] != 'photo': continue # Find the image size that is smaller than # TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest size_name_tuples = list(media_item['sizes'].items()) size_name_tuples.sort(reverse=True, key=lambda x: x[1]['h']) for size_name, size in size_name_tuples: if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT: break media_url = '%s:%s' % (media_item['media_url_https'], size_name) img_div = markdown.util.etree.SubElement(tweet, 'div') img_div.set('class', 'twitter-image') img_a = markdown.util.etree.SubElement(img_div, 'a') img_a.set('href', media_item['url']) img_a.set('target', '_blank') img_a.set('title', media_item['url']) img = markdown.util.etree.SubElement(img_a, 'img') img.set('src', media_url) return tweet except Exception: # We put this in its own try-except because it requires external # connectivity. If Twitter flakes out, we don't want to not-render # the entire message; we just want to not show the Twitter preview. 
            bugdown_logger.warning(traceback.format_exc())
            return None

    def get_url_data(self, e: Element) -> Optional[Tuple[str, str]]:
        """Return (href, link text) for an <a> element, or None for other tags.

        For a bare link with no text, the href itself doubles as the text.
        """
        if e.tag == "a":
            if e.text is not None:
                return (e.get("href"), e.text)
            return (e.get("href"), e.get("href"))
        return None

    def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None:
        """Insert an inline image preview near found_url's position in the tree.

        If the link was alone in its list item / paragraph, the now-redundant
        anchor (or the whole paragraph) is removed so only the preview shows.
        """
        grandparent = found_url.family.grandparent
        parent = found_url.family.parent
        ahref_element = found_url.family.child
        (url, text) = found_url.result
        actual_url = self.get_actual_image_url(url)

        # url != text usually implies a named link, which we opt not to remove
        url_eq_text = (url == text)

        if parent.tag == 'li':
            add_a(parent, self.get_actual_image_url(url), url, title=text)
            if not parent.text and not ahref_element.tail and url_eq_text:
                parent.remove(ahref_element)

        elif parent.tag == 'p':
            # Locate parent among grandparent's children so the preview can
            # be inserted right after it (preserving message order).
            parent_index = None
            for index, uncle in enumerate(grandparent.getchildren()):
                if uncle is parent:
                    parent_index = index
                    break

            if parent_index is not None:
                ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
                add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index)

            else:
                # We're not inserting after parent, since parent not found.
# Append to end of list of grandparent's children as normal add_a(grandparent, actual_url, url, title=text) # If link is alone in a paragraph, delete paragraph containing it if (len(parent.getchildren()) == 1 and (not parent.text or parent.text == "\n") and not ahref_element.tail and url_eq_text): grandparent.remove(parent) else: # If none of the above criteria match, fall back to old behavior add_a(root, actual_url, url, title=text) def find_proper_insertion_index(self, grandparent: Element, parent: Element, parent_index_in_grandparent: int) -> int: # If there are several inline images from same paragraph, ensure that # they are in correct (and not opposite) order by inserting after last # inline image from paragraph 'parent' uncles = grandparent.getchildren() parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")] insertion_index = parent_index_in_grandparent while True: insertion_index += 1 if insertion_index >= len(uncles): return insertion_index uncle = uncles[insertion_index] inline_image_classes = ['message_inline_image', 'message_inline_ref'] if ( uncle.tag != 'div' or 'class' not in uncle.keys() or uncle.attrib['class'] not in inline_image_classes ): return insertion_index uncle_link = list(uncle.iter(tag="a"))[0].attrib['href'] if uncle_link not in parent_links: return insertion_index def is_absolute_url(self, url: str) -> bool: return bool(urllib.parse.urlparse(url).netloc) def run(self, root: Element) -> None: # Get all URLs from the blob found_urls = walk_tree_with_family(root, self.get_url_data) if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE: return rendered_tweet_count = 0 for found_url in found_urls: (url, text) = found_url.result if not self.is_absolute_url(url): if self.is_image(url): self.handle_image_inlining(root, found_url) # We don't have a strong use case for doing url preview for relative links. 
continue dropbox_image = self.dropbox_image(url) if dropbox_image is not None: class_attr = "message_inline_ref" is_image = dropbox_image["is_image"] if is_image: class_attr = "message_inline_image" # Not making use of title and description of images add_a(root, dropbox_image['image'], url, title=dropbox_image.get('title', ""), desc=dropbox_image.get('desc', ""), class_attr=class_attr, already_thumbnailed=True) continue if self.is_image(url): self.handle_image_inlining(root, found_url) continue if get_tweet_id(url) is not None: if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW: # Only render at most one tweet per message continue twitter_data = self.twitter_link(url) if twitter_data is None: # This link is not actually a tweet known to twitter continue rendered_tweet_count += 1 div = markdown.util.etree.SubElement(root, "div") div.set("class", "inline-preview-twitter") div.insert(0, twitter_data) continue youtube = self.youtube_image(url) if youtube is not None: yt_id = self.youtube_id(url) add_a(root, youtube, url, None, None, "youtube-video message_inline_image", yt_id, already_thumbnailed=True) continue db_data = self.markdown.zulip_db_data if db_data and db_data['sent_by_bot']: continue if not self.markdown.url_embed_preview_enabled: continue try: extracted_data = link_preview.link_embed_data_from_cache(url) except NotFoundInCache: self.markdown.zulip_message.links_for_preview.add(url) continue if extracted_data: vm_id = self.vimeo_id(url) if vm_id is not None: vimeo_image = extracted_data.get('image') vimeo_title = self.vimeo_title(extracted_data) if vimeo_image is not None: add_a(root, vimeo_image, url, vimeo_title, None, "vimeo-video message_inline_image", vm_id, already_thumbnailed=True) if vimeo_title is not None: found_url.family.child.text = vimeo_title else: add_embed(root, url, extracted_data) class Avatar(markdown.inlinepatterns.Pattern): def handleMatch(self, match: Match[str]) -> Optional[Element]: img = markdown.util.etree.Element('img') 
        email_address = match.group('email')
        email = email_address.strip().lower()
        profile_id = None

        db_data = self.markdown.zulip_db_data
        if db_data is not None:
            # Prefer a stable user-id avatar URL over the raw email when the
            # address belongs to a known user in this message's realm.
            user_dict = db_data['email_info'].get(email)
            if user_dict is not None:
                profile_id = user_dict['id']

        img.set('class', 'message_body_gravatar')
        img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
        img.set('title', email)
        img.set('alt', email)
        return img

def possible_avatar_emails(content: str) -> Set[str]:
    """Return the set of emails referenced by !avatar()/!gravatar() syntax in content."""
    emails = set()
    for REGEX in [AVATAR_REGEX, GRAVATAR_REGEX]:
        matches = re.findall(REGEX, content)
        for email in matches:
            if email:
                emails.add(email)
    return emails

# Load the generated emoji name <-> unicode codepoint tables once at import
# time; they back the :name: and literal-unicode emoji rendering below.
path_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "name_to_codepoint.json")
with open(path_to_name_to_codepoint) as name_to_codepoint_file:
    name_to_codepoint = ujson.load(name_to_codepoint_file)

path_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,
                                         "generated", "emoji", "codepoint_to_name.json")
with open(path_to_codepoint_to_name) as codepoint_to_name_file:
    codepoint_to_name = ujson.load(codepoint_to_name_file)

# All of our emojis(non ZWJ sequences) belong to one of these unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols
# and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
unicode_emoji_regex = '(?P<syntax>['\
    '\U0001F100-\U0001F64F' \
    '\U0001F680-\U0001F6FF' \
    '\U0001F900-\U0001F9FF' \
    '\u2000-\u206F' \
    '\u2300-\u27BF' \
    '\u2900-\u297F' \
    '\u2B00-\u2BFF' \
    '\u3000-\u303F' \
    '\u3200-\u32FF' \
    '])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript

def make_emoji(codepoint: str, display_string: str) -> Element:
    """Build a <span> element rendering the unicode emoji for `codepoint`.

    `display_string` is the :name: syntax; it becomes the visible text and
    (with underscores replaced by spaces) the accessible title/label.
    """
    # Replace underscore in emoji's title with space
    title = display_string[1:-1].replace("_", " ")
    span = markdown.util.etree.Element('span')
    span.set('class', 'emoji emoji-%s' % (codepoint,))
    span.set('title', title)
    span.set('role', 'img')
    span.set('aria-label', title)
    span.text = display_string
    return span

def make_realm_emoji(src: str, display_string: str) -> Element:
    """Build an <img> element for a realm (custom) emoji served from `src`."""
    elt = markdown.util.etree.Element('img')
    elt.set('src', src)
    elt.set('class', 'emoji')
    elt.set("alt", display_string)
    elt.set("title", display_string[1:-1].replace("_", " "))
    return elt

def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
    """Return the lowercase hex codepoint (no '0x' prefix) of a single emoji character."""
    codepoint = hex(ord(unicode_emoji))[2:]
    # Unicode codepoints are minimum of length 4, padded
    # with zeroes if the length is less than four.
    while len(codepoint) < 4:
        codepoint = '0' + codepoint
    return codepoint

class EmoticonTranslation(markdown.inlinepatterns.Pattern):
    """ Translates emoticons like `:)` into emoji like `:smile:`. """
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        # Emoticon translation is a per-user setting; skip entirely unless
        # this message's rendering context has it enabled.
        db_data = self.markdown.zulip_db_data
        if db_data is None or not db_data['translate_emoticons']:
            return None

        emoticon = match.group('emoticon')
        translated = translate_emoticons(emoticon)
        name = translated[1:-1]
        return make_emoji(name_to_codepoint[name], translated)

class UnicodeEmoji(markdown.inlinepatterns.Pattern):
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        """Render a literal unicode emoji character as an emoji span."""
        orig_syntax = match.group('syntax')
        codepoint = unicode_emoji_to_codepoint(orig_syntax)
        if codepoint in codepoint_to_name:
            display_string = ':' + codepoint_to_name[codepoint] + ':'
            return make_emoji(codepoint, display_string)
        else:
            return None

class Emoji(markdown.inlinepatterns.Pattern):
    def handleMatch(self, match: Match[str]) -> Optional[Element]:
        """Render :name: syntax; realm (custom) emoji take precedence over unicode emoji."""
        orig_syntax = match.group("syntax")
        name = orig_syntax[1:-1]

        active_realm_emoji = {}  # type: Dict[str, Dict[str, str]]
        db_data = self.markdown.zulip_db_data
        if db_data is not None:
            active_realm_emoji = db_data['active_realm_emoji']

        if self.markdown.zulip_message and name in active_realm_emoji:
            return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
        elif name == 'zulip':
            # The 'zulip' emoji is hard-coded and available in every realm.
            return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
        elif name in name_to_codepoint:
            return make_emoji(name_to_codepoint[name], orig_syntax)
        else:
            return None

def content_has_emoji_syntax(content: str) -> bool:
    """Cheap pre-check for whether content may contain :emoji: syntax."""
    return re.search(EMOJI_REGEX, content) is not None

class ModalLink(markdown.inlinepatterns.Pattern):
    """
    A pattern that allows including in-app modal links in messages.
""" def handleMatch(self, match: Match[str]) -> Element: relative_url = match.group('relative_url') text = match.group('text') a_tag = markdown.util.etree.Element("a") a_tag.set("href", relative_url) a_tag.set("title", relative_url) a_tag.text = text return a_tag class Tex(markdown.inlinepatterns.Pattern): def handleMatch(self, match: Match[str]) -> Element: rendered = render_tex(match.group('body'), is_inline=True) if rendered is not None: return etree.fromstring(rendered.encode('utf-8')) else: # Something went wrong while rendering span = markdown.util.etree.Element('span') span.set('class', 'tex-error') span.text = '$$' + match.group('body') + '$$' return span upload_title_re = re.compile("^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$") def url_filename(url: str) -> str: """Extract the filename if a URL is an uploaded file, or return the original URL""" match = upload_title_re.match(url) if match: return match.group('filename') else: return url def fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None: """Set certain attributes we want on every link.""" if target_blank: link.set('target', '_blank') link.set('title', url_filename(link.get('href'))) def sanitize_url(url: str) -> Optional[str]: """ Sanitize a url against xss attacks. See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url. """ try: parts = urllib.parse.urlparse(url.replace(' ', '%20')) scheme, netloc, path, params, query, fragment = parts except ValueError: # Bad url - so bad it couldn't be parsed. 
return '' # If there is no scheme or netloc and there is a '@' in the path, # treat it as a mailto: and set the appropriate scheme if scheme == '' and netloc == '' and '@' in path: scheme = 'mailto' elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/': # Allow domain-relative links return urllib.parse.urlunparse(('', '', path, params, query, fragment)) elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0: # Allow fragment links return urllib.parse.urlunparse(('', '', '', '', '', fragment)) # Zulip modification: If scheme is not specified, assume http:// # We re-enter sanitize_url because netloc etc. need to be re-parsed. if not scheme: return sanitize_url('http://' + url) locless_schemes = ['mailto', 'news', 'file', 'bitcoin'] if netloc == '' and scheme not in locless_schemes: # This fails regardless of anything else. # Return immediately to save additional processing return None # Upstream code will accept a URL like javascript://foo because it # appears to have a netloc. Additionally there are plenty of other # schemes that do weird things like launch external programs. To be # on the safe side, we whitelist the scheme. if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'): return None # Upstream code scans path, parameters, and query for colon characters # because # # some aliases [for javascript:] will appear to urllib.parse to have # no scheme. On top of that relative links (i.e.: "foo/bar.html") # have no scheme. # # We already converted an empty scheme to http:// above, so we skip # the colon check, which would also forbid a lot of legitimate URLs. # Url passes all tests. Return url as-is. 
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment)) def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]: a = markdown.util.etree.Element('a') href = sanitize_url(url) target_blank = True if href is None: # Rejected by sanitize_url; render it as plain text. return url if text is None: text = markdown.util.AtomicString(url) href = rewrite_local_links_to_relative(db_data, href) target_blank = not href.startswith("#narrow") and not href.startswith('mailto:') a.set('href', href) a.text = text fixup_link(a, target_blank) return a class CompiledPattern(markdown.inlinepatterns.Pattern): def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None: # This is similar to the superclass's small __init__ function, # but we skip the compilation step and let the caller give us # a compiled regex. self.compiled_re = compiled_re self.md = md class AutoLink(CompiledPattern): def handleMatch(self, match: Match[str]) -> ElementStringNone: url = match.group('url') db_data = self.markdown.zulip_db_data return url_to_a(db_data, url) class UListProcessor(markdown.blockprocessors.UListProcessor): """ Process unordered list blocks. Based on markdown.blockprocessors.UListProcessor, but does not accept '+' or '-' as a bullet character.""" TAG = 'ul' RE = re.compile('^[ ]{0,3}[*][ ]+(.*)') def __init__(self, parser: Any) -> None: # HACK: Set the tab length to 2 just for the initialization of # this class, so that bulleted lists (and only bulleted lists) # work off 2-space indentation. parser.markdown.tab_length = 2 super().__init__(parser) parser.markdown.tab_length = 4 class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor): """ Process unordered list blocks. 
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent """ def __init__(self, parser: Any) -> None: # HACK: Set the tab length to 2 just for the initialization of # this class, so that bulleted lists (and only bulleted lists) # work off 2-space indentation. parser.markdown.tab_length = 2 super().__init__(parser) parser.markdown.tab_length = 4 class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor): """ Process BlockQuotes. Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent """ # Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)') RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))' r'[ ]{0,3}>[ ]?(.*)') mention_re = re.compile(mention.find_mentions) def clean(self, line: str) -> str: # Silence all the mentions inside blockquotes line = re.sub(self.mention_re, lambda m: "@_{}".format(m.group('match')), line) # And then run the upstream processor's code for removing the '>' return super().clean(line) class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor): """ Allows unordered list blocks that come directly after a paragraph to be rendered as an unordered list Detects paragraphs that have a matching list item that comes directly after a line of text, and inserts a newline between to satisfy Markdown""" LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE) HANGING_ULIST_RE = re.compile('^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE) def run(self, lines: List[str]) -> List[str]: """ Insert a newline between a paragraph and ulist if missing """ inserts = 0 fence = None copy = lines[:] for i in range(len(lines) - 1): # Ignore anything that is inside a fenced code block m = FENCE_RE.match(lines[i]) if not fence and m: fence = m.group('fence') elif fence and m and fence == m.group('fence'): fence = None # If we're not in a fenced block and we detect an upcoming list # hanging off a paragraph, add a newline if (not fence and lines[i] and 
                    self.LI_RE.match(lines[i+1]) and
                    not self.LI_RE.match(lines[i])):
                copy.insert(i+inserts+1, '')
                inserts += 1
        return copy

class AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Finds a sequence of lines numbered by the same number"""
    RE = re.compile(r'^([ ]*)(\d+)\.[ ]+(.*)')
    TAB_LENGTH = 2

    def run(self, lines: List[str]) -> List[str]:
        """Group consecutive same-indent numbered items and renumber each group."""
        new_lines = []  # type: List[str]
        current_list = []  # type: List[Match[str]]
        current_indent = 0

        for line in lines:
            m = self.RE.match(line)

            # Remember if this line is a continuation of already started list
            is_next_item = (m and current_list and
                            current_indent == len(m.group(1)) // self.TAB_LENGTH)
            if not is_next_item:
                # There are no more items in the list we were processing
                new_lines.extend(self.renumber(current_list))
                current_list = []

            if not m:
                # Ordinary line
                new_lines.append(line)
            elif is_next_item:
                # Another list item
                current_list.append(m)
            else:
                # First list item
                current_list = [m]
                current_indent = len(m.group(1)) // self.TAB_LENGTH

        # Flush the trailing list, if the input ended inside one.
        new_lines.extend(self.renumber(current_list))

        return new_lines

    def renumber(self, mlist: List[Match[str]]) -> List[str]:
        """Rebuild the matched items as text lines.

        If every item repeats the same number (e.g. "1. 1. 1."), renumber
        them sequentially starting from that number; otherwise the author's
        explicit numbers are kept unchanged.
        """
        if not mlist:
            return []

        start_number = int(mlist[0].group(2))

        # Change numbers only if every one is the same
        change_numbers = True
        for m in mlist:
            if int(m.group(2)) != start_number:
                change_numbers = False
                break

        lines = []  # type: List[str]
        counter = start_number

        for m in mlist:
            number = str(counter) if change_numbers else m.group(2)
            lines.append('%s%s. %s' % (m.group(1), number, m.group(3)))
            counter += 1

        return lines

# We need the following since upgrade from py-markdown 2.6.11 to 3.0.1
# modifies the link handling significantly. The following is taken from
# py-markdown 2.6.11 markdown/inlinepatterns.py.

@one_time
def get_link_re() -> str:
    '''
    Very important--if you need to change this code to depend on any
    arguments, you must eliminate the "one_time" decorator and consider
    performance implications.  We only want to compute this value once.
''' NOBRACKET = r'[^\]\[]*' BRK = ( r'\[(' + (NOBRACKET + r'(\[')*6 + (NOBRACKET + r'\])*')*6 + NOBRACKET + r')\]' ) NOIMG = r'(?<!\!)' # [text](url) or [text](<url>) or [text](url "title") LINK_RE = NOIMG + BRK + \ r'''\(\s*(<(?:[^<>\\]|\\.)*>|(\([^()]*\)|[^()])*?)\s*(('(?:[^'\\]|\\.)*'|"(?:[^"\\]|\\.)*")\s*)?\)''' return normal_compile(LINK_RE) def prepare_realm_pattern(source: str) -> str: """ Augment a realm filter so it only matches after start-of-string, whitespace, or opening delimiters, won't match if there are word characters directly after, and saves what was matched as "name". """ return r"""(?<![^\s'"\(,:<])(?P<name>""" + source + r')(?!\w)' # Given a regular expression pattern, linkifies groups that match it # using the provided format string to construct the URL. class RealmFilterPattern(markdown.inlinepatterns.Pattern): """ Applied a given realm filter to the input """ def __init__(self, source_pattern: str, format_string: str, markdown_instance: Optional[markdown.Markdown]=None) -> None: self.pattern = prepare_realm_pattern(source_pattern) self.format_string = format_string markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance) def handleMatch(self, m: Match[str]) -> Union[Element, str]: db_data = self.markdown.zulip_db_data return url_to_a(db_data, self.format_string % m.groupdict(), m.group("name")) class UserMentionPattern(markdown.inlinepatterns.Pattern): def handleMatch(self, m: Match[str]) -> Optional[Element]: match = m.group('match') silent = m.group('silent') == '_' db_data = self.markdown.zulip_db_data if self.markdown.zulip_message and db_data is not None: if match.startswith("**") and match.endswith("**"): name = match[2:-2] else: return None wildcard = mention.user_mention_matches_wildcard(name) id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name) if id_syntax_match: id = id_syntax_match.group("user_id") user = db_data['mention_data'].get_user_by_id(id) else: user = 
db_data['mention_data'].get_user_by_name(name) if wildcard: self.markdown.zulip_message.mentions_wildcard = True user_id = "*" elif user: if not silent: self.markdown.zulip_message.mentions_user_ids.add(user['id']) name = user['full_name'] user_id = str(user['id']) else: # Don't highlight @mentions that don't refer to a valid user return None el = markdown.util.etree.Element("span") el.set('data-user-id', user_id) if silent: el.set('class', 'user-mention silent') el.text = "%s" % (name,) else: el.set('class', 'user-mention') el.text = "@%s" % (name,) return el return None class UserGroupMentionPattern(markdown.inlinepatterns.Pattern): def handleMatch(self, m: Match[str]) -> Optional[Element]: match = m.group(2) db_data = self.markdown.zulip_db_data if self.markdown.zulip_message and db_data is not None: name = extract_user_group(match) user_group = db_data['mention_data'].get_user_group(name) if user_group: self.markdown.zulip_message.mentions_user_group_ids.add(user_group.id) name = user_group.name user_group_id = str(user_group.id) else: # Don't highlight @-mentions that don't refer to a valid user # group. 
                return None
            el = markdown.util.etree.Element("span")
            el.set('class', 'user-group-mention')
            el.set('data-user-group-id', user_group_id)
            el.text = "@%s" % (name,)
            return el
        return None

class StreamPattern(CompiledPattern):
    def find_stream_by_name(self, name: str) -> Optional[Dict[str, Any]]:
        """Look up a stream dict by name in this message's realm data, if available."""
        db_data = self.markdown.zulip_db_data
        if db_data is None:
            return None
        stream = db_data['stream_names'].get(name)
        return stream

    def handleMatch(self, m: Match[str]) -> Optional[Element]:
        """Render #stream_name syntax as a narrow link to the stream."""
        name = m.group('stream_name')

        if self.markdown.zulip_message:
            stream = self.find_stream_by_name(name)
            if stream is None:
                return None
            el = markdown.util.etree.Element('a')
            el.set('class', 'stream')
            el.set('data-stream-id', str(stream['id']))
            # TODO: We should quite possibly not be specifying the
            # href here and instead having the browser auto-add the
            # href when it processes a message with one of these, to
            # provide more clarity to API clients.
            stream_url = encode_stream(stream['id'], name)
            el.set('href', '/#narrow/stream/{stream_url}'.format(stream_url=stream_url))
            el.text = '#{stream_name}'.format(stream_name=name)
            return el
        return None

def possible_linked_stream_names(content: str) -> Set[str]:
    """Return the set of stream names referenced by #stream syntax in content."""
    matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)
    return set(matches)

class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
    def run(self, lines: Iterable[str]) -> Iterable[str]:
        db_data = self.markdown.zulip_db_data
        if self.markdown.zulip_message and db_data is not None:
            # We check for alert words here, the set of which are
            # dependent on which users may see this message.
            #
            # Our caller passes in the list of possible_words. We
            # don't do any special rendering; we just append the alert words
            # we find to the set self.markdown.zulip_message.alert_words.
realm_words = db_data['possible_words'] content = '\n'.join(lines).lower() allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]']) allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]']) for word in realm_words: escaped = re.escape(word.lower()) match_re = re.compile('(?:%s)%s(?:%s)' % (allowed_before_punctuation, escaped, allowed_after_punctuation)) if re.search(match_re, content): self.markdown.zulip_message.alert_words.add(word) return lines # This prevents realm_filters from running on the content of a # Markdown link, breaking up the link. This is a monkey-patch, but it # might be worth sending a version of this change upstream. class AtomicLinkPattern(CompiledPattern): def get_element(self, m: Match[str]) -> Optional[Element]: href = m.group(9) if not href: return None if href[0] == "<": href = href[1:-1] href = sanitize_url(self.unescape(href.strip())) if href is None: return None db_data = self.markdown.zulip_db_data href = rewrite_local_links_to_relative(db_data, href) el = markdown.util.etree.Element('a') el.text = m.group(2) el.set('href', href) fixup_link(el, target_blank=(href[:1] != '#')) return el def handleMatch(self, m: Match[str]) -> Optional[Element]: ret = self.get_element(m) if ret is None: return None if not isinstance(ret, str): ret.text = markdown.util.AtomicString(ret.text) return ret def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry: # Registry is a new class added by py-markdown to replace Ordered List. # Since Registry doesn't support .keys(), it is easier to make a new # object instead of removing keys from the existing object. 
new_r = markdown.util.Registry() for k in keys: new_r.register(r[k], k, r.get_index_for_name(k)) return new_r # These are used as keys ("realm_filters_keys") to md_engines and the respective # realm filter caches DEFAULT_BUGDOWN_KEY = -1 ZEPHYR_MIRROR_BUGDOWN_KEY = -2 class Bugdown(markdown.Markdown): def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None: # define default configs self.config = { "realm_filters": [kwargs['realm_filters'], "Realm-specific filters for realm_filters_key %s" % (kwargs['realm'],)], "realm": [kwargs['realm'], "Realm id"], "code_block_processor_disabled": [kwargs['code_block_processor_disabled'], "Disabled for email gateway"] } super().__init__(*args, **kwargs) self.set_output_format('html') def build_parser(self) -> markdown.Markdown: # Build the parser using selected default features from py-markdown. # The complete list of all available processors can be found in the # super().build_parser() function. # # Note: for any py-markdown updates, manually check if we want any # of the new features added upstream or not; they wouldn't get # included by default. self.preprocessors = self.build_preprocessors() self.parser = self.build_block_parser() self.inlinePatterns = self.build_inlinepatterns() self.treeprocessors = self.build_treeprocessors() self.postprocessors = self.build_postprocessors() self.handle_zephyr_mirror() return self def build_preprocessors(self) -> markdown.util.Registry: # We disable the following preprocessors from upstream: # # html_block - insecure # reference - references don't make sense in a chat context. 
preprocessors = markdown.util.Registry() preprocessors.register(AutoNumberOListPreprocessor(self), 'auto_number_olist', 40) preprocessors.register(BugdownUListPreprocessor(self), 'hanging_ulists', 35) preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30) preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25) preprocessors.register(AlertWordsNotificationProcessor(self), 'custom_text_notifications', 20) return preprocessors def build_block_parser(self) -> markdown.util.Registry: # We disable the following blockparsers from upstream: # # indent - replaced by ours # hashheader - disabled, since headers look bad and don't make sense in a chat context. # setextheader - disabled, since headers look bad and don't make sense in a chat context. # olist - replaced by ours # ulist - replaced by ours # quote - replaced by ours parser = markdown.blockprocessors.BlockParser(self) parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 85) if not self.getConfig('code_block_processor_disabled'): parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 80) # We get priority 75 from 'table' extension parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70) parser.blockprocessors.register(UListProcessor(parser), 'ulist', 65) parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 60) parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55) parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50) return parser def build_inlinepatterns(self) -> markdown.util.Registry: # We disable the following upstream inline patterns: # # backtick - replaced by ours # escape - probably will re-add at some point. 
        # link - replaced by ours
        # image_link - replaced by ours
        # autolink - replaced by ours
        # automail - replaced by ours
        # linebreak - we use nl2br and consider that good enough
        # html - insecure
        # reference - references not useful
        # image_reference - references not useful
        # short_reference - references not useful
        # ---------------------------------------------------
        # strong_em - for these three patterns,
        # strong2 - we have our own versions where
        # emphasis2 - we disable _ for bold and emphasis

        # Declare regexes for clean single line calls to .register().
        NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE
        # Custom strikethrough syntax: ~~foo~~
        DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)'
        # Custom bold syntax: **foo** but not __foo__
        # str inside ** must start and end with a word character
        # it need for things like "const char *x = (char *)y"
        EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*'
        ENTITY_RE = markdown.inlinepatterns.ENTITY_RE
        STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*'
        # Inline code block without whitespace stripping
        BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'

        # Add Inline Patterns.  We use a custom numbering of the
        # rules, that preserves the order from upstream but leaves
        # space for us to add our own.  Higher priority numbers run
        # earlier in python-markdown's Registry.
        reg = markdown.util.Registry()
        reg.register(BacktickPattern(BACKTICK_RE), 'backtick', 105)
        reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100)
        reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95)
        reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90)
        reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85)
        reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80)
        reg.register(ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'), 'modal_link', 75)
        # Note that !gravatar syntax should be deprecated long term.
        reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70)
        reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65)
        reg.register(AtomicLinkPattern(get_link_re(), self), 'link', 60)
        reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55)
        # Reserve priority 45-54 for Realm Filters
        reg = self.register_realm_filters(reg)
        reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30)
        reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25)
        reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20)
        reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15)
        reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10)
        # We get priority 5 from 'nl2br' extension
        reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0)
        return reg

    def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
        """Register one RealmFilterPattern per configured realm filter, all at
        priority 45 (the band 45-54 is reserved for realm filters by
        build_inlinepatterns).  Returns the mutated registry."""
        for (pattern, format_string, id) in self.getConfig("realm_filters"):
            inlinePatterns.register(RealmFilterPattern(pattern, format_string, self),
                                    'realm_filters/%s' % (pattern), 45)
        return inlinePatterns

    def build_treeprocessors(self) -> markdown.util.Registry:
        # Here we build all the processors from upstream, plus a few of our own.
        treeprocessors = markdown.util.Registry()
        # We get priority 30 from 'hilite' extension
        treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
        treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20)
        treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15)
        if settings.CAMO_URI:
            # Only rewrite image URLs to the Camo proxy when a Camo URI
            # is actually configured.
            treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10)
        return treeprocessors

    def build_postprocessors(self) -> markdown.util.Registry:
        # These are the default python-markdown processors, unmodified.
        postprocessors = markdown.util.Registry()
        postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20)
        postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15)
        postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10)
        return postprocessors

    def getConfig(self, key: str, default: str='') -> Any:
        """ Return a setting for the given key or an empty string. """
        # self.config values are (value, description) pairs per the
        # python-markdown extension-config convention; we want the value.
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def handle_zephyr_mirror(self) -> None:
        # Strip the processor registries down to a minimal whitelist
        # when rendering traffic mirrored from Zephyr.
        if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
            # Disable almost all inline patterns for zephyr mirror
            # users' traffic that is mirrored.  Note that
            # inline_interesting_links is a treeprocessor and thus is
            # not removed
            self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink'])
            self.treeprocessors = get_sub_registry(self.treeprocessors,
                                                   ['inline_interesting_links', 'rewrite_to_https'])
            # insert new 'inline' processor because we have changed self.inlinePatterns
            # but InlineProcessor copies md as self.md in __init__.
            self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
            self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
            self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])

# Process-wide cache of constructed Markdown engines, keyed by
# (realm_filters_key, email_gateway).
md_engines = {}  # type: Dict[Tuple[int, bool], markdown.Markdown]
# The realm-filter tuples each cached engine was built with, used by
# maybe_update_markdown_engines to detect staleness.
realm_filter_data = {}  # type: Dict[int, List[Tuple[str, str, int]]]

def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
    """(Re)build and cache the Markdown engine for this key, discarding any
    previously cached engine.  Assumes realm_filter_data already holds the
    filters for realm_filters_key."""
    md_engine_key = (realm_filters_key, email_gateway)
    if md_engine_key in md_engines:
        del md_engines[md_engine_key]

    realm_filters = realm_filter_data[realm_filters_key]
    md_engines[md_engine_key] = build_engine(
        realm_filters=realm_filters,
        realm_filters_key=realm_filters_key,
        email_gateway=email_gateway,
    )

def build_engine(realm_filters: List[Tuple[str, str, int]],
                 realm_filters_key: int,
                 email_gateway: bool) -> markdown.Markdown:
    """Construct a fresh Bugdown engine.  Code blocks are disabled for the
    email gateway (email_gateway=True) via code_block_processor_disabled."""
    engine = Bugdown(
        realm_filters=realm_filters,
        realm=realm_filters_key,
        code_block_processor_disabled=email_gateway,
        extensions = [
            nl2br.makeExtension(),
            tables.makeExtension(),
            codehilite.makeExtension(
                linenums=False,
                guess_lang=False
            ),
        ])
    return engine

def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
    """Return the URLs produced by applying the realm's filters to a topic
    name (one URL per regex match, formatted with the match's groupdict)."""
    matches = []  # type: List[str]

    realm_filters = realm_filters_for_realm(realm_filters_key)

    for realm_filter in realm_filters:
        # realm_filter is (pattern, url_format_string, id).
        pattern = prepare_realm_pattern(realm_filter[0])
        for m in re.finditer(pattern, topic_name):
            matches += [realm_filter[1] % m.groupdict()]
    return matches

def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
    """Ensure the md_engines cache has a current engine for this key,
    rebuilding engines whose realm filters have changed.

    If realm_filters_key is None, load all filters and build engines for
    every realm (plus the default and Zephyr-mirror keys)."""
    # If realm_filters_key is None, load all filters
    global realm_filter_data
    if realm_filters_key is None:
        all_filters = all_realm_filters()
        all_filters[DEFAULT_BUGDOWN_KEY] = []
        for realm_filters_key, filters in all_filters.items():
            realm_filter_data[realm_filters_key] = filters
            make_md_engine(realm_filters_key, email_gateway)
        # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
        realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
        make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
    else:
        realm_filters = realm_filters_for_realm(realm_filters_key)
        if realm_filters_key not in realm_filter_data or \
                realm_filter_data[realm_filters_key] != realm_filters:
            # Realm filters data has changed, update `realm_filter_data` and any
            # of the existing markdown engines using this set of realm filters.
            realm_filter_data[realm_filters_key] = realm_filters
            for email_gateway_flag in [True, False]:
                if (realm_filters_key, email_gateway_flag) in md_engines:
                    # Update only existing engines(if any), don't create new one.
                    make_md_engine(realm_filters_key, email_gateway_flag)

        if (realm_filters_key, email_gateway) not in md_engines:
            # Markdown engine corresponding to this key doesn't exists so create one.
            make_md_engine(realm_filters_key, email_gateway)

# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons.  The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)
def privacy_clean_markdown(content: str) -> str:
    """Replace every word character with 'x' and repr() the result, so
    message content can be logged without leaking its text."""
    return repr(_privacy_re.sub('x', content))


def log_bugdown_error(msg: str) -> None:
    """We use this unusual logging approach to log the bugdown error, in
    order to prevent AdminNotifyHandler from sending the sanitized
    original markdown formatting into another Zulip message, which
    could cause an infinite exception loop."""
    bugdown_logger.error(msg)

def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
    """Look up users in this realm by email (case-insensitive), returning a
    dict mapping the normalized (stripped, lowercased) email to a row with
    'id' and 'email'."""
    if not emails:
        return dict()

    # One Q per email, OR-ed together into a single query below.
    q_list = {
        Q(email__iexact=email.strip().lower())
        for email in emails
    }

    rows = UserProfile.objects.filter(
        realm_id=realm_id
    ).filter(
        functools.reduce(lambda a, b: a | b, q_list),
    ).values(
        'id',
        'email',
    )

    dct = {
        row['email'].strip().lower(): row
        for row in rows
    }
    return dct

def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
    """Resolve possible @-mention texts to active users in the realm,
    returning rows with 'id', 'full_name', and 'email'.  Mention texts may
    use either plain "Full Name" or "Full Name|user_id" syntax."""
    if not mention_texts:
        return list()

    # Remove the trailing part of the `name|id` mention syntax,
    # thus storing only full names in full_names.
    full_names = set()
    name_re = r'(?P<full_name>.+)\|\d+$'
    for mention_text in mention_texts:
        name_syntax_match = re.match(name_re, mention_text)
        if name_syntax_match:
            full_names.add(name_syntax_match.group("full_name"))
        else:
            full_names.add(mention_text)

    q_list = {
        Q(full_name__iexact=full_name)
        for full_name in full_names
    }

    rows = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).filter(
        functools.reduce(lambda a, b: a | b, q_list),
    ).values(
        'id',
        'full_name',
        'email',
    )
    return list(rows)

class MentionData:
    """Pre-fetched lookup tables for rendering user and user-group mentions
    in a single message: name -> user row, id -> user row, and group name ->
    UserGroup plus group id -> member ids."""
    def __init__(self, realm_id: int, content: str) -> None:
        # Parse candidate mention syntax out of the raw content; this can
        # overestimate (see get_user_ids below).
        mention_texts = possible_mentions(content)
        possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts)
        self.full_name_info = {
            row['full_name'].lower(): row
            for row in possible_mentions_info
        }
        self.user_id_info = {
            row['id']: row
            for row in possible_mentions_info
        }
        self.init_user_group_data(realm_id=realm_id, content=content)

    def init_user_group_data(self, realm_id: int, content: str) -> None:
        """Populate user-group name info and the group-id -> member-ids map
        for any user groups possibly mentioned in content."""
        user_group_names = possible_user_group_mentions(content)
        self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
        self.user_group_members = defaultdict(list)  # type: Dict[int, List[int]]
        group_ids = [group.id for group in self.user_group_name_info.values()]

        if not group_ids:
            # Early-return to avoid the cost of hitting the ORM,
            # which shows up in profiles.
            return

        membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
        for info in membership.values('user_group_id', 'user_profile_id'):
            group_id = info['user_group_id']
            user_profile_id = info['user_profile_id']
            self.user_group_members[group_id].append(user_profile_id)

    def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
        # warning: get_user_by_name is not dependable if two
        # users of the same full name are mentioned. Use
        # get_user_by_id where possible.
        return self.full_name_info.get(name.lower(), None)

    def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
        return self.user_id_info.get(int(id), None)

    def get_user_ids(self) -> Set[int]:
        """
        Returns the user IDs that might have been mentioned by this
        content.  Note that because this data structure has not parsed
        the message and does not know about escaping/code blocks, this
        will overestimate the list of user ids.
        """
        return set(self.user_id_info.keys())

    def get_user_group(self, name: str) -> Optional[UserGroup]:
        return self.user_group_name_info.get(name.lower(), None)

    def get_group_members(self, user_group_id: int) -> List[int]:
        return self.user_group_members.get(user_group_id, [])

def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
    """Map lowercased user-group name -> UserGroup for the given names in
    this realm."""
    if not user_group_names:
        return dict()

    rows = UserGroup.objects.filter(realm_id=realm_id,
                                    name__in=user_group_names)
    dct = {row.name.lower(): row for row in rows}
    return dct

def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
    """Map stream name -> row with 'id' and 'name' for active streams in the
    realm matching the given (case-sensitive) names."""
    if not stream_names:
        return dict()

    q_list = {
        Q(name=name)
        for name in stream_names
    }

    rows = get_active_streams(
        realm=realm,
    ).filter(
        functools.reduce(lambda a, b: a | b, q_list),
    ).values(
        'id',
        'name',
    )

    dct = {
        row['name']: row
        for row in rows
    }
    return dct

def do_convert(content: str,
               message: Optional[Message]=None,
               message_realm: Optional[Realm]=None,
               possible_words: Optional[Set[str]]=None,
               sent_by_bot: Optional[bool]=False,
               translate_emoticons: Optional[bool]=False,
               mention_data: Optional[MentionData]=None,
               email_gateway: Optional[bool]=False,
               no_previews: Optional[bool]=False) -> str:
    """Convert Markdown to HTML, with Zulip-specific settings and hacks."""
    # This logic is a bit convoluted, but the overall goal is to support a range of use cases:
    # * Nothing is passed in other than content -> just run default options (e.g. for docs)
    # * message is passed, but no realm is -> look up realm from message
    # * message_realm is passed -> use that realm for bugdown purposes
    if message is not None:
        if message_realm is None:
            message_realm = message.get_realm()
    if message_realm is None:
        realm_filters_key = DEFAULT_BUGDOWN_KEY
    else:
        realm_filters_key = message_realm.id

    # Identify the message in log lines without exposing its content.
    if message and hasattr(message, 'id') and message.id:
        logging_message_id = 'id# ' + str(message.id)
    else:
        logging_message_id = 'unknown'

    if message is not None and message_realm is not None:
        if message_realm.is_zephyr_mirror_realm:
            if message.sending_client.name == "zephyr_mirror":
                # Use slightly customized Markdown processor for content
                # delivered via zephyr_mirror
                realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY

    maybe_update_markdown_engines(realm_filters_key, email_gateway)

    md_engine_key = (realm_filters_key, email_gateway)

    if md_engine_key in md_engines:
        _md_engine = md_engines[md_engine_key]
    else:
        # Fall back to the default engine, building it first if needed.
        if DEFAULT_BUGDOWN_KEY not in md_engines:
            maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)

        _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()

    # Filters such as UserMentionPattern need a message.
    _md_engine.zulip_message = message
    _md_engine.zulip_realm = message_realm
    _md_engine.zulip_db_data = None  # for now
    _md_engine.image_preview_enabled = image_preview_enabled(
        message, message_realm, no_previews)
    _md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
        message, message_realm, no_previews)

    # Pre-fetch data from the DB that is used in the bugdown thread
    if message is not None:
        assert message_realm is not None  # ensured above if message is not None
        if possible_words is None:
            possible_words = set()  # Set[str]

        # Here we fetch the data structures needed to render
        # mentions/avatars/stream mentions from the database, but only
        # if there is syntax in the message that might use them, since
        # the fetches are somewhat expensive and these types of syntax
        # are uncommon enough that it's a useful optimization.

        if mention_data is None:
            mention_data = MentionData(message_realm.id, content)

        emails = possible_avatar_emails(content)
        email_info = get_email_info(message_realm.id, emails)

        stream_names = possible_linked_stream_names(content)
        stream_name_info = get_stream_name_info(message_realm, stream_names)

        if content_has_emoji_syntax(content):
            active_realm_emoji = message_realm.get_active_emoji()
        else:
            active_realm_emoji = dict()

        _md_engine.zulip_db_data = {
            'possible_words': possible_words,
            'email_info': email_info,
            'mention_data': mention_data,
            'active_realm_emoji': active_realm_emoji,
            'realm_uri': message_realm.uri,
            'sent_by_bot': sent_by_bot,
            'stream_names': stream_name_info,
            'translate_emoticons': translate_emoticons,
        }

    try:
        # Spend at most 5 seconds rendering; this protects the backend
        # from being overloaded by bugs (e.g. markdown logic that is
        # extremely inefficient in corner cases) as well as user
        # errors (e.g. a realm filter that makes some syntax
        # infinite-loop).
        rendered_content = timeout(5, _md_engine.convert, content)

        # Throw an exception if the content is huge; this protects the
        # rest of the codebase from any bugs where we end up rendering
        # something huge.
        if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
            raise BugdownRenderingException('Rendered content exceeds %s characters (message %s)' %
                                            (MAX_MESSAGE_LENGTH * 10, logging_message_id))
        return rendered_content
    except Exception:
        cleaned = privacy_clean_markdown(content)
        # NOTE: Don't change this message without also changing the
        # logic in logging_handlers.py or we can create recursive
        # exceptions.
        exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s\n (message %s)'
                            % (traceback.format_exc(), cleaned, logging_message_id))
        bugdown_logger.exception(exception_message)

        raise BugdownRenderingException()
    finally:
        # These next three lines are slightly paranoid, since
        # we always set these right before actually using the
        # engine, but better safe then sorry.
        _md_engine.zulip_message = None
        _md_engine.zulip_realm = None
        _md_engine.zulip_db_data = None

# Simple wall-clock accounting of time spent in Markdown rendering,
# accumulated across calls by bugdown_stats_start/finish.
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0

def get_bugdown_time() -> float:
    return bugdown_total_time

def get_bugdown_requests() -> int:
    return bugdown_total_requests

def bugdown_stats_start() -> None:
    """Record the start time of a render; paired with bugdown_stats_finish."""
    global bugdown_time_start
    bugdown_time_start = time.time()

def bugdown_stats_finish() -> None:
    """Accumulate the elapsed time since bugdown_stats_start and bump the
    request counter."""
    global bugdown_total_time
    global bugdown_total_requests
    global bugdown_time_start
    bugdown_total_requests += 1
    bugdown_total_time += (time.time() - bugdown_time_start)

def convert(content: str,
            message: Optional[Message]=None,
            message_realm: Optional[Realm]=None,
            possible_words: Optional[Set[str]]=None,
            sent_by_bot: Optional[bool]=False,
            translate_emoticons: Optional[bool]=False,
            mention_data: Optional[MentionData]=None,
            email_gateway: Optional[bool]=False,
            no_previews: Optional[bool]=False) -> str:
    """Public entry point: do_convert wrapped with timing statistics."""
    bugdown_stats_start()
    ret = do_convert(content, message, message_realm,
                     possible_words, sent_by_bot, translate_emoticons,
                     mention_data, email_gateway,
                     no_previews=no_previews)
    bugdown_stats_finish()
    return ret
./CrossVul/dataset_final_sorted/CWE-601/py/good_1315_0