INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
This method is called to create the default OPTIONS response. This can be changed through subclassing to change the default behavior of OPTIONS responses.
def make_default_options_response(self):
    """Create the default ``OPTIONS`` response.

    Subclasses may override this method to change how ``OPTIONS``
    requests are answered.

    .. versionadded:: 0.7
    """
    adapter = _request_ctx_stack.top.url_adapter
    if hasattr(adapter, 'allowed_methods'):
        methods = adapter.allowed_methods()
    else:
        # Fallback for Werkzeug < 0.7: probe the router with an
        # impossible method so it raises MethodNotAllowed, which
        # carries the list of valid methods.
        methods = []
        try:
            adapter.match(method='--')
        except MethodNotAllowed as e:
            methods = e.valid_methods
        except HTTPException as e:
            pass
    rv = self.response_class()
    rv.allow.update(methods)
    return rv
Converts the return value from a view function to a real response object that is an instance of :attr:`response_class`.
def make_response(self, rv):
    """Convert the return value of a view function into an instance of
    :attr:`response_class`.

    Permitted types for `rv`:

    - :attr:`response_class`: returned unchanged
    - :class:`str`: a response object is created with the string as body
    - :class:`unicode`: a response object is created with the string
      encoded to utf-8 as body
    - a WSGI function: called as WSGI application and buffered as a
      response object
    - :class:`tuple`: ``(response, status, headers)`` where `response`
      is any of the types above, `status` is a string or an integer and
      `headers` is a list or a dictionary with header values

    :param rv: the return value from the view function

    .. versionchanged:: 0.9
       Previously a tuple was interpreted as the arguments for the
       response object.
    """
    status = headers = None
    if isinstance(rv, tuple):
        # Pad a short tuple out to the full (body, status, headers) form.
        rv, status, headers = rv + (None,) * (3 - len(rv))

    if rv is None:
        raise ValueError('View function did not return a response')

    if not isinstance(rv, self.response_class):
        # When we create a response object directly, we let the
        # constructor set the headers and status.  There can be extra
        # logic involved when creating these objects with specific
        # values (like default content type selection).
        if isinstance(rv, (text_type, bytes, bytearray)):
            rv = self.response_class(rv, headers=headers, status=status)
            headers = status = None
        else:
            rv = self.response_class.force_type(rv, request.environ)

    if status is not None:
        if isinstance(status, string_types):
            rv.status = status
        else:
            rv.status_code = status
    if headers:
        rv.headers.extend(headers)

    return rv
Creates a URL adapter for the given request. The URL adapter is created at a point where the request context is not yet set up so the request is passed explicitly.
def create_url_adapter(self, request):
    """Create a URL adapter for the given request.

    The adapter is created at a point where the request context is not
    yet set up, so the request is passed explicitly.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9
       Can now also be called without a request object when the URL
       adapter is created for the application context.
    """
    if request is not None:
        return self.url_map.bind_to_environ(
            request.environ,
            server_name=self.config['SERVER_NAME'])
    # Without a request we need at the very least the server name to be
    # set for binding to work; otherwise return None implicitly.
    if self.config['SERVER_NAME'] is not None:
        return self.url_map.bind(
            self.config['SERVER_NAME'],
            script_name=self.config['APPLICATION_ROOT'] or '/',
            url_scheme=self.config['PREFERRED_URL_SCHEME'])
Injects the URL defaults for the given endpoint directly into the values dictionary passed. This is used internally and automatically called on URL building.
def inject_url_defaults(self, endpoint, values):
    """Inject the URL defaults for *endpoint* directly into the
    *values* dictionary, in place.

    Used internally and automatically called on URL building.

    .. versionadded:: 0.7
    """
    funcs = self.url_default_functions.get(None, ())
    if '.' in endpoint:
        # A dotted endpoint belongs to a blueprint; run its default
        # functions after the application-wide ones.
        blueprint = endpoint.rsplit('.', 1)[0]
        funcs = chain(funcs, self.url_default_functions.get(blueprint, ()))
    for func in funcs:
        func(endpoint, values)
Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
def handle_url_build_error(self, error, endpoint, values):
    """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`."""
    exc_type, exc_value, tb = sys.exc_info()
    for handler in self.url_build_error_handlers:
        try:
            rv = handler(error, endpoint, values)
            if rv is not None:
                return rv
        except BuildError as error:
            # The handler re-raised a BuildError; remember it and try
            # the next handler.
            pass

    # At this point we want to re-raise the exception.  If the error is
    # still the same one we can re-raise it with the original
    # traceback, otherwise we raise it from here.
    if error is exc_value:
        reraise(exc_type, exc_value, tb)
    raise error
Called before the actual request dispatching and will call every :meth:`before_request`-decorated function. If any of these functions returns a value, it's handled as if it was the return value from the view and further request handling is stopped.
def preprocess_request(self):
    """Called before the actual request dispatching; calls every
    :meth:`before_request`-decorated function.  If any of these
    functions returns a value, it is handled as if it was the return
    value from the view and further request handling is stopped.

    This also triggers the :meth:`url_value_processor` functions before
    the actual :meth:`before_request` functions are called.
    """
    bp = _request_ctx_stack.top.request.blueprint

    funcs = self.url_value_preprocessors.get(None, ())
    if bp is not None and bp in self.url_value_preprocessors:
        funcs = chain(funcs, self.url_value_preprocessors[bp])
    for func in funcs:
        func(request.endpoint, request.view_args)

    funcs = self.before_request_funcs.get(None, ())
    if bp is not None and bp in self.before_request_funcs:
        funcs = chain(funcs, self.before_request_funcs[bp])
    for func in funcs:
        rv = func()
        if rv is not None:
            # Short-circuit: treat this as the view's return value.
            return rv
Can be overridden in order to modify the response object before it's sent to the WSGI server. By default this will call all the :meth:`after_request`-decorated functions.
def process_response(self, response):
    """Modify the response object before it's sent to the WSGI server.

    By default this calls all the :meth:`after_request`-decorated
    functions.

    .. versionchanged:: 0.5
       As of Flask 0.5 the functions registered for after-request
       execution are called in reverse order of registration.

    :param response: a :attr:`response_class` object.
    :return: a new response object or the same, has to be an instance
             of :attr:`response_class`.
    """
    ctx = _request_ctx_stack.top
    bp = ctx.request.blueprint
    funcs = ctx._after_request_functions
    if bp is not None and bp in self.after_request_funcs:
        funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
    if None in self.after_request_funcs:
        funcs = chain(funcs, reversed(self.after_request_funcs[None]))
    for handler in funcs:
        response = handler(response)
    if not self.session_interface.is_null_session(ctx.session):
        self.save_session(ctx.session, response)
    return response
Called after the actual request dispatching and will call every :meth:`teardown_request`-decorated function. This is not actually called by the :class:`Flask` object itself but is always triggered when the request context is popped. That way we have tighter control over certain resources under testing environments.
def do_teardown_request(self, exc=None):
    """Called after the actual request dispatching; calls every
    :meth:`teardown_request`-decorated function.

    This is not actually called by the :class:`Flask` object itself but
    is always triggered when the request context is popped.  That way
    we have tighter control over certain resources under testing
    environments.

    .. versionchanged:: 0.9
       Added the `exc` argument.  Previously this always used the
       current exception information.
    """
    if exc is None:
        exc = sys.exc_info()[1]
    funcs = reversed(self.teardown_request_funcs.get(None, ()))
    bp = _request_ctx_stack.top.request.blueprint
    if bp is not None and bp in self.teardown_request_funcs:
        funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
    for func in funcs:
        # Return values of teardown functions are ignored.
        func(exc)
    request_tearing_down.send(self, exc=exc)
Called when an application context is popped. This works pretty much the same as :meth:`do_teardown_request` but for the application context.
def do_teardown_appcontext(self, exc=None):
    """Called when an application context is popped.

    Works much like :meth:`do_teardown_request` but for the application
    context.

    .. versionadded:: 0.9
    """
    if exc is None:
        exc = sys.exc_info()[1]
    # Teardown functions run in reverse registration order.
    for func in reversed(self.teardown_appcontext_funcs):
        func(exc)
    appcontext_tearing_down.send(self, exc=exc)
The actual WSGI application. This is not implemented in __call__ so that middlewares can be applied without losing a reference to the class. So instead of doing this::
def wsgi_app(self, environ, start_response):
    """The actual WSGI application.

    This is not implemented in `__call__` so that middlewares can be
    applied without losing a reference to the class.  So instead of
    doing this::

        app = MyMiddleware(app)

    it's a better idea to do this instead::

        app.wsgi_app = MyMiddleware(app.wsgi_app)

    Then you still have the original application object around and can
    continue to call methods on it.

    .. versionchanged:: 0.7
       The behavior of the before and after request callbacks was
       changed under error conditions and a new callback was added that
       will always execute at the end of the request, independent of
       whether an error occurred or not.  See
       :ref:`callbacks-and-errors`.

    :param environ: a WSGI environment
    :param start_response: a callable accepting a status code, a list
                           of headers and an optional exception context
                           to start the response
    """
    ctx = self.request_context(environ)
    ctx.push()
    error = None
    try:
        try:
            response = self.full_dispatch_request()
        except Exception as e:
            # Convert any unhandled exception into an error response.
            error = e
            response = self.make_response(self.handle_exception(e))
        return response(environ, start_response)
    finally:
        if self.should_ignore_error(error):
            error = None
        ctx.auto_pop(error)
Yield unique values in iterable preserving order.
def unique(iterable):
    """Yield the unique values of *iterable*, preserving first-seen order.

    :param iterable: any iterable of hashable values
    :returns: generator yielding each distinct value once
    """
    seen = set()
    for value in iterable:
        # idiomatic membership test (was ``not value in seen``)
        if value not in seen:
            seen.add(value)
            yield value
Place the runtime requirements from pkg_info into metadata.
def handle_requires(metadata, pkg_info, key):
    """Place the runtime requirements from *pkg_info* into *metadata*.

    Groups the values of header *key* by their (condition, extra) pair
    and writes PEP 426-style ``run_requires`` entries; any extras seen
    are appended to ``metadata['extras']``.

    :param metadata: dict being built (mutated in place)
    :param pkg_info: an email.message-style object with get_all()
    :param key: the header name to read (e.g. ``Requires-Dist``)
    """
    may_requires = defaultdict(list)
    for value in pkg_info.get_all(key):
        extra_match = EXTRA_RE.search(value)
        if extra_match:
            groupdict = extra_match.groupdict()
            condition = groupdict['condition']
            extra = groupdict['extra']
            package = groupdict['package']
            if condition.endswith(' and '):
                # Strip the trailing " and " left by the regex split.
                condition = condition[:-5]
        else:
            condition, extra = None, None
            package = value
        key = MayRequiresKey(condition, extra)
        may_requires[key].append(package)

    if may_requires:
        metadata['run_requires'] = []
        for key, value in may_requires.items():
            may_requirement = {'requires': value}
            if key.extra:
                may_requirement['extra'] = key.extra
            if key.condition:
                may_requirement['environment'] = key.condition
            metadata['run_requires'].append(may_requirement)

        # idiomatic membership test (was ``not 'extras' in metadata``)
        if 'extras' not in metadata:
            metadata['extras'] = []
        metadata['extras'].extend(
            [key.extra for key in may_requires.keys() if key.extra])
Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.
def pkginfo_to_dict(path, distribution=None):
    """Convert a PKG-INFO file to a prototype Metadata 2.0 (PEP 426) dict.

    The description is included under the key ['description'] rather
    than being written to a separate file.

    :param path: path to the PKG-INFO file
    :param distribution: optional distutils Distribution()
    """
    metadata = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
    metadata["generator"] = "bdist_wheel (" + wheel.__version__ + ")"
    try:
        # On Python 2 the ``unicode`` builtin exists, so read_pkg_info()
        # is used directly; on Python 3 the bare name raises NameError
        # and we parse the bytes with an explicit utf-8 decode instead.
        # (The collapsed source mangled this two-statement try body.)
        unicode
        pkg_info = read_pkg_info(path)
    except NameError:
        pkg_info = email.parser.Parser().parsestr(
            open(path, 'rb').read().decode('utf-8'))
    description = None

    if pkg_info['Summary']:
        metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
        del pkg_info['Summary']

    if pkg_info['Description']:
        description = dedent_description(pkg_info)
        del pkg_info['Description']
    else:
        payload = pkg_info.get_payload()
        if isinstance(payload, bytes):
            # Avoid a Python 2 Unicode error.
            # We still suffer ? glyphs on Python 3.
            payload = payload.decode('utf-8')
        if payload:
            description = payload

    if description:
        pkg_info['description'] = description

    for key in unique(k.lower() for k in pkg_info.keys()):
        low_key = key.replace('-', '_')

        if low_key in SKIP_FIELDS:
            continue

        if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
            continue

        if low_key in PLURAL_FIELDS:
            metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
        elif low_key == "requires_dist":
            handle_requires(metadata, pkg_info, key)
        elif low_key == 'provides_extra':
            if 'extras' not in metadata:
                metadata['extras'] = []
            metadata['extras'].extend(pkg_info.get_all(key))
        elif low_key == 'home_page':
            metadata['extensions']['python.details']['project_urls'] = {
                'Home': pkg_info[key]}
        elif low_key == 'keywords':
            metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
        else:
            metadata[low_key] = pkg_info[key]

    metadata['metadata_version'] = METADATA_VERSION

    if 'extras' in metadata:
        metadata['extras'] = sorted(set(metadata['extras']))

    # Include more information if a distribution object is available.
    if distribution:
        for requires, attr in (('test_requires', 'tests_require'),):
            try:
                requirements = getattr(distribution, attr)
                if isinstance(requirements, list):
                    new_requirements = list(convert_requirements(requirements))
                    metadata[requires] = [{'requires': new_requirements}]
            except AttributeError:
                pass

    # Handle contacts.
    contacts = []
    for contact_type, role in CONTACT_FIELDS:
        contact = {}
        for key in contact_type:
            if contact_type[key] in metadata:
                contact[key] = metadata.pop(contact_type[key])
        if contact:
            contact['role'] = role
            contacts.append(contact)
    if contacts:
        metadata['extensions']['python.details']['contacts'] = contacts

    # Convert entry points to exports.
    try:
        with open(os.path.join(os.path.dirname(path),
                               "entry_points.txt"), "r") as ep_file:
            ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
        exports = {}
        for group, items in ep_map.items():
            exports[group] = {}
            for item in items.values():
                name, export = str(item).split(' = ', 1)
                exports[group][name] = export
        if exports:
            metadata['extensions']['python.exports'] = exports
    except IOError:
        pass

    # Copy console_scripts entry points to commands.
    if 'python.exports' in metadata['extensions']:
        for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
                                         ('gui_scripts', 'wrap_gui')):
            if ep_script in metadata['extensions']['python.exports']:
                metadata['extensions']['python.commands'][wrap_script] = \
                    metadata['extensions']['python.exports'][ep_script]
    return metadata
Compose the version predicates for requirement in PEP 345 fashion.
def requires_to_requires_dist(requirement):
    """Compose the version predicates for *requirement* in PEP 345 fashion.

    Returns ``" (op1ver1,op2ver2,...)"`` built from ``requirement.specs``,
    or the empty string when there are no specifiers.
    """
    predicates = [op + version for op, version in requirement.specs]
    if predicates:
        return " (%s)" % ','.join(predicates)
    return ''
Convert a .egg-info directory with PKG-INFO to the Metadata 1.3 aka old-draft Metadata 2.0 format.
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """Convert a .egg-info directory with PKG-INFO to the Metadata 1.3
    aka old-draft Metadata 2.0 format.
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.0')

    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        requires = open(requires_path).read()
        for extra, reqs in pkg_resources.split_sections(requires):
            condition = ''
            if extra and ':' in extra:
                # setuptools "extra:condition" syntax
                extra, condition = extra.split(':', 1)
            if extra:
                pkg_info['Provides-Extra'] = extra
                if condition:
                    condition += " and "
                condition += 'extra == %s' % repr(extra)
            if condition:
                condition = '; ' + condition
            for new_req in convert_requirements(reqs):
                pkg_info['Requires-Dist'] = new_req + condition

    description = pkg_info['Description']
    if description:
        # Move the long description into the message payload.
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
Break up a module path into its various parts (prefix, module, class, method).
def set_possible(self):
    """Break up a module path into its various parts (prefix, module,
    class, method).

    This uses PEP 8 conventions, so foo.Bar would be the foo module
    with class Bar.

    return -- list -- a list of possible interpretations of the module
    path (e.g., foo.bar can be the bar module in the foo module, or the
    bar method in the foo module)
    """
    possible = []
    name = self.name
    logger.debug('Guessing test name: {}'.format(name))

    name_f = self.name.lower()
    filepath = ""
    if name_f.endswith(".py") or ".py:" in name_f:
        # path/something:Class.method
        bits = name.split(":", 1)
        filepath = bits[0]
        logger.debug('Found filepath: {}'.format(filepath))
        name = bits[1] if len(bits) > 1 else ""
        if name:
            logger.debug('Found test name: {} for filepath: {}'.format(name, filepath))

    bits = name.split('.')
    basedir = self.basedir
    method_prefix = self.method_prefix

    # check if the last bit is a Class
    if re.search(r'^\*?[A-Z]', bits[-1]):
        logger.debug('Found class in name: {}'.format(bits[-1]))
        possible.append(PathFinder(basedir, method_prefix, **{
            'class_name': bits[-1],
            'module_name': bits[-2] if len(bits) > 1 else '',
            'prefix': os.sep.join(bits[0:-2]),
            'filepath': filepath,
        }))
    elif len(bits) > 1 and re.search(r'^\*?[A-Z]', bits[-2]):
        logger.debug('Found class in name: {}'.format(bits[-2]))
        possible.append(PathFinder(basedir, method_prefix, **{
            'class_name': bits[-2],
            'method_name': bits[-1],
            'module_name': bits[-3] if len(bits) > 2 else '',
            'prefix': os.sep.join(bits[0:-3]),
            'filepath': filepath,
        }))
    else:
        if self.name:
            if filepath:
                if len(bits):
                    possible.append(PathFinder(basedir, method_prefix, **{
                        'filepath': filepath,
                        'method_name': bits[0],
                    }))
                else:
                    possible.append(PathFinder(basedir, method_prefix, **{
                        'filepath': filepath,
                    }))
            else:
                # No filepath and no class marker: the name could be a
                # module, a method, or a directory prefix -- try all.
                logger.debug('Test name is ambiguous')
                possible.append(PathFinder(basedir, method_prefix, **{
                    'module_name': bits[-1],
                    'prefix': os.sep.join(bits[0:-1]),
                    'filepath': filepath,
                }))
                possible.append(PathFinder(basedir, method_prefix, **{
                    'method_name': bits[-1],
                    'module_name': bits[-2] if len(bits) > 1 else '',
                    'prefix': os.sep.join(bits[0:-2]),
                    'filepath': filepath,
                }))
                possible.append(PathFinder(basedir, method_prefix, **{
                    'prefix': os.sep.join(bits),
                    'filepath': filepath,
                }))
        else:
            possible.append(PathFinder(basedir, method_prefix, filepath=filepath))

    logger.debug("Found {} possible test names".format(len(possible)))
    self.possible = possible
return modules that match module_name
def modules(self):
    """Yield imported modules that match module_name."""
    # Since the module has to be importable, put the basepath as the
    # very first path to check: that should minimize namespace
    # collisions.  This is what unittest does also.
    sys.path.insert(0, self.basedir)
    for p in self.paths():
        # http://stackoverflow.com/questions/67631/
        try:
            module_name = self.module_path(p)
            logger.debug("Importing {} from path {}".format(module_name, p))
            m = importlib.import_module(module_name)
            yield m
        except Exception as e:
            # Best-effort: record the first import failure and keep
            # trying the remaining candidate paths.
            logger.warning('Caught exception while importing {}: {}'.format(p, e))
            logger.warning(e, exc_info=True)
            error_info = getattr(self, 'error_info', None)
            if not error_info:
                exc_info = sys.exc_info()
                self.error_info = exc_info
            continue
    sys.path.pop(0)
The partial self.class_name will be used to find actual TestCase classes.
def classes(self):
    """Yield the TestCase classes found via the partial
    self.class_name."""
    for module in self.modules():
        members = inspect.getmembers(module, inspect.isclass)
        class_name = getattr(self, 'class_name', '')
        class_regex = ''
        if class_name:
            if class_name.startswith("*"):
                # Leading * means "match anywhere in the name".
                class_name = class_name.strip("*")
                class_regex = re.compile(r'.*?{}'.format(class_name), re.I)
            else:
                class_regex = re.compile(r'^{}'.format(class_name), re.I)

        for c_name, c in members:
            can_yield = True
            if class_regex and not class_regex.match(c_name):
                can_yield = False

            if can_yield and issubclass(c, unittest.TestCase):
                if c is not unittest.TestCase:  # ignore the base TestCase class
                    logger.debug('class: {} matches {}'.format(c_name, class_name))
                    yield c
Return the actual test methods that matched self.method_name.
def method_names(self):
    """Yield (class, method name) pairs for the test methods that
    matched self.method_name."""
    for c in self.classes():
        # Collect both bound methods and plain functions, see
        # http://stackoverflow.com/questions/17019949/
        members = inspect.getmembers(
            c, lambda f: inspect.ismethod(f) or inspect.isfunction(f))
        method_name = getattr(self, 'method_name', '')
        method_regex = ''
        if method_name:
            if method_name.startswith(self.method_prefix):
                method_regex = re.compile(r'^{}'.format(method_name), flags=re.I)
            else:
                if method_name.startswith("*"):
                    # Leading * means "match anywhere after the prefix".
                    method_name = method_name.strip("*")
                    method_regex = re.compile(
                        r'^{}[_]{{0,1}}.*?{}'.format(self.method_prefix, method_name),
                        flags=re.I
                    )
                else:
                    method_regex = re.compile(
                        r'^{}[_]{{0,1}}{}'.format(self.method_prefix, method_name),
                        flags=re.I
                    )

        for m_name, m in members:
            if not m_name.startswith(self.method_prefix):
                continue

            can_yield = True
            if method_regex and not method_regex.match(m_name):
                can_yield = False

            if can_yield:
                logger.debug('method: {} matches {}'.format(m_name, method_name))
                yield c, m_name
check if name combined with test prefixes or postfixes is found anywhere in the list of basenames
def _find_basename(self, name, basenames, is_prefix=False):
    """Check if *name* combined with the test prefixes or postfixes is
    found anywhere in the list of basenames.

    :param name: string, the name you're searching for
    :param basenames: list, a list of basenames to check
    :param is_prefix: bool, True if this is a prefix search, which
        means it will also check if name matches any of the basenames
        without the prefixes or postfixes; if False then the prefixes
        or postfixes must be present (i.e. the module we're looking for
        is the actual test module, not the parent modules it's
        contained in)
    :returns: string, the basename if it is found, otherwise ""
    """
    ret = ""
    fileroots = [(os.path.splitext(n)[0], n) for n in basenames]
    glob = False

    if name.startswith("*"):
        # Leading * switches from startswith-matching to
        # substring-matching.
        glob = True
        name = name.strip("*")

    for fileroot, basename in fileroots:
        if name in fileroot or fileroot in name:
            # 1) try name + postfix
            for pf in self.module_postfixes:
                logger.debug(
                    'Checking if basename {} starts with {} and ends with {}'.format(
                        basename, name, pf
                    ))
                if glob:
                    if name in fileroot and fileroot.endswith(pf):
                        ret = basename
                        break
                else:
                    if fileroot.startswith(name) and fileroot.endswith(pf):
                        ret = basename
                        break

            # 2) try prefix + name
            if not ret:
                for pf in self.module_prefixes:
                    n = pf + name
                    logger.debug('Checking if basename {} starts with {}'.format(basename, n))
                    if glob:
                        if fileroot.startswith(pf) and name in fileroot:
                            ret = basename
                            break
                    else:
                        if fileroot.startswith(n):
                            ret = basename
                            break

            # 3) fall back to a bare-name match
            if not ret:
                if is_prefix:
                    logger.debug('Checking if basename {} starts with {}'.format(basename, name))
                    if basename.startswith(name) or (glob and name in basename):
                        ret = basename
                else:
                    logger.debug(
                        'Checking if basename {} starts with {} and is a test module'.format(
                            basename, name
                        ))
                    if glob:
                        if name in basename and self._is_module_path(basename):
                            ret = basename
                    else:
                        if basename.startswith(name) and self._is_module_path(basename):
                            ret = basename

            if ret:
                logger.debug('Found basename {}'.format(ret))
                break

    return ret
Similar to _find_prefix_paths () but only returns the first match
def _find_prefix_path(self, basedir, prefix): """Similar to _find_prefix_paths() but only returns the first match""" ret = "" for ret in self._find_prefix_paths(basedir, prefix): break if not ret: raise IOError("Could not find prefix {} in path {}".format(prefix, basedir)) return ret
Returns true if the passed in path is a test module path
def _is_module_path(self, path): """Returns true if the passed in path is a test module path :param path: string, the path to check, will need to start or end with the module test prefixes or postfixes to be considered valid :returns: boolean, True if a test module path, False otherwise """ ret = False basename = os.path.basename(path) fileroot = os.path.splitext(basename)[0] for pf in self.module_postfixes: if fileroot.endswith(pf): ret = True break if not ret: for pf in self.module_prefixes: if fileroot.startswith(pf): ret = True break return ret
Walk all the directories of basedir except hidden directories
def walk(self, basedir):
    """Walk all the directories of *basedir* except hidden directories.

    :param basedir: string, the directory to walk
    :returns: generator, same shape as os.walk
    """
    system_d = SitePackagesDir()
    filter_system_d = (
        system_d and os.path.commonprefix([system_d, basedir]) != system_d)

    for root, dirs, files in os.walk(basedir, topdown=True):
        # ignore dot directories and private directories (start with underscore)
        dirs[:] = [d for d in dirs if d[0] != '.' and d[0] != "_"]

        if filter_system_d:
            # NOTE(review): `d` is a bare directory name while system_d
            # is presumably an absolute path, so this filter looks like
            # it can never match -- confirm intent.
            dirs[:] = [d for d in dirs if not d.startswith(system_d)]

        yield root, dirs, files
Given a basedir, yield all test module paths recursively found in basedir that are test modules.
def paths(self):
    """Given a basedir, yield all test module paths recursively found
    in basedir that are test modules.

    return -- generator
    """
    module_name = getattr(self, 'module_name', '')
    module_prefix = getattr(self, 'prefix', '')
    filepath = getattr(self, 'filepath', '')

    if filepath:
        if os.path.isabs(filepath):
            yield filepath
        else:
            yield os.path.join(self.basedir, filepath)
    else:
        if module_prefix:
            basedirs = self._find_prefix_paths(self.basedir, module_prefix)
        else:
            basedirs = [self.basedir]

        for basedir in basedirs:
            try:
                if module_name:
                    path = self._find_module_path(basedir, module_name)
                else:
                    path = basedir

                if os.path.isfile(path):
                    logger.debug('Module path: {}'.format(path))
                    yield path
                else:
                    seen_paths = set()
                    for root, dirs, files in self.walk(path):
                        for basename in files:
                            if basename.startswith("__init__"):
                                # A package counts when its directory
                                # name is itself a test-module name.
                                if self._is_module_path(root):
                                    filepath = os.path.join(root, basename)
                                    if filepath not in seen_paths:
                                        logger.debug('Module package path: {}'.format(filepath))
                                        seen_paths.add(filepath)
                                        yield filepath
                            else:
                                fileroot = os.path.splitext(basename)[0]
                                for pf in self.module_postfixes:
                                    if fileroot.endswith(pf):
                                        filepath = os.path.join(root, basename)
                                        if filepath not in seen_paths:
                                            logger.debug('Module postfix path: {}'.format(filepath))
                                            seen_paths.add(filepath)
                                            yield filepath
                                for pf in self.module_prefixes:
                                    if fileroot.startswith(pf):
                                        filepath = os.path.join(root, basename)
                                        if filepath not in seen_paths:
                                            logger.debug('Module prefix path: {}'.format(filepath))
                                            seen_paths.add(filepath)
                                            yield filepath

            except IOError as e:
                # we failed to find a suitable path
                logger.warning(e, exc_info=True)
Given a filepath like /base/path/to/module.py, this will convert it to path.to.module so it can be imported.
def module_path(self, filepath):
    """Given a filepath like /base/path/to/module.py, convert it to
    path.to.module so it can be imported."""
    possible_modbits = re.split('[\\/]', filepath.strip('\\/'))
    basename = possible_modbits[-1]
    prefixes = possible_modbits[0:-1]
    modpath = []
    discarded = []

    # find the first directory that has an __init__.py
    for i in range(len(prefixes)):
        path_args = ["/"]
        path_args.extend(prefixes[0:i + 1])
        path_args.append('__init__.py')
        prefix_module = os.path.join(*path_args)
        if os.path.isfile(prefix_module):
            # Everything from here down is part of the module path.
            modpath = prefixes[i:]
            break
        else:
            discarded = path_args[0:-1]

    modpath.append(basename)

    # convert the remaining file path to a python module path that can
    # be imported
    module_name = '.'.join(modpath)
    module_name = re.sub(r'(?:\.__init__)?\.py$', '', module_name, flags=re.I)
    logger.debug("Module path {} found in filepath {}".format(module_name, filepath))
    return module_name
Remove paths in self.paths with confirmation (unless auto_confirm is True).
def remove(self, auto_confirm=False):
    """Remove paths in ``self.paths`` with confirmation (unless
    ``auto_confirm`` is True)."""
    if not self._can_uninstall():
        return

    if not self.paths:
        logger.info(
            "Can't uninstall '%s'. No files were found to uninstall.",
            self.dist.project_name,
        )
        return

    logger.info(
        'Uninstalling %s-%s:',
        self.dist.project_name, self.dist.version
    )

    with indent_log():
        paths = sorted(self.compact(self.paths))

        if auto_confirm:
            response = 'y'
        else:
            for path in paths:
                logger.info(path)
            response = ask('Proceed (y/n)? ', ('y', 'n'))
        if self._refuse:
            logger.info('Not removing or modifying (outside of prefix):')
            for path in self.compact(self._refuse):
                logger.info(path)
        if response == 'y':
            # Stash everything into a temp dir so rollback() can
            # restore it.
            self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
                                             prefix='pip-')
            for path in paths:
                new_path = self._stash(path)
                logger.debug('Removing file or directory %s', path)
                self._moved_paths.append(path)
                renames(path, new_path)
            for pth in self.pth.values():
                pth.remove()

            logger.info(
                'Successfully uninstalled %s-%s',
                self.dist.project_name, self.dist.version
            )
Rollback the changes previously made by remove().
def rollback(self):
    """Rollback the changes previously made by remove().

    Returns False when there is nothing to roll back.
    """
    if self.save_dir is None:
        logger.error(
            "Can't roll back %s; was not uninstalled",
            self.dist.project_name,
        )
        return False
    logger.info('Rolling back uninstall of %s', self.dist.project_name)
    for path in self._moved_paths:
        stashed = self._stash(path)
        logger.debug('Replacing %s', path)
        renames(stashed, path)
    for pth in self.pth.values():
        pth.rollback()
Remove temporary save dir: rollback will no longer be possible.
def commit(self):
    """Remove the temporary save dir: rollback will no longer be
    possible."""
    if self.save_dir is not None:
        rmtree(self.save_dir)
        self.save_dir = None
        self._moved_paths = []
Inject default arguments for dump functions.
def _dump_arg_defaults(kwargs):
    """Inject default arguments for dump functions, in place."""
    if current_app:
        # Use the application's configured encoder and settings.
        kwargs.setdefault('cls', current_app.json_encoder)
        if not current_app.config['JSON_AS_ASCII']:
            kwargs.setdefault('ensure_ascii', False)
        kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
    else:
        kwargs.setdefault('sort_keys', True)
        kwargs.setdefault('cls', JSONEncoder)
Inject default arguments for load functions.
def _load_arg_defaults(kwargs):
    """Inject default arguments for load functions, in place."""
    if current_app:
        kwargs.setdefault('cls', current_app.json_decoder)
    else:
        kwargs.setdefault('cls', JSONDecoder)
Serialize obj to a JSON-formatted str by using the application's configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an application on the stack.
def dumps(obj, **kwargs):
    """Serialize ``obj`` to a JSON-formatted ``str`` using the
    application's configured encoder (:attr:`~flask.Flask.json_encoder`)
    if there is an application on the stack.

    This function can return ``unicode`` strings or ascii-only
    bytestrings by default which coerce into unicode strings
    automatically.  That default behavior is controlled by the
    ``JSON_AS_ASCII`` configuration variable and can be overriden by
    the simplejson ``ensure_ascii`` parameter.
    """
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    rv = _json.dumps(obj, **kwargs)
    if encoding is not None and isinstance(rv, text_type):
        # Honor an explicit encoding request for text results.
        rv = rv.encode(encoding)
    return rv
Like :func:`dumps` but writes into a file object.
def dump(obj, fp, **kwargs):
    """Like :func:`dumps` but writes into a file object."""
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    if encoding is not None:
        # Wrap the stream so text is encoded on the way out.
        fp = _wrap_writer_for_text(fp, encoding)
    _json.dump(obj, fp, **kwargs)
Unserialize a JSON object from a string s by using the application's configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an application on the stack.
def loads(s, **kwargs):
    """Unserialize a JSON object from a string ``s`` using the
    application's configured decoder (:attr:`~flask.Flask.json_decoder`)
    if there is an application on the stack.
    """
    _load_arg_defaults(kwargs)
    if isinstance(s, bytes):
        charset = kwargs.pop('encoding', None) or 'utf-8'
        s = s.decode(charset)
    return _json.loads(s, **kwargs)
Like: func: loads but reads from a file object.
def load(fp, **kwargs):
    """Like :func:`loads` but reads from a file object."""
    _load_arg_defaults(kwargs)
    if not PY2:
        # Python 3's json module wants text; wrap binary file objects.
        charset = kwargs.pop('encoding', None) or 'utf-8'
        fp = _wrap_reader_for_text(fp, charset)
    return _json.load(fp, **kwargs)
Works exactly like: func: dumps but is safe for use in <script > tags. It accepts the same arguments and returns a JSON string. Note that this is available in templates through the |tojson filter which will also mark the result as safe. Due to how this function escapes certain characters this is safe even if used outside of <script > tags.
def htmlsafe_dumps(obj, **kwargs):
    """Works exactly like :func:`dumps` but is safe for use in
    ``<script>`` tags.  It accepts the same arguments and returns a JSON
    string.  Available in templates through the ``|tojson`` filter,
    which also marks the result as safe.

    The characters ``<``, ``>``, ``&`` and ``'`` are escaped in strings,
    so the result is safe anywhere in HTML with the notable exception of
    double quoted attributes; single quote those attributes, or use
    ``|tojson|forceescape``.

    .. versionchanged:: 0.10
       The return value is now always safe for HTML usage, even outside
       of script tags and in XHTML (double quoted attributes excepted).
    """
    rv = dumps(obj, **kwargs)
    for char, replacement in ((u'<', u'\\u003c'), (u'>', u'\\u003e'),
                              (u'&', u'\\u0026'), (u"'", u'\\u0027')):
        rv = rv.replace(char, replacement)
    if not _slash_escape:
        rv = rv.replace('\\/', '/')
    return rv
Like: func: htmlsafe_dumps but writes into a file object.
def htmlsafe_dump(obj, fp, **kwargs):
    """Like :func:`htmlsafe_dumps` but writes into a file object.

    :param obj: the object to serialize.
    :param fp: a writable file object.
    """
    # BUG FIX: the original called the Python 2-only builtin ``unicode``,
    # which raises NameError on Python 3.  Use the ``text_type`` compat
    # alias (``str`` on Python 3, ``unicode`` on Python 2) that this
    # module already relies on in :func:`dumps`.
    fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))
Creates a: class: ~flask. Response with the JSON representation of the given arguments with an application/ json mimetype. The arguments to this function are the same as to the: class: dict constructor.
def jsonify(*args, **kwargs):
    """Creates a :class:`~flask.Response` with the JSON representation
    of the given arguments with an `application/json` mimetype.  The
    arguments are the same as to the :class:`dict` constructor.

    Example usage::

        from flask import jsonify

        @app.route('/_get_current_user')
        def get_current_user():
            return jsonify(username=g.user.username,
                           email=g.user.email,
                           id=g.user.id)

    For security reasons only objects are supported toplevel; see
    :ref:`json-security`.

    The response is pretty printed for non-XHR requests unless the
    ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.

    .. versionadded:: 0.2
    """
    pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and
              not request.is_xhr)
    payload = dumps(dict(*args, **kwargs), indent=2 if pretty else None)
    return current_app.response_class(payload, mimetype='application/json')
Implement this method in a subclass such that it returns a serializable object for o or calls the base implementation ( to raise a TypeError ).
def default(self, o):
    """Return a serializable object for ``o``, or call the base
    implementation (which raises ``TypeError``).

    Handles :class:`datetime.datetime` (as HTTP date), :class:`uuid.UUID`
    (as string) and objects with an ``__html__`` method.  Subclasses may
    extend this, e.g. to support arbitrary iterators::

        def default(self, o):
            try:
                iterable = iter(o)
            except TypeError:
                pass
            else:
                return list(iterable)
            return JSONEncoder.default(self, o)
    """
    if isinstance(o, datetime):
        rv = http_date(o)
    elif isinstance(o, uuid.UUID):
        rv = str(o)
    elif hasattr(o, '__html__'):
        rv = text_type(o.__html__())
    else:
        rv = _json.JSONEncoder.default(self, o)
    return rv
Convert the characters &, <, >, ' and " in string s to HTML-safe sequences. Use this if you need to display text that might contain such characters in HTML. Marks return value as markup string.
def escape(s):
    """Convert the characters ``&``, ``<``, ``>``, ``'`` and ``"`` in
    string `s` to HTML-safe sequences.  Use this if you need to display
    text that might contain such characters in HTML.  Marks the return
    value as a markup string.
    """
    if hasattr(s, '__html__'):
        # The object already knows how to render itself safely.
        return s.__html__()
    text = text_type(s)
    # '&' must be replaced first so entities are not double-escaped.
    for char, entity in (('&', '&amp;'), ('>', '&gt;'), ('<', '&lt;'),
                         ("'", '&#39;'), ('"', '&#34;')):
        text = text.replace(char, entity)
    return Markup(text)
Sets multiple keys and values from a mapping.
def set_many(self, mapping, timeout=None):
    """Sets multiple keys and values from a mapping.

    :param mapping: a mapping with the keys/values to set.
    :param timeout: the cache timeout for the key (if not specified,
                    it uses the default timeout).
    :returns: Whether all given keys have been set.
    :rtype: boolean
    """
    # Materialize the list first so every key is attempted even when an
    # earlier one fails.
    results = [self.set(key, value, timeout)
               for key, value in _items(mapping)]
    return all(results)
Increments the value of a key by delta. If the key does not yet exist it is initialized with delta.
def inc(self, key, delta=1):
    """Increments the value of a key by `delta`.  A missing key is
    initialized with `delta`.  For supporting caches this is an atomic
    operation.

    :param key: the key to increment.
    :param delta: the delta to add.
    :returns: The new value or ``None`` for backend errors.
    """
    new_value = (self.get(key) or 0) + delta
    if not self.set(key, new_value):
        return None
    return new_value
Dumps an object into a string for redis. By default it serializes integers as regular string and pickle dumps everything else.
def dump_object(self, value):
    """Dumps an object into a string for redis.

    Integers are serialized as their plain ASCII representation;
    everything else is pickled and prefixed with ``!`` so that
    :meth:`load_object` can distinguish the two formats.
    """
    if type(value) in integer_types:
        return str(value).encode('ascii')
    return b'!' + pickle.dumps(value)
The reversal of :meth:`dump_object`. This might be called with None.
def load_object(self, value):
    """The reversal of :meth:`dump_object`.  May be called with ``None``."""
    if value is None:
        return None
    if not value.startswith(b'!'):
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization.  Still support
            # such raw legacy values.
            return value
    # Pickled payload; a corrupt pickle degrades to ``None``.
    try:
        return pickle.loads(value[1:])
    except pickle.PickleError:
        return None
Strip req postfix (-dev, -0.2, etc.)
def _strip_postfix(req): """ Strip req postfix ( -dev, 0.2, etc ) """ # FIXME: use package_to_requirement? match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req) if match: # Strip off -dev, -0.2, etc. req = match.group(1) return req
This method generates a dictionary of the query string parameters contained in a given editable URL.
def _build_editable_options(req): """ This method generates a dictionary of the query string parameters contained in a given editable URL. """ regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)") matched = regexp.findall(req) if matched: ret = dict() for option in matched: (name, value) = option if name in ret: raise Exception("%s option already defined" % name) ret[name] = value return ret return None
Parses an editable requirement into: - a requirement name - an URL - extras - editable options. Accepted requirements: svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
def parse_editable(editable_req, default_vcs=None):
    """Parses an editable requirement into:
        - a requirement name
        - an URL
        - extras
        - editable options
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]

    :param editable_req: the raw ``--editable`` value.
    :param default_vcs: VCS name to assume when the URL has no
        ``vcs+`` prefix; without it such URLs are rejected.
    :returns: a ``(package_name, url, extras, options)`` tuple; the name
        and options may be ``None`` for local ``file:`` paths.
    :raises InstallationError: for uninstallable dirs, unknown VCS
        schemes, malformed options, or a missing ``#egg=`` fragment.
    """
    url = editable_req
    extras = None

    # If a file path is specified with extras, strip off the extras.
    m = re.match(r'^(.+)(\[[^\]]+\])$', url)
    if m:
        url_no_extras = m.group(1)
        extras = m.group(2)
    else:
        url_no_extras = url

    if os.path.isdir(url_no_extras):
        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
            raise InstallationError(
                "Directory %r is not installable. File 'setup.py' not found."
                % url_no_extras
            )
        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith('file:'):
        if extras:
            # Parse the extras through a placeholder requirement so the
            # bracket syntax is validated by pkg_resources.
            return (
                None,
                url_no_extras,
                pkg_resources.Requirement.parse(
                    '__placeholder__' + extras
                ).extras,
                {},
            )
        else:
            return None, url_no_extras, None, {}

    # Prefix a bare "scheme:" URL with its VCS name, e.g. svn: -> svn+svn:
    for version_control in vcs:
        if url.lower().startswith('%s:' % version_control):
            url = '%s+%s' % (version_control, url)
            break

    if '+' not in url:
        if default_vcs:
            url = default_vcs + '+' + url
        else:
            raise InstallationError(
                '%s should either be a path to a local project or a VCS url '
                'beginning with svn+, git+, hg+, or bzr+' % editable_req
            )

    vc_type = url.split('+', 1)[0].lower()

    if not vcs.get_backend(vc_type):
        error_message = 'For --editable=%s only ' % editable_req + \
            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
            ' is currently supported'
        raise InstallationError(error_message)

    try:
        options = _build_editable_options(editable_req)
    except Exception as exc:
        raise InstallationError(
            '--editable=%s error in editable options:%s' % (editable_req, exc)
        )
    if not options or 'egg' not in options:
        # No explicit #egg= fragment; try to derive the name from the URL.
        req = _build_req_from_url(editable_req)
        if not req:
            raise InstallationError(
                '--editable=%s is not the right format; it must have '
                '#egg=Package' % editable_req
            )
    else:
        req = options['egg']

    package = _strip_postfix(req)
    return package, url, None, options
Creates an InstallRequirement from a name which might be a requirement directory containing setup. py filename or URL.
def from_line(
        cls, name, comes_from=None, isolated=False, options=None,
        wheel_cache=None):
    """Creates an InstallRequirement from a name, which might be a
    requirement, directory containing 'setup.py', filename, or URL.

    :param name: the raw requirement text (specifier, path, or URL),
        optionally carrying an environment marker after ``;``.
    :param comes_from: where this requirement originated (for messages).
    :param isolated: whether to ignore per-user configuration.
    :param options: extra per-requirement install options.
    :param wheel_cache: cache used to look up built wheels.
    """
    from pip.index import Link

    # URLs may legitimately contain ';', so require '; ' (with a space)
    # before treating the tail as an environment marker.
    if is_url(name):
        marker_sep = '; '
    else:
        marker_sep = ';'
    if marker_sep in name:
        name, markers = name.split(marker_sep, 1)
        markers = markers.strip()
        if not markers:
            markers = None
    else:
        markers = None
    name = name.strip()
    req = None
    path = os.path.normpath(os.path.abspath(name))
    link = None
    extras = None

    if is_url(name):
        link = Link(name)
    else:
        p, extras = _strip_extras(path)
        # Only treat the value as a directory path when it *looks* like
        # a path (contains a separator or starts with '.').
        if (os.path.isdir(p) and
                (os.path.sep in name or name.startswith('.'))):
            if not is_installable_dir(p):
                raise InstallationError(
                    "Directory %r is not installable. File 'setup.py' "
                    "not found." % name
                )
            link = Link(path_to_url(p))
        elif is_archive_file(p):
            if not os.path.isfile(p):
                logger.warning(
                    'Requirement %r looks like a filename, but the '
                    'file does not exist',
                    name
                )
            link = Link(path_to_url(p))

    # it's a local file, dir, or url
    if link:
        # Handle relative file URLs
        if link.scheme == 'file' and re.search(r'\.\./', link.url):
            link = Link(
                path_to_url(os.path.normpath(os.path.abspath(link.path))))
        # wheel file
        if link.is_wheel:
            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
            if not wheel.supported():
                raise UnsupportedWheel(
                    "%s is not a supported wheel on this platform." %
                    wheel.filename
                )
            # Pin to exactly the version encoded in the wheel filename.
            req = "%s==%s" % (wheel.name, wheel.version)
        else:
            # set the req to the egg fragment.  when it's not there, this
            # will become an 'unnamed' requirement
            req = link.egg_fragment

    # a requirement specifier
    else:
        req = name

    options = options if options else {}
    res = cls(req, comes_from, link=link, markers=markers,
              isolated=isolated, options=options,
              wheel_cache=wheel_cache)

    if extras:
        # Validate/parse the bracketed extras via a placeholder name.
        res.extras = pkg_resources.Requirement.parse(
            '__placeholder__' + extras).extras

    return res
Ensure that if a link can be found for this that it is found.
def populate_link(self, finder, upgrade):
    """Ensure that if a link can be found for this, that it is found.

    Note that ``self.link`` may still be None afterwards - e.g. if
    upgrade is False and the requirement is already installed.
    """
    if self.link is not None:
        return
    self.link = finder.find_requirement(self, upgrade)
Move self. _temp_build_dir to self. _ideal_build_dir/ self. req. name
def _correct_build_location(self):
    """Move self._temp_build_dir to self._ideal_build_dir/self.req.name

    For some requirements (e.g. a path to a directory), the name of the
    package is not available until we run egg_info, so the build_location
    will return a temporary directory and store the _ideal_build_dir.

    This is only called by self.egg_info_path to fix the temporary build
    directory.
    """
    if self.source_dir is not None:
        return
    assert self.req is not None
    assert self._temp_build_dir
    assert self._ideal_build_dir
    old_location = self._temp_build_dir
    # NOTE(review): _temp_build_dir is cleared before calling
    # build_location, presumably so it computes the named path instead
    # of reusing the temp dir — confirm against build_location.
    self._temp_build_dir = None
    new_location = self.build_location(self._ideal_build_dir)
    if os.path.exists(new_location):
        raise InstallationError(
            'A package already exists in %s; please remove it to continue'
            % display_path(new_location))
    logger.debug(
        'Moving package %s from %s to new location %s',
        self, display_path(old_location), display_path(new_location),
    )
    shutil.move(old_location, new_location)
    self._temp_build_dir = new_location
    self._ideal_build_dir = None
    self.source_dir = new_location
    # Invalidate the cached egg-info path; it pointed into old_location.
    self._egg_info_path = None
Ensure that a source_dir is set.
def ensure_has_source_dir(self, parent_dir):
    """Ensure that a source_dir is set.

    This will create a temporary build dir if the name of the
    requirement isn't known yet.

    :param parent_dir: The ideal pip parent_dir for the source_dir.
        Generally src_dir for editables and build_dir for sdists.
    :return: self.source_dir
    """
    if self.source_dir is not None:
        return self.source_dir
    self.source_dir = self.build_location(parent_dir)
    return self.source_dir
Remove the source files from this requirement if they are marked for deletion
def remove_temporary_source(self):
    """Remove the source files from this requirement, if they are marked
    for deletion"""
    src = self.source_dir
    if src and os.path.exists(os.path.join(src, PIP_DELETE_MARKER_FILENAME)):
        logger.debug('Removing source in %s', src)
        rmtree(src)
        self.source_dir = None
    tmp = self._temp_build_dir
    if tmp and os.path.exists(tmp):
        rmtree(tmp)
        self._temp_build_dir = None
Return a pkg_resources. Distribution built from self. egg_info_path
def get_dist(self):
    """Return a pkg_resources.Distribution built from self.egg_info_path"""
    egg_info = self.egg_info_path('').rstrip('/')
    base_dir = os.path.dirname(egg_info)
    # The project name is the egg-info directory name sans extension.
    dist_name = os.path.splitext(os.path.basename(egg_info))[0]
    metadata = pkg_resources.PathMetadata(base_dir, egg_info)
    return pkg_resources.Distribution(
        base_dir,
        project_name=dist_name,
        metadata=metadata,
    )
Extract metadata from filenames. Extracts the 4 metadata items needed (name, version, pyversion, arch) from the installer filename and the name of the egg-info directory embedded in the zipfile (if any).
def parse_info(wininfo_name, egginfo_name):
    """Extract metadata from filenames.

    Extracts the 4 metadata items needed (name, version, pyversion,
    arch) from the installer filename and the name of the egg-info
    directory embedded in the zipfile (if any).

    The egginfo filename has the format::

        name-ver(-pyver)(-arch).egg-info

    The installer filename has the format::

        name-ver.arch(-pyver).exe

    Notes:

    1. The installer filename is not definitive (installers can be
       renamed), so prefer more reliable data where possible.
    2. The egg-info data is preferred for name and version: it comes
       straight from the distutils metadata and is mandatory.
    3. The pyver from the egg-info data is ignored - it reflects the
       Python used to *build* the installer; the installer filename is
       authoritative here (absence implies any version).
    4. The architecture must come from the installer filename; it is
       not in the egg-info data.  Pure-python content still carries an
       architecture (the installer format is executable), so it is
       ignored for pure-python installers.
    """
    egginfo = egg_info_re.search(egginfo_name) if egginfo_name else None
    if egginfo_name and not egginfo:
        raise ValueError("Egg info filename %s is not valid" %
                         (egginfo_name,))

    # 1. Distribution name runs up to the first '-'.
    dist_name, dash, remainder = wininfo_name.partition('-')
    if not dash:
        raise ValueError("Installer filename %s is not valid" %
                         (wininfo_name,))
    remainder = remainder[:-4]  # drop the trailing '.exe'

    # 2. Python version: after the last '-', must start with 'py'.
    head, dash, pyver = remainder.rpartition('-')
    if dash and pyver.startswith('py'):
        remainder = head
        pyver = pyver.replace('.', '')
    else:
        # Not version specific - assume py2.py3 (the wininst format
        # cannot express more; users can rename the wheel if needed).
        pyver = 'py2.py3'

    # 3. Version and architecture split on the last '.'.
    version, dot, arch = remainder.rpartition('.')
    if not dot:
        raise ValueError("Installer filename %s is not valid" %
                         (wininfo_name,))

    # Prefer the authoritative egg-info name/version when available.
    if egginfo:
        dist_name = egginfo.group('name')
        version = egginfo.group('ver')

    return dict(name=dist_name, ver=version, arch=arch, pyver=pyver)
Test if the attribute given is an internal python attribute. For example this function returns True for the func_code attribute of python objects. This is useful if the environment method: meth: ~SandboxedEnvironment. is_safe_attribute is overridden.
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # The checked types are mutually exclusive, so independent guard
    # clauses behave like the original if/elif chain.
    if isinstance(obj, function_type) and attr in UNSAFE_FUNCTION_ATTRIBUTES:
        return True
    if isinstance(obj, method_type) and (
            attr in UNSAFE_FUNCTION_ATTRIBUTES or
            attr in UNSAFE_METHOD_ATTRIBUTES):
        return True
    if isinstance(obj, type) and attr == 'mro':
        return True
    if isinstance(obj, (code_type, traceback_type, frame_type)):
        return True
    if isinstance(obj, generator_type) and attr in UNSAFE_GENERATOR_ATTRIBUTES:
        return True
    return attr.startswith('__')
This is the same as accessing: attr: stream with the difference that if it finds cached data from calling: meth: get_data first it will create a new stream out of the cached data.
def _get_stream_for_parsing(self): """This is the same as accessing :attr:`stream` with the difference that if it finds cached data from calling :meth:`get_data` first it will create a new stream out of the cached data. .. versionadded:: 0.9.3 """ cached_data = getattr(self, '_cached_data', None) if cached_data is not None: return BytesIO(cached_data) return self.stream
This reads the buffered incoming data from the client into one bytestring. By default this is cached but that behavior can be changed by setting cache to False.
def get_data(self, cache=True, as_text=False, parse_form_data=False):
    """Read the buffered incoming data from the client into one
    bytestring.  By default the result is cached; pass ``cache=False``
    to disable that.

    Check the content length before calling this - a client could send
    many megabytes and exhaust server memory.  If the form data was
    already parsed this returns an empty string, since form parsing does
    not cache; set ``parse_form_data=True`` to invoke form parsing
    implicitly (with the default caching, the form parser reuses the
    cached data anyway).

    If ``as_text`` is true the return value is a decoded unicode string.

    .. versionadded:: 0.9
    """
    data = getattr(self, '_cached_data', None)
    if data is None:
        if parse_form_data:
            self._load_form_data()
        data = self.stream.read()
        if cache:
            self._cached_data = data
    if as_text:
        return data.decode(self.charset, self.encoding_errors)
    return data
This is automatically called right before the response is started and returns headers modified for the given environment. It returns a copy of the headers from the response with some modifications applied if necessary.
def get_wsgi_headers(self, environ):
    """This is automatically called right before the response is started
    and returns headers modified for the given environment.  It returns a
    copy of the headers from the response with some modifications applied
    if necessary.

    For example the location header (if present) is joined with the root
    URL of the environment.  Also the content length is automatically set
    to zero here for certain status codes.

    .. versionchanged:: 0.6
       Previously that function was called `fix_headers` and modified
       the response object in place.  Also since 0.6, IRIs in location
       and content-location headers are handled properly.

       Also starting with 0.6, Werkzeug will attempt to set the content
       length if it is able to figure it out on its own.  This is the
       case if all the strings in the response iterable are already
       encoded and the iterable is buffered.

    :param environ: the WSGI environment of the request.
    :return: returns a new :class:`~werkzeug.datastructures.Headers`
             object.
    """
    headers = Headers(self.headers)
    location = None
    content_location = None
    content_length = None
    status = self.status_code

    # iterate over the headers to find all values in one go.  Because
    # get_wsgi_headers is used each response that gives us a tiny
    # speedup.
    for key, value in headers:
        ikey = key.lower()
        if ikey == u'location':
            location = value
        elif ikey == u'content-location':
            content_location = value
        elif ikey == u'content-length':
            content_length = value

    # make sure the location header is an absolute URL
    if location is not None:
        old_location = location
        if isinstance(location, text_type):
            # Safe conversion is necessary here as we might redirect
            # to a broken URI scheme (for instance itms-services).
            location = iri_to_uri(location, safe_conversion=True)

        if self.autocorrect_location_header:
            current_url = get_current_url(environ, root_only=True)
            if isinstance(current_url, text_type):
                current_url = iri_to_uri(current_url)
            location = url_join(current_url, location)
        # Only rewrite the header when the value actually changed.
        if location != old_location:
            headers['Location'] = location

    # make sure the content location is a URL
    if content_location is not None and \
       isinstance(content_location, text_type):
        headers['Content-Location'] = iri_to_uri(content_location)

    # remove entity headers and set content length to zero if needed.
    # Also update content_length accordingly so that the automatic
    # content length detection does not trigger in the following
    # code.
    if 100 <= status < 200 or status == 204:
        headers['Content-Length'] = content_length = u'0'
    elif status == 304:
        remove_entity_headers(headers)

    # if we can determine the content length automatically, we
    # should try to do that.  But only if this does not involve
    # flattening the iterator or encoding of unicode strings in
    # the response.  We however should not do that if we have a 304
    # response.
    if self.automatically_set_content_length and \
       self.is_sequence and content_length is None and status != 304:
        try:
            content_length = sum(len(to_bytes(x, 'ascii'))
                                 for x in self.response)
        except UnicodeError:
            # aha, something non-bytestringy in there, too bad, we
            # can't safely figure out the length of the response.
            pass
        else:
            headers['Content-Length'] = str(content_length)

    return headers
r Sometimes you get an URL by a user that just isn t a real URL because it contains unsafe characters like and so on. This function can fix some of the problems in a similar way browsers handle data entered by the user:
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL
    because it contains unsafe characters like ' ' and so on.  This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was
                    given as unicode string.
    """
    # Switch to unicode and normalize backslashes (invalid in URLs
    # anyway) to forward slashes, consistent with what Chrome does.
    s = to_unicode(s, charset, 'replace').replace('\\', '/')

    # Fix up the specific case of a malformed windows URL like
    # file://c:/... or file://c|/...
    if s.startswith('file://') and s[7:8].isalpha() \
            and s[8:10] in (':/', '|/'):
        s = 'file:///' + s[7:]

    parts = url_parse(s)
    path_safe = '/%+$!*\'(),'
    query_safe = ':&%=+$!*\'(),'
    path = url_quote(parts.path, charset, safe=path_safe)
    qs = url_quote_plus(parts.query, charset, safe=query_safe)
    anchor = url_quote_plus(parts.fragment, charset, safe=query_safe)
    return to_native(url_unparse(
        (parts.scheme, parts.encode_netloc(), path, qs, anchor)))
r Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always uses utf - 8 URLs internally because this is what browsers and HTTP do as well. In some places where it accepts an URL it also accepts a unicode IRI and converts it into a URI.
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
    r"""
    Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug
    always uses utf-8 URLs internally because this is what browsers and
    HTTP do as well. In some places where it accepts an URL it also
    accepts a unicode IRI and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'

    There is a general problem with IRI and URI conversion with some
    protocols that appear in the wild that are in violation of the URI
    specification.  In places where Werkzeug goes through a forced IRI to
    URI conversion it will set the `safe_conversion` flag which will not
    perform a conversion if the end result is already ASCII.  This can
    mean that the return value is not an entirely correct URI but it will
    not destroy such invalid URLs in the process.

    As an example consider the following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    The internal representation after parsing of those URLs is the same
    and there is no way to reconstruct the original one.  If safe
    conversion is enabled however this function becomes a noop for both of
    those strings as they both can be considered URIs.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9.6
       The `safe_conversion` parameter was added.

    :param iri: The IRI to convert.
    :param charset: The charset for the URI.
    :param safe_conversion: indicates if a safe conversion should take
        place.  For more information see the explanation above.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        try:
            native_iri = to_native(iri)
            ascii_iri = to_native(iri).encode('ascii')
            # Only short-circuit when the value is pure ASCII with no
            # whitespace: split() on such a string returns it unchanged.
            if ascii_iri.split() == [ascii_iri]:
                return native_iri
        except UnicodeError:
            pass

    iri = url_parse(to_unicode(iri, charset, errors))

    netloc = iri.encode_netloc()
    path = url_quote(iri.path, charset, errors, '/:~+%')
    query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
    fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')

    return to_native(url_unparse((iri.scheme, netloc,
                                  path, query, fragment)))
r Return full path to the user - specific cache dir for this application.
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.

        "appname" is the name of application.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local
    settings go in the `CSIDL_LOCAL_APPDATA` directory; this appends
    "Cache" to that (non-roaming) base.
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        return os.path.join(base, appname, "Cache")
    if sys.platform == "darwin":
        return os.path.join(os.path.expanduser("~/Library/Caches"), appname)
    base = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
    return os.path.join(base, appname)
Return full path to the user - specific data dir for this application.
def user_data_dir(appname, roaming=False):
    """
    Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "roaming" (boolean, default False) can be set True to use the
            Windows roaming appdata directory, which is sync'd on login
            for users on a network with roaming profiles.  See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or $XDG_DATA_HOME
        Win XP (not roaming):   C:\Documents and Settings\<username>\ ...
                                ...Application Data\<AppName>
        Win 7  (not roaming):   C:\\Users\<username>\AppData\Local\<AppName>
        Win 7  (roaming):       C:\\Users\<username>\AppData\Roaming\<AppName>

    For Unix we follow the XDG spec and support $XDG_DATA_HOME; the
    default is "~/.local/share/<AppName>".
    """
    if WINDOWS:
        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        return os.path.join(os.path.normpath(_get_win_folder(const)), appname)
    if sys.platform == "darwin":
        return os.path.join(
            os.path.expanduser('~/Library/Application Support/'),
            appname,
        )
    return os.path.join(
        os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")),
        appname,
    )
Return full path to the user - specific log dir for this application.
def user_log_dir(appname):
    r"""Return the user-specific log directory for *appname*.

    Typical locations:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log   # or under $XDG_CACHE_HOME
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Logs

    OPINION: appends "Logs" to the Windows local-appdata dir and "log" to
    the Unix user cache dir, since neither platform defines a standard.
    """
    if WINDOWS:
        return os.path.join(user_data_dir(appname), "Logs")
    if sys.platform == "darwin":
        return os.path.join(os.path.expanduser('~/Library/Logs'), appname)
    return os.path.join(user_cache_dir(appname), "log")
Return the full path to the user-specific config dir for this application.
def user_config_dir(appname, roaming=True):
    """Return the user-specific config directory for *appname*.

    "roaming" (boolean, default True) can be set False to avoid the Windows
    roaming appdata directory; see
    <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical locations:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.config/<AppName>  # or $XDG_CONFIG_HOME, if defined
        Win *:      same as user_data_dir

    For Unix we follow the XDG spec and support $XDG_CONFIG_HOME; the
    default is "~/.config/<AppName>".
    """
    if WINDOWS:
        return user_data_dir(appname, roaming=roaming)
    if sys.platform == "darwin":
        return user_data_dir(appname)
    base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
    return os.path.join(base, appname)
Return a list of potential user-shared config dirs for this application.
def site_config_dirs(appname):
    r"""Return a list of potential system-wide config dirs for *appname*.

    Typical locations:
        Mac OS X:   /Library/Application Support/<AppName>/
        Unix:       $XDG_CONFIG_DIRS[i]/<AppName>/ for each entry, plus /etc
        Win 7:      C:\ProgramData\<AppName>\  (hidden, but writeable)
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        return [os.path.join(base, appname)]
    if sys.platform == 'darwin':
        return [os.path.join('/Library/Application Support', appname)]
    # Unix: every entry of $XDG_CONFIG_DIRS (default /etc/xdg), app-suffixed
    candidates = []
    xdg_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    if xdg_dirs:
        for entry in xdg_dirs.split(os.pathsep):
            candidates.append(os.sep.join([os.path.expanduser(entry), appname]))
    # always look in /etc directly as well
    candidates.append('/etc')
    return candidates
This iterates over all relevant Python files. It goes through all loaded files from modules all files in folders of already loaded modules as well as all files reachable through a package.
def _iter_module_files(): """This iterates over all relevant Python files. It goes through all loaded files from modules, all files in folders of already loaded modules as well as all files reachable through a package. """ # The list call is necessary on Python 3 in case the module # dictionary modifies during iteration. for module in list(sys.modules.values()): if module is None: continue filename = getattr(module, '__file__', None) if filename: old = None while not os.path.isfile(filename): old = filename filename = os.path.dirname(filename) if filename == old: break else: if filename[-4:] in ('.pyc', '.pyo'): filename = filename[:-1] yield filename
Spawn a new Python interpreter with the same arguments as this one but running the reloader thread.
def restart_with_reloader(self):
    """Spawn a new Python interpreter with the same arguments as this one,
    but running the reloader thread.

    Blocks in a loop: each child process runs until it exits, and a child
    exit code of 3 is the reloader's "restart me" sentinel.  Any other
    exit code is propagated to the caller.
    """
    while 1:
        _log('info', ' * Restarting with %s' % self.name)
        # Re-exec the same interpreter with the same command line.
        args = [sys.executable] + sys.argv
        new_environ = os.environ.copy()
        # Marker so the child knows it is the "main" (reloaded) process.
        new_environ['WERKZEUG_RUN_MAIN'] = 'true'

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            for key, value in iteritems(new_environ):
                if isinstance(value, text_type):
                    new_environ[key] = value.encode('iso-8859-1')

        exit_code = subprocess.call(args, env=new_environ)
        # 3 == restart requested by the child's change detector; anything
        # else means the child really terminated.
        if exit_code != 3:
            return exit_code
Wrapper around six. text_type to convert None to empty string
def to_text(s, blank_if_none=True):
    """Coerce *s* to text via six.text_type.

    ``None`` becomes ``""`` (or stays ``None`` when *blank_if_none* is
    False); text passes through unchanged; anything else is converted.
    """
    if s is None:
        return "" if blank_if_none else None
    if isinstance(s, text_type):
        return s
    return text_type(s)
Return an existing CA bundle path or None
def find_ca_bundle():
    """Return an existing CA bundle path, or None"""
    # Windows: use the certificate store helper.
    if os.name=='nt':
        return get_win_certfile()
    else:
        # Probe the well-known system bundle locations first.
        for cert_path in cert_paths:
            if os.path.isfile(cert_path):
                return cert_path
    # Fall back to the bundled certifi package, if importable/extractable.
    try:
        return pkg_resources.resource_filename('certifi', 'cacert.pem')
    except (ImportError, ResolutionError, ExtractionError):
        return None
Parse a string or file - like object into a tree
def parse(doc, treebuilder="etree", encoding=None, namespaceHTMLElements=True):
    """Parse a string or file-like object into a tree.

    *treebuilder* names the tree implementation ("etree" by default); the
    resulting document tree is returned.
    """
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, encoding=encoding)
Parse a HTML document into a well - formed tree
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
    """Parse a HTML document into a well-formed tree

    stream - a filelike object or string containing the HTML to be parsed

    The optional encoding parameter must be a string that indicates
    the encoding.  If specified, that encoding will be used,
    regardless of any BOM or later declaration (such as in a meta
    element)

    parseMeta - look for an encoding declaration in a meta element
    useChardet - fall back to chardet-based encoding detection

    Returns the parsed document tree.
    """
    # innerHTML=False: full-document parse (contrast parseFragment).
    self._parse(stream, innerHTML=False, encoding=encoding,
                parseMeta=parseMeta, useChardet=useChardet)
    return self.tree.getDocument()
Parse a HTML fragment into a well - formed tree fragment
def parseFragment(self, stream, container="div", encoding=None,
                  parseMeta=False, useChardet=True):
    """Parse a HTML fragment into a well-formed tree fragment

    container - name of the element we're setting the innerHTML property
    if set to None, default to 'div'

    stream - a filelike object or string containing the HTML to be parsed

    The optional encoding parameter must be a string that indicates
    the encoding.  If specified, that encoding will be used,
    regardless of any BOM or later declaration (such as in a meta
    element)

    Returns the parsed fragment tree.
    """
    # innerHTML=True switches the parser into fragment mode, scoped to
    # the given container element.
    self._parse(stream, True, container=container, encoding=encoding)
    return self.tree.getFragment()
Generic RCDATA/ RAWTEXT Parsing algorithm contentType - RCDATA or RAWTEXT
def parseRCDataRawtext(self, token, contentType):
    """Generic RCDATA/RAWTEXT parsing algorithm.

    Inserts *token*'s element, switches the tokenizer into the matching
    raw-text state, and moves the parser to the "text" phase (saving the
    current phase so it can be restored afterwards).

    contentType - "RCDATA" or "RAWTEXT"
    """
    assert contentType in ("RAWTEXT", "RCDATA")

    self.tree.insertElement(token)

    self.tokenizer.state = (self.tokenizer.rawtextState
                            if contentType == "RAWTEXT"
                            else self.tokenizer.rcdataState)

    # Remember where we were so the "text" phase can return here.
    self.originalPhase = self.phase
    self.phase = self.phases["text"]
pass in a word string that you would like to see probable matches for.
def translate(self, word):
    """ pass in a word string that you would like to
    see probable matches for.

    Raises NoMatchError when the word was never seen in training.
    Returns the word's non-zero (candidate, probability) pairs, sorted
    descending.
    """
    if (word not in self.transmissions):
        raise NoMatchError('no matches found')
    else:
        trans = self.transmissions[word]
        # print out a sorted list of all non-zero trans
        # NOTE(review): iteritems() is Python 2 only (use .items() on 3).
        # Also, sorting (candidate, prob) tuples orders primarily by the
        # candidate string, not by probability — confirm this ranking is
        # intended.
        return sorted(((k, v) for k, v in trans.iteritems() if v != 0),
                      reverse=True)
This converts the lines read in from sys into a usable format; returns the list of raw lines and the list of unique tokens.
def convertArgsToTokens(self, data):
    """Read the training file *data* and return ``(lines, tokens)``.

    ``lines`` is the list of raw lines with trailing whitespace stripped;
    ``tokens`` is the de-duplicated vocabulary of whitespace-split tokens
    (order arbitrary, since it goes through a set).

    :param data: path to the input file.
    """
    lines = []
    tokens = []
    # 'with' guarantees the handle is closed even if reading raises;
    # the original left the file open on error.  Iterating the file
    # directly also avoids materializing readlines().
    with open(data, 'r') as fh:
        for line in fh:
            lines.append(line.rstrip())
            tokens += line.split()
    # De-duplicate the vocabulary; set() makes the order arbitrary.
    tokens = list(set(tokens))
    return lines, tokens
Get all probable matches and then initialize t(f|e).
def initTef(self):
    ''' get all probable matches and then initialize t(f|e)

    For every English word, collects all foreign words appearing in any
    parallel sentence containing it, then assigns each candidate a
    uniform probability.  Populates self.probs and self.transmissions.

    NOTE(review): this module uses Python 2 print statements, so it only
    runs under Python 2.
    '''
    probs = {}
    transmissions = {}
    # go through each german word
    for word in self.en_words:
        word_poss = []
        # if word in sentence.. then
        for sent in self.en_dict:
            if word in sent:
                # NOTE(review): `word in sent` is a substring test, so
                # "cat" also matches "category" — presumably intended to
                # be a token match; verify.
                matching = self.de_dict[self.en_dict.index(sent)]
                word_poss = word_poss + matching.split()
        # remove the duplicates
        word_poss = list(set(word_poss))
        # add the probable matches
        probs[word] = word_poss
    self.probs = probs
    print self.probs
    for word in self.en_words:
        # print self.probs
        word_probs = self.probs[word]
        if (len(word_probs) == 0):
            # NOTE(review): this only prints a warning; the division on
            # the next line still raises ZeroDivisionError for a word
            # with no candidates.
            print word, word_probs
        # uniform initialization: 1/N over the N candidate translations
        uniform_prob = 1.0 / len(word_probs)
        word_probs = dict([(w, uniform_prob) for w in word_probs])
        # save word_probs
        transmissions[word] = word_probs
    self.transmissions = transmissions
Iterate through all transmissions of English to foreign words, keeping count of repeated occurrences. Repeat until convergence: set count(e|f) to 0 for all e, f; set total(f) to 0 for all f; for all sentence pairs (e_s, f_s): set total_s(e) = 0 for all e; for all words e in e_s, for all words f in f_s: total_s(e) += t(e|f); then for all words e in e_s, for all words f in f_s: count(e|f) += t(e|f) / total_s(e) and total(f) += t(e|f) / total_s(e); finally for all f, for all e: t(e|f) = count(e|f) / total(f).
def iterateEM(self, count):
    ''' Iterate through all transmissions of english to foreign words,
    keeping count of repeated occurrences (IBM Model 1 EM).

    :param count: number of EM iterations to run.

    do until convergence
      set count(e|f) to 0 for all e,f
      set total(f) to 0 for all f
      for all sentence pairs (e_s, f_s)
        set total_s(e) = 0 for all e
        for all words e in e_s
          for all words f in f_s
            total_s(e) += t(e|f)
        for all words e in e_s
          for all words f in f_s
            count(e|f) += t(e|f) / total_s(e)
            total(f)   += t(e|f) / total_s(e)
      for all f, for all e
        t(e|f) = count(e|f) / total(f)
    '''
    # NOTE(review): the inner variable `count` below shadows the `count`
    # parameter (harmless only because range(count) is evaluated first),
    # and `iter` shadows the builtin.
    for iter in range(count):
        countef = {}
        totalf = {}
        # set the count of the words to zero
        for word in self.en_words:
            if(word not in self.probs):
                continue
            word_probs = self.probs[word]
            count = dict([(w, 0) for w in word_probs])
            countef[word] = count
            totalf[word] = 0
        self.countef = countef
        self.totalf = totalf
        # NOW iterate over each word pair
        for (es, ds) in self.sent_pairs:
            es_split = es.split()
            ds_split = ds.split()
            for d in ds_split:
                # normalization term total_s for this foreign word
                self.totals[d] = 0
                for e in es_split:
                    if (e not in self.transmissions):
                        continue
                    e_trans = self.transmissions[e]
                    if (d not in e_trans):
                        continue
                    self.totals[d] += e_trans[d]
                # Get count(e|f) and total(f):
                # accumulate expected counts weighted by normalized t(e|f)
                for e in es_split:
                    if(e not in self.transmissions):
                        continue
                    if (d not in self.transmissions[e]):
                        continue
                    self.countef[e][
                        d] += self.transmissions[e][d] / self.totals[d]
                    self.totalf[
                        e] += self.transmissions[e][d] / self.totals[d]
        # M-step: renormalize the translation table
        for e in self.en_words:
            if (e not in self.probs):
                continue
            e_prob = self.probs[e]
            for d in e_prob:
                self.transmissions[e][d] = self.countef[
                    e][d] / self.totalf[e]
Bind and activate HTTP server.
def bind(self):
    """Bind and activate HTTP server."""
    # Delegate to HTTPServer.__init__, which binds the listening socket
    # on (self.host, self.port) with HTTPRequestHandler as the handler.
    HTTPServer.__init__(self, (self.host, self.port), HTTPRequestHandler)
    # Record the port actually bound — relevant when self.port was 0 and
    # the OS chose an ephemeral port.
    self.port = self.server_port
Report startup info to stdout.
def report(self):
    """Print the startup banner (service/host/port) to stdout and flush."""
    banner = self.report_message.format(
        service=self.service,
        host=self.host,
        port=self.port,
    )
    print(banner)
    # Flush so the message appears immediately even when stdout is piped.
    sys.stdout.flush()
Creates an SSL key for development. This should be used instead of the adhoc key which generates a new cert on each server start. It accepts a path for where it should store the key and cert and either a host or CN. If a host is given it will use the CN *. host/ CN = host.
def make_ssl_devcert(base_path, host=None, cn=None):
    """Creates an SSL key for development.  This should be used instead of
    the ``'adhoc'`` key which generates a new cert on each server start.
    It accepts a path for where it should store the key and cert and
    either a host or CN.  If a host is given it will use the CN
    ``*.host/CN=host``.

    For more information see :func:`run_simple`.

    .. versionadded:: 0.9

    :param base_path: the path to the certificate and key.  The extension
                      ``.crt`` is added for the certificate, ``.key`` is
                      added for the key.
    :param host: the name of the host.  This can be used as an alternative
                 for the `cn`.
    :param cn: the `CN` to use.
    :return: a ``(cert_file, pkey_file)`` tuple of the written paths.
    """
    from OpenSSL import crypto
    if host is not None:
        # Wildcard cert covering the host and all its subdomains.
        cn = '*.%s/CN=%s' % (host, host)
    cert, pkey = generate_adhoc_ssl_pair(cn=cn)
    cert_file = base_path + '.crt'
    pkey_file = base_path + '.key'
    # Persist both halves PEM-encoded next to each other.
    with open(cert_file, 'wb') as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(pkey_file, 'wb') as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
    return cert_file, pkey_file
Loads bytecode from a file or file like object.
def load_bytecode(self, f): """Loads bytecode from a file or file like object.""" # make sure the magic header is correct magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return # the source code of the file changed, we need to reload checksum = pickle.load(f) if self.checksum != checksum: self.reset() return self.code = marshal_load(f)
Convert keyword args to a dictionary of stylesheet parameters. XSL stylesheet parameters must be XPath expressions i. e.:
def stylesheet_params(**kwargs):
    """Convert keyword args to a dictionary of stylesheet parameters.
    XSL stylesheet parameters must be XPath expressions, i.e.:

    * string expressions, like "'5'"
    * simple (number) expressions, like "5"
    * valid XPath expressions, like "/a/b/text()"

    This function converts native Python keyword arguments to stylesheet
    parameters following these rules:
    If an arg is a string wrap it with XSLT.strparam().
    If an arg is an XPath object use its path string.
    If arg is None raise TypeError.
    Else convert arg to string.

    NOTE(review): `basestring` and `unicode` are Python 2 names; under
    Python 3 this function would need `str`.
    """
    result = {}
    for key, val in kwargs.items():
        if isinstance(val, basestring):
            # Quote the string so it is passed as a literal, not an XPath.
            val = _etree.XSLT.strparam(val)
        elif val is None:
            raise TypeError('None not allowed as a stylesheet parameter')
        elif not isinstance(val, _etree.XPath):
            # Numbers etc. become simple textual expressions.
            val = unicode(val)
        result[key] = val
    return result
Return a copy of paramsDict updated with kwargsDict entries wrapped as stylesheet arguments. kwargsDict entries with a value of None are ignored.
def _stylesheet_param_dict(paramsDict, kwargsDict):
    """Merge *kwargsDict* over *paramsDict* and wrap the result as
    stylesheet arguments.

    Entries of *kwargsDict* whose value is None are ignored, so they never
    override a default from *paramsDict*.
    """
    # Copy first so the caller's (possibly shared/default) dict is never
    # mutated.
    merged = dict(paramsDict)
    for key, value in kwargsDict.items():
        if value is not None:
            merged[key] = value
    return stylesheet_params(**merged)
Extract embedded schematron schema from non - schematron host schema. This method will only be called by __init__ if the given schema document is not a schematron schema by itself. Must return a schematron schema document tree or None.
def _extract(self, element):
    """Extract embedded schematron schema from non-schematron host schema.
    This method will only be called by __init__ if the given schema document
    is not a schematron schema by itself.
    Must return a schematron schema document tree or None.
    """
    schematron = None
    if element.tag == _xml_schema_root:
        # XML Schema host: fixed root element, dedicated extractor.
        schematron = self._extract_xsd(element)
    elif element.nsmap[element.prefix] == RELAXNG_NS:
        # RelaxNG does not have a single unique root element, so detect it
        # by the namespace bound to the element's own prefix instead.
        schematron = self._extract_rng(element)
    return schematron
Return the name of the version control backend if found at given location e. g. vcs. get_backend_name (/ path/ to/ vcs/ checkout )
def get_backend_name(self, location):
    """Return the name of the version control backend found at *location*,
    or None when no registered backend's marker directory exists there,
    e.g. vcs.get_backend_name('/path/to/vcs/checkout').
    """
    for vc_type in self._registry.values():
        logger.debug('Checking in %s for %s (%s)...',
                     location, vc_type.dirname, vc_type.name)
        # Each backend is identified by its marker dir (.git, .hg, ...).
        marker = os.path.join(location, vc_type.dirname)
        if os.path.exists(marker):
            logger.debug('Determine that %s uses VCS: %s',
                         location, vc_type.name)
            return vc_type.name
    return None
POSIX absolute paths start with os.path.sep; win32 ones start with a drive specifier (like c:\\folder).
def _is_local_repository(self, repo): """ posix absolute paths start with os.path.sep, win32 ones ones start with drive (like c:\\folder) """ drive, tail = os.path.splitdrive(repo) return repo.startswith(os.path.sep) or drive
Returns ( url revision ) where both are strings
def get_info(self, location):
    """Return a ``(url, revision)`` pair of strings for *location*.

    Asserts that *location* is the checkout itself rather than its VCS
    metadata directory (e.g. not ending in ``.git``).
    """
    is_metadata_dir = location.rstrip('/').endswith(self.dirname)
    assert not is_metadata_dir, 'Bad directory: %s' % location
    url = self.get_url(location)
    revision = self.get_revision(location)
    return url, revision
Clean up current location and download the url repository ( and vcs infos ) into location
def unpack(self, location):
    """
    Clean up current location and download the url repository
    (and vcs infos) into location

    WARNING: destructive — anything already at *location* is removed
    before the fresh checkout is obtained.
    """
    if os.path.exists(location):
        rmtree(location)
    # Backend-specific clone/checkout into the now-empty location.
    self.obtain(location)
Run a VCS subcommand This is simply a wrapper around call_subprocess that adds the VCS command name and checks that the VCS is available
def run_command(self, cmd, show_stdout=True, cwd=None,
                raise_on_returncode=True,
                command_level=logging.DEBUG, command_desc=None,
                extra_environ=None):
    """
    Run a VCS subcommand
    This is simply a wrapper around call_subprocess that adds the VCS
    command name, and checks that the VCS is available

    :param cmd: the subcommand arguments (the VCS executable name is
        prepended automatically).
    :raises BadCommand: when the VCS executable cannot be found.
    """
    cmd = [self.name] + cmd
    try:
        return call_subprocess(cmd, show_stdout, cwd,
                               raise_on_returncode, command_level,
                               command_desc, extra_environ)
    except OSError as e:
        # errno.ENOENT = no such file or directory
        # In other words, the VCS executable isn't available
        if e.errno == errno.ENOENT:
            raise BadCommand('Cannot find command %r' % self.name)
        else:
            # Any other OS-level failure is not ours to translate.
            raise
Return implementation version.
def get_impl_ver():
    """Return the implementation version tag, e.g. "33" for Python 3.3."""
    ver = sysconfig.get_config_var("py_version_nodot")
    if not ver:
        # Some platforms don't define py_version_nodot; derive it from
        # the running interpreter's (major, minor).
        ver = ''.join(map(str, sys.version_info[:2]))
    return ver
Return a list of supported tags for each version specified in versions.
def get_supported(versions=None):
    """Return a list of supported tags for each version specified in
    `versions`.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :return: a list of ``(python_tag, abi_tag, platform_tag)`` triples,
        most specific first.
    """
    supported = []
    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        major = sys.version_info[0]
        # Support all previous minor Python versions.
        for minor in range(sys.version_info[1], -1, -1):
            versions.append(''.join(map(str, (major, minor))))
    impl = get_abbr_impl()
    abis = []
    soabi = sysconfig.get_config_var('SOABI')
    if soabi and soabi.startswith('cpython-'):
        # e.g. SOABI "cpython-33m" -> abi tag "cp33m", listed first.
        abis[0:0] = ['cp' + soabi.split('-', 1)[-1]]
    abi3s = set()
    # NOTE(review): `imp` is deprecated in Python 3.4+; fine for the
    # vintage of this code but worth confirming against target versions.
    import imp
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])
    abis.extend(sorted(list(abi3s)))
    abis.append('none')
    arch = get_platform()
    # Current version, current API (built specifically for our Python):
    for abi in abis:
        supported.append(('%s%s' % (impl, versions[0]), abi, arch))
    # No abi / arch, but requires our implementation:
    for i, version in enumerate(versions):
        supported.append(('%s%s' % (impl, version), 'none', 'any'))
        if i == 0:
            # Tagged specifically as being cross-version compatible
            # (with just the major version specified)
            supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))
    return supported
Return the real host for the given WSGI environment. This first checks the X - Forwarded - Host header then the normal Host header and finally the SERVER_NAME environment variable ( using the first one it finds ).
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.

    Preference order: the ``X-Forwarded-Host`` header (first entry of a
    comma-separated list), then the ``Host`` header, then ``SERVER_NAME``.
    Only in the ``SERVER_NAME`` case is the port appended, and only when
    it is not the default port for the URL scheme.

    Optionally it verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see
        :func:`host_is_trusted` for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        host = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip()
    elif 'HTTP_HOST' in environ:
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        scheme_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
        if scheme_port not in (('https', '443'), ('http', '80')):
            host = '%s:%s' % (host, environ['SERVER_PORT'])
    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        from werkzeug.exceptions import SecurityError
        raise SecurityError('Host "%s" is not trusted' % host)
    return host
Yield egg or source distribution objects based on basename
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename

    NOTE: despite the historical wording, this returns a list (possibly
    empty), not a generator.
    """
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]    # strip the .zip
    if basename.endswith('.egg') and '-' in basename:
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        # bdist_wininst installers encode base/pyver/platform in the name
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.)
    #
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            basename = basename[:-len(ext)]
            return interpret_distro_name(location, basename, metadata)
    return []  # no extension matched: not a recognizable distribution
Find rel = homepage and rel = download links in page yielding URLs
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs

    All yielded URLs are resolved relative to *url*.
    """
    # First pass: anchor tags carrying an explicit rel attribute.
    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(match.group(1)))

    # Second pass: PyPI's tabular layout, where the link follows a
    # known table-header cell.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos!=-1:
            match = HREF.search(page,pos)
            if match:
                yield urljoin(url, htmldecode(match.group(1)))
A function that will encode auth from a URL suitable for an HTTP header. >>> str(_encode_auth('username%3Apassword')) 'dXNlcm5hbWU6cGFzc3dvcmQ='
def _encode_auth(auth): """ A function compatible with Python 2.3-3.3 that will encode auth from a URL suitable for an HTTP header. >>> str(_encode_auth('username%3Apassword')) 'dXNlcm5hbWU6cGFzc3dvcmQ=' Long auth strings should not cause a newline to be inserted. >>> long_auth = 'username:' + 'password'*10 >>> chr(10) in str(_encode_auth(long_auth)) False """ auth_s = unquote(auth) # convert to bytes auth_bytes = auth_s.encode() # use the legacy interface for Python 2.3 support encoded_bytes = base64.encodestring(auth_bytes) # convert back to a string encoded = encoded_bytes.decode() # strip the trailing carriage return return encoded.replace('\n','')
Read a local path with special support for directories
def local_open(url):
    """Read a local path, with special support for directories

    Files are served via urlopen; directories get a generated HTML
    listing (or their index.html, if present); anything else is a 404.
    Always returns a response-like object.
    """
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            if f=='index.html':
                # An index file wins outright: serve its contents.
                with open(os.path.join(filename,f),'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename,f)):
                f+='/'
            files.append("<a href=%r>%s</a>" % (f,f))
        else:
            # for-else: no index.html found, synthesize a listing page.
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"

    headers = {'content-type': 'text/html'}
    # HTTPError doubles as a file-like response object here.
    return HTTPError(url, status, message, headers, StringIO(body))
Construct a ( possibly null ) ContentChecker from a URL
def from_url(cls, url):
    "Construct a (possibly null) ContentChecker from a URL"
    # The checksum, if any, lives in the URL fragment (e.g. "#md5=...").
    fragment = urlparse(url)[-1]
    if not fragment:
        return ContentChecker()  # null checker: accepts anything
    match = cls.pattern.search(fragment)
    if not match:
        return ContentChecker()  # fragment present but not a checksum
    # pattern's named groups feed the constructor directly
    return cls(**match.groupdict())
Evaluate a URL as a possible download and maybe retrieve it
def process_url(self, url, retrieve=False):
    """Evaluate a URL as a possible download, and maybe retrieve it

    Registers any distributions the URL's filename implies; when
    *retrieve* is true and the URL is not itself a distribution, fetches
    the page, follows its links recursively, and processes index pages.
    """
    # Already seen and we are not being asked to fetch: nothing to do.
    if url in self.scanned_urls and not retrieve:
        return
    self.scanned_urls[url] = True
    if not URL_SCHEME(url):
        # Not a URL at all; treat as a local filename.
        self.process_filename(url)
        return
    else:
        dists = list(distros_for_url(url))
        if dists:
            if not self.url_ok(url):
                return
            self.debug("Found link: %s", url)

    if dists or not retrieve or url in self.fetched_urls:
        list(map(self.add, dists))
        # don't need the actual page
        return

    if not self.url_ok(url):
        self.fetched_urls[url] = True
        return

    self.info("Reading %s", url)
    self.fetched_urls[url] = True   # prevent multiple fetch attempts
    f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
    if f is None:
        return
    self.fetched_urls[f.url] = True
    if 'html' not in f.headers.get('content-type', '').lower():
        f.close()   # not html, we can't process it
        return

    base = f.url     # handle redirects
    page = f.read()
    if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
        if isinstance(f, HTTPError):
            # Errors have no charset, assume latin1:
            charset = 'latin-1'
        else:
            charset = f.headers.get_param('charset') or 'latin-1'
        page = page.decode(charset, "ignore")
    f.close()
    # Recurse into every link found on the page.
    for match in HREF.finditer(page):
        link = urljoin(base, htmldecode(match.group(1)))
        self.process_url(link)
    # Index pages get extra processing (unless the fetch 404'd).
    if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
        page = self.process_index(url, page)
Return a list of supported tags for each version specified in versions.
def get_supported(versions=None, noarch=False):
    """Return a list of supported tags for each version specified in
    `versions`.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :param noarch: when true, omit all platform-specific tags.
    :return: a list of ``(python_tag, abi_tag, platform_tag)`` triples,
        most specific first.
    """
    supported = []
    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        major = sys.version_info[0]
        # Support all previous minor Python versions.
        for minor in range(sys.version_info[1], -1, -1):
            versions.append(''.join(map(str, (major, minor))))
    impl = get_abbr_impl()
    abis = []
    try:
        soabi = sysconfig.get_config_var('SOABI')
    except IOError as e:  # Issue #1074
        warnings.warn("{0}".format(e), RuntimeWarning)
        soabi = None
    if soabi and soabi.startswith('cpython-'):
        # e.g. SOABI "cpython-33m" -> abi tag "cp33m", listed first.
        abis[0:0] = ['cp' + soabi.split('-', 1)[-1]]
    abi3s = set()
    # NOTE(review): `imp` is deprecated on modern Pythons; fine for the
    # vintage of this code but worth confirming against target versions.
    import imp
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])
    abis.extend(sorted(list(abi3s)))
    abis.append('none')
    if not noarch:
        arch = get_platform()
        if sys.platform == 'darwin':
            # support macosx-10.6-intel on macosx-10.9-x86_64
            match = _osx_arch_pat.match(arch)
            if match:
                name, major, minor, actual_arch = match.groups()
                actual_arches = [actual_arch]
                # Each fat/universal binary flavor that contains our arch
                # is also installable here.
                if actual_arch in ('i386', 'ppc'):
                    actual_arches.append('fat')
                if actual_arch in ('i386', 'x86_64'):
                    actual_arches.append('intel')
                if actual_arch in ('i386', 'ppc', 'x86_64'):
                    actual_arches.append('fat3')
                if actual_arch in ('ppc64', 'x86_64'):
                    actual_arches.append('fat64')
                if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                    actual_arches.append('universal')
                tpl = '{0}_{1}_%i_%s'.format(name, major)
                arches = []
                # Wheels built for any older minor OS release also work.
                for m in range(int(minor) + 1):
                    for a in actual_arches:
                        arches.append(tpl % (m, a))
            else:
                # arch pattern didn't match (?!)
                arches = [arch]
        else:
            arches = [arch]
        # Current version, current API (built specifically for our Python):
        for abi in abis:
            for arch in arches:
                supported.append(('%s%s' % (impl, versions[0]), abi, arch))
        # Has binaries, does not use the Python API:
        supported.append(('py%s' % (versions[0][0]), 'none', arch))
    # No abi / arch, but requires our implementation:
    for i, version in enumerate(versions):
        supported.append(('%s%s' % (impl, version), 'none', 'any'))
        if i == 0:
            # Tagged specifically as being cross-version compatible
            # (with just the major version specified)
            supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))
    return supported
Remove duplicate entries from sys. path along with making them absolute
def removeduppaths():
    """Deduplicate sys.path in place and make every entry absolute.

    This ensures that the initial path provided by the interpreter
    contains only absolute pathnames, even when running from the build
    directory.  On case-insensitive filesystems, entries differing only
    in case count as duplicates.  Returns the set of normalized paths
    now known.
    """
    deduped = []
    known_paths = set()
    for entry in sys.path:
        # makepath() returns the absolute path plus its normalized-case
        # form, which is what we key duplicate detection on.
        entry, normcased = makepath(entry)
        if normcased not in known_paths:
            deduped.append(entry)
            known_paths.add(normcased)
    sys.path[:] = deduped
    return known_paths
Append ./build/lib.<platform> in case we're running in the build dir (especially for Guido :-).
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from distutils.util import get_platform
    # e.g. "build/lib.linux-x86_64-3.3" — %.3s keeps just "X.Y" of the
    # full version string.
    s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    # gettotalrefcount only exists on debug builds of CPython.
    if hasattr(sys, 'gettotalrefcount'):
        s += '-pydebug'
    # Resolve relative to the last sys.path entry (assumed to be the
    # source directory in this scenario).
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)