def _download_http_url(link, session, temp_dir):
    """Download link url into temp_dir using provided session"""
    target_url = link.url.split('#', 1)[0]
    try:
        resp = session.get(
            target_url,
            # We use Accept-Encoding: identity here because requests
            # defaults to accepting compressed responses. This breaks in
            # a variety of ways depending on how the server is configured.
            # - Some servers will notice that the file isn't a compressible
            #   file and will leave the file alone and with an empty
            #   Content-Encoding
            # - Some servers will notice that the file is already
            #   compressed and will leave the file alone and will add a
            #   Content-Encoding: gzip header
            # - Some servers won't notice anything at all and will take
            #   a file that's already been compressed and compress it again
            #   and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding we're
            # hoping to eliminate the third case. Hopefully there does not
            # exist a server which when given a file will notice it is
            # already compressed and that you're not asking for a
            # compressed file and will then decompress it before sending
            # because if that's the case I don't think it'll ever be
            # possible to make this work.
            headers={"Accept-Encoding": "identity"},
            stream=True,
        )
        resp.raise_for_status()
    except requests.HTTPError as exc:
        logger.critical(
            "HTTP error %s while getting %s",
            exc.response.status_code, link,
        )
        raise

    content_type = resp.headers.get('content-type', '')
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        type, params = cgi.parse_header(content_disposition)
        # We use ``or`` here because we don't want to use an "empty" value
        # from the filename param.
        filename = params.get('filename') or filename
    ext = splitext(filename)[1]
    if not ext:
        ext = mimetypes.guess_extension(content_type)
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    file_path = os.path.join(temp_dir, filename)
    with open(file_path, 'wb') as content_file:
        _download_url(resp, link, content_file)
    return file_path, content_type
def _check_download_dir(link, download_dir):
    """ Check download_dir for previously downloaded file with correct hash
        If a correct file is found return its path else None
    """
    download_path = os.path.join(download_dir, link.filename)
    if os.path.exists(download_path):
        # If already downloaded, does its hash match?
        logger.info('File was already downloaded %s', download_path)
        if link.hash:
            download_hash = _get_hash_from_file(download_path, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                logger.warning(
                    'Previously-downloaded file %s has bad hash, '
                    're-downloading.',
                    download_path
                )
                os.unlink(download_path)
                return None
        return download_path
    return None
def currencyFormat(_context, code, symbol, format,
                   currency_digits=True, decimal_quantization=True,
                   name=''):
    """Handle currencyFormat subdirectives."""
    _context.action(
        discriminator=('currency', name, code),
        callable=_register_currency,
        args=(name, code, symbol, format, currency_digits,
              decimal_quantization)
    )
def exchange(_context, component, backend, base, name=''):
    """Handle exchange subdirectives."""
    _context.action(
        discriminator=('currency', 'exchange', component),
        callable=_register_exchange,
        args=(name, component, backend, base)
    )
def print_results(distributions, list_all_files):
    """
    Print the information from installed distributions found.
    """
    results_printed = False
    for dist in distributions:
        results_printed = True
        logger.info("---")
        logger.info("Metadata-Version: %s" % dist.get('metadata-version'))
        logger.info("Name: %s" % dist['name'])
        logger.info("Version: %s" % dist['version'])
        logger.info("Summary: %s" % dist.get('summary'))
        logger.info("Home-page: %s" % dist.get('home-page'))
        logger.info("Author: %s" % dist.get('author'))
        logger.info("Author-email: %s" % dist.get('author-email'))
        logger.info("License: %s" % dist.get('license'))
        logger.info("Location: %s" % dist['location'])
        logger.info("Requires: %s" % ', '.join(dist['requires']))
        if list_all_files:
            logger.info("Files:")
            if dist['files'] is not None:
                for line in dist['files']:
                    logger.info("  %s" % line.strip())
            else:
                logger.info("Cannot locate installed-files.txt")
        if 'entry_points' in dist:
            logger.info("Entry-points:")
            for line in dist['entry_points']:
                logger.info("  %s" % line.strip())
    return results_printed
def _decode(self, data, decode_content, flush_decoder):
    """
    Decode the data passed in and potentially flush the decoder.
    """
    try:
        if decode_content and self._decoder:
            data = self._decoder.decompress(data)
    except (IOError, zlib.error) as e:
        content_encoding = self.headers.get('content-encoding', '').lower()
        raise DecodeError(
            "Received response with content-encoding: %s, but "
            "failed to decode it." % content_encoding,
            e)

    if flush_decoder and decode_content and self._decoder:
        buf = self._decoder.decompress(binary_type())
        data += buf + self._decoder.flush()

    return data
def read(self, amt=None, decode_content=None, cache_content=False):
    """
    Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
    parameters: ``decode_content`` and ``cache_content``.

    :param amt:
        How much of the content to read. If specified, caching is skipped
        because it doesn't make sense to cache partial content as the full
        response.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.

    :param cache_content:
        If True, will save the returned data such that the same result is
        returned regardless of the state of the underlying file object.
        This is useful if you want the ``.data`` property to continue
        working after having ``.read()`` the file object. (Overridden if
        ``amt`` is set.)
    """
    self._init_decoder()
    if decode_content is None:
        decode_content = self.decode_content

    if self._fp is None:
        return

    flush_decoder = False

    try:
        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True

        except SocketTimeout:
            # FIXME: Ideally we'd like to include the url in the
            # ReadTimeoutError but there is yet no clean way to get at it
            # from this context.
            raise ReadTimeoutError(self._pool, None, 'Read timed out.')

        except BaseSSLError as e:
            # FIXME: Is there a better way to differentiate between SSLErrors?
            if 'read operation timed out' not in str(e):  # Defensive:
                # This shouldn't happen but just in case we're missing an edge
                # case, let's avoid swallowing SSL errors.
                raise

            raise ReadTimeoutError(self._pool, None, 'Read timed out.')

        except HTTPException as e:
            # This includes IncompleteRead.
            raise ProtocolError('Connection broken: %r' % e, e)

        self._fp_bytes_read += len(data)

        data = self._decode(data, decode_content, flush_decoder)

        if cache_content:
            self._body = data

        return data

    finally:
        if self._original_response and self._original_response.isclosed():
            self.release_conn()
def stream(self, amt=2**16, decode_content=None):
    """
    A generator wrapper for the read() method. A call will block until
    ``amt`` bytes have been read from the connection or until the
    connection is closed.

    :param amt:
        How much of the content to read. The generator will return up to
        this much data per iteration, but may return less. This is
        particularly likely when using compressed data. However, the empty
        string will never be returned.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.
    """
    if self.chunked:
        for line in self.read_chunked(amt, decode_content=decode_content):
            yield line
    else:
        while not is_fp_closed(self._fp):
            data = self.read(amt=amt, decode_content=decode_content)

            if data:
                yield data
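# A minimal usage sketch for stream() above: stream a urllib3 response to
# disk in 64 KiB chunks. The URL and filename are illustrative, not from
# the source; preload_content=False is what keeps the body unread so that
# stream() can iterate it.
import urllib3

http = urllib3.PoolManager()
resp = http.request('GET', 'https://example.com/big-file',
                    preload_content=False)
with open('big-file', 'wb') as out:
    for chunk in resp.stream(2**16):
        out.write(chunk)
resp.release_conn()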
def read_chunked(self, amt=None, decode_content=None):
    """
    Similar to :meth:`HTTPResponse.read`, but with an additional
    parameter: ``decode_content``.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.
    """
    self._init_decoder()
    # FIXME: Rewrite this method and make it a class with a better
    # structured logic.
    if not self.chunked:
        raise ResponseNotChunked("Response is not chunked. "
                                 "Header 'transfer-encoding: chunked' is missing.")

    if self._original_response and self._original_response._method.upper() == 'HEAD':
        # Don't bother reading the body of a HEAD request.
        # FIXME: Can we do this somehow without accessing private httplib
        # _method?
        self._original_response.close()
        return

    while True:
        self._update_chunk_length()
        if self.chunk_left == 0:
            break
        chunk = self._handle_chunk(amt)
        yield self._decode(chunk, decode_content=decode_content,
                           flush_decoder=True)

    # Chunk content ends with \r\n: discard it.
    while True:
        line = self._fp.fp.readline()
        if not line:
            # Some sites may not end with '\r\n'.
            break
        if line == b'\r\n':
            break

    # We read everything; close the "file".
    if self._original_response:
        self._original_response.close()
    self.release_conn()
def _default_template_ctx_processor():
    """Default template context processor.  Injects `request`,
    `session` and `g`.
    """
    reqctx = _request_ctx_stack.top
    appctx = _app_ctx_stack.top
    rv = {}
    if appctx is not None:
        rv['g'] = appctx.g
    if reqctx is not None:
        rv['request'] = reqctx.request
        rv['session'] = reqctx.session
    return rv
def _render(template, context, app):
    """Renders the template and fires the signal"""
    rv = template.render(context)
    template_rendered.send(app, template=template, context=context)
    return rv
def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable of template
                                  names; the first one that exists will
                                  be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.

    :param source: the source code of the template to be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.from_string(source),
                   context, ctx.app)
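# A minimal usage sketch for the render helpers above, via Flask's public
# wrappers. The app, route, and template string are illustrative, not from
# the source.
from flask import Flask, render_template_string

app = Flask(__name__)

@app.route('/hello/<name>')
def hello(name):
    # Renders an inline template with `name` injected into the context.
    return render_template_string('Hello, {{ name }}!', name=name)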
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available."""
    global parse_version
    try:
        from pkg_resources import parse_version
    except ImportError:
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
def install(self, force=False, overrides={}):
    """
    Install the wheel into site-packages.
    """

    # Utility to get the target directory for a particular key
    def get_path(key):
        return overrides.get(key) or self.install_paths[key]

    # The base target location is either purelib or platlib
    if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
        root = get_path('purelib')
    else:
        root = get_path('platlib')

    # Parse all the names in the archive
    name_trans = {}
    for info in self.zipfile.infolist():
        name = info.filename
        # Zip files can contain entries representing directories.
        # These end in a '/'.
        # We ignore these, as we create directories on demand.
        if name.endswith('/'):
            continue

        # Pathnames in a zipfile namelist are always /-separated.
        # In theory, paths could start with ./ or have other oddities
        # but this won't happen in practical cases of well-formed wheels.
        # We'll cover the simple case of an initial './' as it's both easy
        # to do and more common than most other oddities.
        if name.startswith('./'):
            name = name[2:]

        # Split off the base directory to identify files that are to be
        # installed in non-root locations
        basedir, sep, filename = name.partition('/')
        if sep and basedir == self.datadir_name:
            # Data file. Target destination is elsewhere
            key, sep, filename = filename.partition('/')
            if not sep:
                raise ValueError("Invalid filename in wheel: {0}".format(name))
            target = get_path(key)
        else:
            # Normal file. Target destination is root
            key = ''
            target = root
            filename = name

        # Map the actual filename from the zipfile to its intended target
        # directory and the pathname relative to that directory.
        dest = os.path.normpath(os.path.join(target, filename))
        name_trans[info] = (key, target, filename, dest)

    # We're now ready to start processing the actual install. The process
    # is as follows:
    #     1. Prechecks - is the wheel valid, is its declared architecture
    #        OK, etc. [[Responsibility of the caller]]
    #     2. Overwrite check - do any of the files to be installed already
    #        exist?
    #     3. Actual install - put the files in their target locations.
    #     4. Update RECORD - write a suitably modified RECORD file to
    #        reflect the actual installed paths.

    if not force:
        for info, v in name_trans.items():
            k = info.filename
            key, target, filename, dest = v
            if os.path.exists(dest):
                raise ValueError(
                    "Wheel file {0} would overwrite {1}. Use force if this "
                    "is intended".format(k, dest))

    # Get the name of our executable, for use when replacing script
    # wrapper hashbang lines.
    # We encode it using getfilesystemencoding, as that is "the name of
    # the encoding used to convert Unicode filenames into system file
    # names".
    exename = sys.executable.encode(sys.getfilesystemencoding())
    record_data = []
    record_name = self.distinfo_name + '/RECORD'
    for info, (key, target, filename, dest) in name_trans.items():
        name = info.filename
        source = self.zipfile.open(info)
        # Skip the RECORD file
        if name == record_name:
            continue
        ddir = os.path.dirname(dest)
        if not os.path.isdir(ddir):
            os.makedirs(ddir)
        destination = HashingFile(open(dest, 'wb'))
        if key == 'scripts':
            hashbang = source.readline()
            if hashbang.startswith(b'#!python'):
                hashbang = b'#!' + exename + binary(os.linesep)
            destination.write(hashbang)
        shutil.copyfileobj(source, destination)
        reldest = os.path.relpath(dest, root)
        reldest = reldest.replace(os.sep, '/')
        record_data.append((reldest, destination.digest(),
                            destination.length))
        destination.close()
        source.close()
        # preserve attributes (especially +x bit for scripts)
        attrs = info.external_attr >> 16
        if attrs:  # tends to be 0 if Windows.
            os.chmod(dest, attrs)

    record_name = os.path.join(root, self.record_name)
    writer = csv.writer(open_for_csv(record_name, 'w+'))
    for reldest, digest, length in sorted(record_data):
        writer.writerow((reldest, digest, length))
    writer.writerow((self.record_name, '', ''))
def verify(self, zipfile=None):
    """Configure the VerifyingZipFile `zipfile` by verifying its signature
    and setting expected hashes for every hash in RECORD.
    Caller must complete the verification process by completely reading
    every file in the archive (e.g. with extractall)."""
    sig = None
    if zipfile is None:
        zipfile = self.zipfile
    zipfile.strict = True

    record_name = '/'.join((self.distinfo_name, 'RECORD'))
    sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
    # tolerate s/mime signatures:
    smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
    zipfile.set_expected_hash(record_name, None)
    zipfile.set_expected_hash(sig_name, None)
    zipfile.set_expected_hash(smime_sig_name, None)
    record = zipfile.read(record_name)

    record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
    try:
        sig = from_json(native(zipfile.read(sig_name)))
    except KeyError:  # no signature
        pass
    if sig:
        headers, payload = signatures.verify(sig)
        if payload['hash'] != "sha256=" + native(record_digest):
            msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
            raise BadWheelFile(msg.format(payload['hash'],
                                          native(record_digest)))

    reader = csv.reader((native(r) for r in record.splitlines()))

    for row in reader:
        filename = row[0]
        hash = row[1]
        if not hash:
            if filename not in (record_name, sig_name):
                sys.stderr.write("%s has no hash!\n" % filename)
            continue
        algo, data = row[1].split('=', 1)
        assert algo == "sha256", "Unsupported hash algorithm"
        zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
def is_declared(self, name):
    """Check if a name is declared in this or an outer scope."""
    if name in self.declared_locally or name in self.declared_parameter:
        return True
    return name in self.declared
def inspect(self, nodes):
    """Walk the node and check for identifiers.  If the scope is hard (eg:
    enforce on a python level) overrides from outer scopes are tracked
    differently.
    """
    visitor = FrameIdentifierVisitor(self.identifiers)
    for node in nodes:
        visitor.visit(node)
def visit_Name(self, node):
    """All assignments to names go through this function."""
    if node.ctx == 'store':
        self.identifiers.declared_locally.add(node.name)
    elif node.ctx == 'param':
        self.identifiers.declared_parameter.add(node.name)
    elif node.ctx == 'load' and not \
         self.identifiers.is_declared(node.name):
        self.identifiers.undeclared.add(node.name)
def visit_Include(self, node, frame):
    """Handles includes."""
    if node.with_context:
        self.unoptimize_scope(frame)
    if node.ignore_missing:
        self.writeline('try:')
        self.indent()

    func_name = 'get_or_select_template'
    if isinstance(node.template, nodes.Const):
        if isinstance(node.template.value, string_types):
            func_name = 'get_template'
        elif isinstance(node.template.value, (tuple, list)):
            func_name = 'select_template'
    elif isinstance(node.template, (nodes.Tuple, nodes.List)):
        func_name = 'select_template'

    self.writeline('template = environment.%s(' % func_name, node)
    self.visit(node.template, frame)
    self.write(', %r)' % self.name)
    if node.ignore_missing:
        self.outdent()
        self.writeline('except TemplateNotFound:')
        self.indent()
        self.writeline('pass')
        self.outdent()
        self.writeline('else:')
        self.indent()

    if node.with_context:
        self.writeline('for event in template.root_render_func('
                       'template.new_context(context.parent, True, '
                       'locals())):')
    else:
        self.writeline('for event in template.module._body_stream:')

    self.indent()
    self.simple_write('event', frame)
    self.outdent()

    if node.ignore_missing:
        self.outdent()
def visit_FromImport(self, node, frame):
    """Visit named imports."""
    self.newline(node)
    self.write('included_template = environment.get_template(')
    self.visit(node.template, frame)
    self.write(', %r).' % self.name)
    if node.with_context:
        self.write('make_module(context.parent, True)')
    else:
        self.write('module')

    var_names = []
    discarded_names = []
    for name in node.names:
        if isinstance(name, tuple):
            name, alias = name
        else:
            alias = name
        self.writeline('l_%s = getattr(included_template, '
                       '%r, missing)' % (alias, name))
        self.writeline('if l_%s is missing:' % alias)
        self.indent()
        self.writeline('l_%s = environment.undefined(%r %% '
                       'included_template.__name__, '
                       'name=%r)' %
                       (alias, 'the template %%r (imported on %s) does '
                        'not export the requested name %s' % (
                            self.position(node),
                            repr(name)
                        ), name))
        self.outdent()
        if frame.toplevel:
            var_names.append(alias)
            if not alias.startswith('_'):
                discarded_names.append(alias)
        frame.assigned_names.add(alias)

    if var_names:
        if len(var_names) == 1:
            name = var_names[0]
            self.writeline('context.vars[%r] = l_%s' % (name, name))
        else:
            self.writeline('context.vars.update({%s})' % ', '.join(
                '%r: l_%s' % (name, name) for name in var_names
            ))
    if discarded_names:
        if len(discarded_names) == 1:
            self.writeline('context.exported_vars.discard(%r)' %
                           discarded_names[0])
        else:
            self.writeline('context.exported_vars.difference_'
                           'update((%s))' %
                           ', '.join(imap(repr, discarded_names)))
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info at the end of the archive."""

    zip_filename = base_name + ".whl"

    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    # XXX support bz2, xz when available
    zip = zipfile.ZipFile(open(zip_filename, "wb+"), "w",
                          compression=zipfile.ZIP_DEFLATED)

    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
    deferred = []

    def writefile(path):
        zip.write(path, path)
        log.info("adding '%s'" % path)

    for dirpath, dirnames, filenames in os.walk(base_dir):
        for name in filenames:
            path = os.path.normpath(os.path.join(dirpath, name))

            if os.path.isfile(path):
                if dirpath.endswith('.dist-info'):
                    deferred.append((score.get(name, 0), path))
                else:
                    writefile(path)

    deferred.sort()
    for score, path in deferred:
        writefile(path)

    zip.close()

    return zip_filename
def atomize(f, lock=None):
    """
    Decorate a function with a reentrant lock to prevent multiple
    threads from calling said function simultaneously.
    """
    lock = lock or threading.RLock()

    @functools.wraps(f)
    def exec_atomic(*args, **kwargs):
        lock.acquire()
        try:
            return f(*args, **kwargs)
        finally:
            lock.release()

    return exec_atomic
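# A minimal usage sketch for atomize() above; the counter is illustrative,
# not from the source. Each call to bump() runs under the shared reentrant
# lock, so concurrent threads cannot interleave the read-modify-write.
import threading

count = {'n': 0}

@atomize
def bump():
    count['n'] += 1

threads = [threading.Thread(target=bump) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert count['n'] == 10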
def service_factory(app, host, port,
                    report_message='service factory port {port}',
                    provider_cls=HTTPServiceProvider):
    """Create service, start server.

    :param app: application to instantiate a service
    :param host: interface to bind the provider to
    :param port: port to bind the provider to
    :param report_message: message format used to report the port
    :param provider_cls: server class that provides the service
    """
    service = Service(app)
    server = provider_cls(service, host, port, report_message)
    server.serve_forever()
def unicode_urlencode(obj, charset='utf-8'):
    """URL escapes a single bytestring or unicode string with the
    given charset if applicable to URL safe quoting under all rules
    that need to be considered under all supported Python versions.

    If non strings are provided they are converted to their unicode
    representation first.
    """
    if not isinstance(obj, string_types):
        obj = text_type(obj)
    if isinstance(obj, text_type):
        obj = obj.encode(charset)
    return text_type(url_quote(obj))
def matches_requirement(req, wheels):
    """List of wheels matching a requirement.

    :param req: The requirement to satisfy
    :param wheels: List of wheels to search.
    """
    try:
        from pkg_resources import Distribution, Requirement
    except ImportError:
        raise RuntimeError("Cannot use requirements without pkg_resources")

    req = Requirement.parse(req)

    selected = []
    for wf in wheels:
        f = wf.parsed_filename
        dist = Distribution(project_name=f.group("name"),
                            version=f.group("ver"))
        if dist in req:
            selected.append(wf)
    return selected
def populate_requirement_set(requirement_set, args, options, finder,
                             session, name, wheel_cache):
    """
    Marshal cmd line args into a requirement set.
    """
    for req in args:
        requirement_set.add_requirement(
            InstallRequirement.from_line(
                req, None, isolated=options.isolated_mode,
                wheel_cache=wheel_cache
            )
        )

    for req in options.editables:
        requirement_set.add_requirement(
            InstallRequirement.from_editable(
                req,
                default_vcs=options.default_vcs,
                isolated=options.isolated_mode,
                wheel_cache=wheel_cache
            )
        )

    found_req_in_file = False
    for filename in options.requirements:
        for req in parse_requirements(
                filename,
                finder=finder, options=options, session=session,
                wheel_cache=wheel_cache):
            found_req_in_file = True
            requirement_set.add_requirement(req)

    if not (args or options.editables or found_req_in_file):
        opts = {'name': name}
        if options.find_links:
            msg = ('You must give at least one requirement to '
                   '%(name)s (maybe you meant "pip %(name)s '
                   '%(links)s"?)' %
                   dict(opts, links=' '.join(options.find_links)))
        else:
            msg = ('You must give at least one requirement '
                   'to %(name)s (see "pip help %(name)s")' % opts)
        logger.warning(msg)
def call(__self, __obj, *args, **kwargs):
    """Call the callable with the arguments and keyword arguments
    provided but inject the active context or environment as first
    argument if the callable is a :func:`contextfunction` or
    :func:`environmentfunction`.
    """
    if __debug__:
        __traceback_hide__ = True

    # Allow callable classes to take a context
    fn = __obj.__call__
    for fn_type in ('contextfunction',
                    'evalcontextfunction',
                    'environmentfunction'):
        if hasattr(fn, fn_type):
            __obj = fn
            break

    if isinstance(__obj, _context_function_types):
        if getattr(__obj, 'contextfunction', 0):
            args = (__self,) + args
        elif getattr(__obj, 'evalcontextfunction', 0):
            args = (__self.eval_ctx,) + args
        elif getattr(__obj, 'environmentfunction', 0):
            args = (__self.environment,) + args
    try:
        return __obj(*args, **kwargs)
    except StopIteration:
        return __self.environment.undefined('value was undefined because '
                                            'a callable raised a '
                                            'StopIteration exception')
def export(self, location):
    """
    Export the Bazaar repository at the url to the destination location
    """
    temp_dir = tempfile.mkdtemp('-export', 'pip-')
    self.unpack(temp_dir)
    if os.path.exists(location):
        # Remove the location to make sure Bazaar can export it correctly
        rmtree(location)
    try:
        self.run_command(['export', location], cwd=temp_dir,
                         show_stdout=False)
    finally:
        rmtree(temp_dir)
def pip_version_check(session):
    """Check for an update for pip.

    Limit the frequency of checks to once per week. State is stored either in
    the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
    of the pip script path.
    """
    import pip  # imported here to prevent circular imports
    pypi_version = None

    try:
        state = load_selfcheck_statefile()

        current_time = datetime.datetime.utcnow()
        # Determine if we need to refresh the state
        if "last_check" in state.state and "pypi_version" in state.state:
            last_check = datetime.datetime.strptime(
                state.state["last_check"],
                SELFCHECK_DATE_FMT
            )
            if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
                pypi_version = state.state["pypi_version"]

        # Refresh the version if we need to or just see if we need to warn
        if pypi_version is None:
            resp = session.get(
                PyPI.pip_json_url,
                headers={"Accept": "application/json"},
            )
            resp.raise_for_status()
            pypi_version = [
                v for v in sorted(
                    list(resp.json()["releases"]),
                    key=packaging_version.parse,
                )
                if not packaging_version.parse(v).is_prerelease
            ][-1]

            # save that we've performed a check
            state.save(pypi_version, current_time)

        pip_version = packaging_version.parse(pip.__version__)
        remote_version = packaging_version.parse(pypi_version)

        # Determine if our pypi_version is older
        if (pip_version < remote_version and
                pip_version.base_version != remote_version.base_version):
            # Advise "python -m pip" on Windows to avoid issues
            # with overwriting pip.exe.
            if WINDOWS:
                pip_cmd = "python -m pip"
            else:
                pip_cmd = "pip"
            logger.warning(
                "You are using pip version %s, however version %s is "
                "available.\nYou should consider upgrading via the "
                "'%s install --upgrade pip' command." % (pip.__version__,
                                                         pypi_version,
                                                         pip_cmd)
            )

    except Exception:
        logger.debug(
            "There was an error checking the latest version of pip",
            exc_info=True,
        )
def lookup(self, ResponseGroup="Large", **kwargs):
    """Lookup an Amazon Product.

    :return:
        An instance of :class:`~.AmazonProduct` if one item was returned,
        or a list of :class:`~.AmazonProduct` instances if multiple
        items were returned.
    """
    response = self.api.ItemLookup(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(response)
    if root.Items.Request.IsValid == 'False':
        code = root.Items.Request.Errors.Error.Code
        msg = root.Items.Request.Errors.Error.Message
        raise LookupException(
            "Amazon Product Lookup Error: '{0}', '{1}'".format(code, msg))
    if not hasattr(root.Items, 'Item'):
        raise AsinNotFound("ASIN(s) not found: '{0}'".format(
            etree.tostring(root, pretty_print=True)))
    if len(root.Items.Item) > 1:
        return [
            AmazonProduct(
                item,
                self.aws_associate_tag,
                self,
                region=self.region) for item in root.Items.Item
        ]
    else:
        return AmazonProduct(
            root.Items.Item,
            self.aws_associate_tag,
            self,
            region=self.region
        )
def iterate_pages(self):
    """Iterate Pages.

    A generator which iterates over all pages.
    Keep in mind that Amazon limits the number of pages it makes available.

    :return:
        Yields lxml root elements.
    """
    try:
        while True:
            yield self._query(ItemPage=self.current_page, **self.kwargs)
            self.current_page += 1
    except NoMorePages:
        pass
def _query(self, ResponseGroup="Large", **kwargs):
    """Query.

    Query Amazon search and check for errors.

    :return:
        An lxml root element.
    """
    response = self.api.ItemSearch(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(response)
    if root.Items.Request.IsValid == 'False':
        code = root.Items.Request.Errors.Error.Code
        msg = root.Items.Request.Errors.Error.Message
        if code == 'AWS.ParameterOutOfRange':
            raise NoMorePages(msg)
        else:
            raise SearchException(
                "Amazon Search Error: '{0}', '{1}'".format(code, msg))
    return root
def ancestor(self):
    """This browse node's immediate ancestor in the browse node tree.

    :return:
        The ancestor as an :class:`~.AmazonBrowseNode`, or None.
    """
    ancestors = getattr(self.element, 'Ancestors', None)
    if hasattr(ancestors, 'BrowseNode'):
        return AmazonBrowseNode(ancestors['BrowseNode'])
    return None
def children(self):
    """This browse node's children in the browse node tree.

    :return:
        A list of this browse node's children in the browse node tree.
    """
    children = []
    child_nodes = getattr(self.element, 'Children')
    for child in getattr(child_nodes, 'BrowseNode', []):
        children.append(AmazonBrowseNode(child))
    return children
def _safe_get_element(self, path, root=None):
    """Safe Get Element.

    Get a child element of root (multiple levels deep) failing silently
    if any descendant does not exist.

    :param root:
        Lxml element.
    :param path:
        String path (i.e. 'Items.Item.Offers.Offer').
    :return:
        Element or None.
    """
    elements = path.split('.')
    parent = root if root is not None else self.item

    for element in elements[:-1]:
        parent = getattr(parent, element, None)
        if parent is None:
            return None
    return getattr(parent, elements[-1], None)
def _safe_get_element_text(self, path, root=None):
    """Safe get element text.

    Get element as string or None.

    :param root:
        Lxml element.
    :param path:
        String path (i.e. 'Items.Item.Offers.Offer').
    :return:
        String or None.
    """
    element = self._safe_get_element(path, root)
    if element:
        return element.text
    else:
        return None
def _safe_get_element_date(self, path, root=None):
    """Safe get element date.

    Get element as datetime.date or None.

    :param root:
        Lxml element.
    :param path:
        String path (i.e. 'Items.Item.Offers.Offer').
    :return:
        datetime.date or None.
    """
    value = self._safe_get_element_text(path=path, root=root)
    if value is not None:
        try:
            value = datetime.datetime.strptime(value, '%Y-%m-%d').date()
        except ValueError:
            value = None
    return value
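# A minimal sketch of how the three _safe_get_element* helpers above
# compose; the `product` object and the paths are illustrative, not from
# the source. Missing intermediate nodes yield None instead of raising
# AttributeError, so callers can branch on None.
title = product._safe_get_element_text('ItemAttributes.Title')
release = product._safe_get_element_date('ItemAttributes.ReleaseDate')
offer = product._safe_get_element('Offers.Offer.OfferListing')
if offer is None:
    print('no offer listing for', title, release)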
def price_and_currency(self):
    """Get Offer Price and Currency.

    Return price according to the following process:

    * If product has a sale, return Sales Price, otherwise,
    * Return Price, otherwise,
    * Return lowest offer price, otherwise,
    * Return None.

    :return:
        A tuple containing:

            1. Float representation of price.
            2. ISO Currency code (string).
    """
    price = self._safe_get_element_text(
        'Offers.Offer.OfferListing.SalePrice.Amount')
    if price:
        currency = self._safe_get_element_text(
            'Offers.Offer.OfferListing.SalePrice.CurrencyCode')
    else:
        price = self._safe_get_element_text(
            'Offers.Offer.OfferListing.Price.Amount')
        if price:
            currency = self._safe_get_element_text(
                'Offers.Offer.OfferListing.Price.CurrencyCode')
        else:
            price = self._safe_get_element_text(
                'OfferSummary.LowestNewPrice.Amount')
            currency = self._safe_get_element_text(
                'OfferSummary.LowestNewPrice.CurrencyCode')
    if price:
        return float(price) / 100, currency
    else:
        return None, None
def list_price(self):
    """List Price.

    :return:
        A tuple containing:

            1. Float representation of price.
            2. ISO Currency code (string).
    """
    price = self._safe_get_element_text('ItemAttributes.ListPrice.Amount')
    currency = self._safe_get_element_text(
        'ItemAttributes.ListPrice.CurrencyCode')
    if price:
        return float(price) / 100, currency
    else:
        return None, None
def send(self, request, **kw):
    """
    Send a request. Use the request information to see if it
    exists in the cache and cache the response if we need to and can.
    """
    if request.method == 'GET':
        cached_response = self.controller.cached_request(request)
        if cached_response:
            return self.build_response(request, cached_response,
                                       from_cache=True)

        # check for etags and add headers if appropriate
        request.headers.update(
            self.controller.conditional_headers(request)
        )

    resp = super(CacheControlAdapter, self).send(request, **kw)

    return resp
def build_response(self, request, response, from_cache=False):
    """
    Build a response by making a request or using the cache.

    This will end up calling send and returning a potentially
    cached response
    """
    if not from_cache and request.method == 'GET':

        # apply any expiration heuristics
        if response.status == 304:
            # We must have sent an ETag request. This could mean
            # that we've been expired already or that we simply
            # have an etag. In either case, we want to try and
            # update the cache if that is the case.
            cached_response = self.controller.update_cached_response(
                request, response
            )

            if cached_response is not response:
                from_cache = True

            # We are done with the server response, read a
            # possible response body (compliant servers will
            # not return one, but we cannot be 100% sure) and
            # release the connection back to the pool.
            response.read(decode_content=False)
            response.release_conn()

            response = cached_response

        # We always cache the 301 responses
        elif response.status == 301:
            self.controller.cache_response(request, response)
        else:
            # Check for any heuristics that might update headers
            # before trying to cache.
            if self.heuristic:
                response = self.heuristic.apply(response)

            # Wrap the response file with a wrapper that will cache the
            # response when the stream has been consumed.
            response._fp = CallbackFileWrapper(
                response._fp,
                functools.partial(
                    self.controller.cache_response,
                    request,
                    response,
                )
            )

    resp = super(CacheControlAdapter, self).build_response(
        request, response
    )

    # See if we should invalidate the cache.
    if request.method in self.invalidating_methods and resp.ok:
        cache_url = self.controller.cache_url(request.url)
        self.cache.delete(cache_url)

    # Give the request a from_cache attr to let people use it
    resp.from_cache = from_cache

    return resp
def make_attrgetter(environment, attribute):
    """Returns a callable that looks up the given attribute from a
    passed object with the rules of the environment.  Dots are allowed
    to access attributes of attributes.  Integer parts in paths are
    looked up as integers.
    """
    if not isinstance(attribute, string_types) \
       or ('.' not in attribute and not attribute.isdigit()):
        return lambda x: environment.getitem(x, attribute)
    attribute = attribute.split('.')

    def attrgetter(item):
        for part in attribute:
            if part.isdigit():
                part = int(part)
            item = environment.getitem(item, part)
        return item

    return attrgetter
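# A minimal sketch of make_attrgetter's dotted-path lookup, calling the
# function defined above with a real jinja2 Environment; the data is
# illustrative, not from the source.
from jinja2 import Environment

env = Environment()
getter = make_attrgetter(env, 'user.name')
print(getter({'user': {'name': 'alice'}}))  # -> 'alice'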
def do_title(s):
    """Return a titlecased version of the value. I.e. words will start with
    uppercase letters, all remaining characters are lowercase.
    """
    rv = []
    # re.UNICODE replaces the trailing inline '(?u)' flag, which newer
    # Python versions reject when it is not at the start of the pattern.
    for item in re.compile(r'([-\s]+)', re.UNICODE).split(s):
        if not item:
            continue
        rv.append(item[0].upper() + item[1:].lower())
    return ''.join(rv)
def do_dictsort(value, case_sensitive=False, by='key'):
    """Sort a dict and yield (key, value) pairs. Because python dicts are
    unsorted you may want to use this function to order them by either
    key or value:

    .. sourcecode:: jinja

        {% for item in mydict|dictsort %}
            sort the dict by key, case insensitive

        {% for item in mydict|dictsort(true) %}
            sort the dict by key, case sensitive

        {% for item in mydict|dictsort(false, 'value') %}
            sort the dict by value, case insensitive
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either '
                                  '"key" or "value"')

    def sort_func(item):
        value = item[pos]
        if isinstance(value, string_types) and not case_sensitive:
            value = value.lower()
        return value

    return sorted(value.items(), key=sort_func)
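# A minimal sketch of dictsort behavior, calling the filter above as a
# plain function; the data is illustrative, not from the source.
grades = {'bob': 72, 'Alice': 91, 'carol': 85}
print(do_dictsort(grades))              # key order: Alice, bob, carol
print(do_dictsort(grades, by='value'))  # value order: bob, carol, Alice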
def do_sort(environment, value, reverse=False, case_sensitive=False,
            attribute=None):
    """Sort an iterable.  Per default it sorts ascending, if you pass it
    true as first argument it will reverse the sorting.

    If the iterable is made of strings the third parameter can be used to
    control the case sensitiveness of the comparison which is disabled by
    default.

    .. sourcecode:: jinja

        {% for item in iterable|sort %}
            ...
        {% endfor %}

    It is also possible to sort by an attribute (for example to sort
    by the date of an object) by specifying the `attribute` parameter:

    .. sourcecode:: jinja

        {% for item in iterable|sort(attribute='date') %}
            ...
        {% endfor %}

    .. versionchanged:: 2.6
       The `attribute` parameter was added.
    """
    if not case_sensitive:
        def sort_func(item):
            if isinstance(item, string_types):
                item = item.lower()
            return item
    else:
        sort_func = None
    if attribute is not None:
        getter = make_attrgetter(environment, attribute)

        def sort_func(item, processor=sort_func or (lambda x: x)):
            return processor(getter(item))
    return sorted(value, key=sort_func, reverse=reverse)
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.

    If you for example have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by genders you can do something like the following
    snippet:

    .. sourcecode:: html+jinja

        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>

    Additionally it's possible to use tuple unpacking for the grouper and
    list:

    .. sourcecode:: html+jinja

        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>

    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.

    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    expr = make_attrgetter(environment, attribute)
    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
def do_map(*args, **kwargs):
    """Applies a filter on a sequence of objects or looks up an attribute.
    This is useful when dealing with lists of objects but you are really
    only interested in a certain value of it.

    The basic usage is mapping on an attribute.  Imagine you have a list
    of users but you are only interested in a list of usernames:

    .. sourcecode:: jinja

        Users on this page: {{ users|map(attribute='username')|join(', ') }}

    Alternatively you can let it invoke a filter by passing the name of the
    filter and the arguments afterwards.  A good example would be applying a
    text conversion filter on a sequence:

    .. sourcecode:: jinja

        Users on this page: {{ titles|map('lower')|join(', ') }}

    .. versionadded:: 2.7
    """
    context = args[0]
    seq = args[1]

    if len(args) == 2 and 'attribute' in kwargs:
        attribute = kwargs.pop('attribute')
        if kwargs:
            raise FilterArgumentError('Unexpected keyword argument %r' %
                                      next(iter(kwargs)))
        func = make_attrgetter(context.environment, attribute)
    else:
        try:
            name = args[2]
            args = args[3:]
        except LookupError:
            raise FilterArgumentError('map requires a filter argument')
        func = lambda item: context.environment.call_filter(
            name, item, args, kwargs, context=context)

    if seq:
        for item in seq:
            yield func(item)
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    Logger = getLoggerClass()

    class DebugLogger(Logger):
        def getEffectiveLevel(x):
            if x.level == 0 and app.debug:
                return DEBUG
            return Logger.getEffectiveLevel(x)

    class DebugHandler(StreamHandler):
        def emit(x, record):
            if app.debug:
                StreamHandler.emit(x, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # just in case that was not a new logger, get rid of all the handlers
    # already attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
def constant_time_compare(val1, val2):
    """Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match.
    Do not use this function for anything else than comparison with known
    length targets.

    This should be implemented in C in order to get it completely right.
    """
    if _builtin_constant_time_compare is not None:
        return _builtin_constant_time_compare(val1, val2)
    len_eq = len(val1) == len(val2)
    if len_eq:
        result = 0
        left = val1
    else:
        result = 1
        left = val2
    for x, y in izip(bytearray(left), bytearray(val2)):
        result |= x ^ y
    return result == 0
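# A minimal usage sketch for constant_time_compare(); the token values are
# illustrative, not from the source. On modern Pythons the stdlib
# equivalent is hmac.compare_digest, which (as an assumption here) is what
# _builtin_constant_time_compare typically points at when available.
expected = b'secret-token'
if constant_time_compare(b'secret-token', expected):
    print('match')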
def base64_encode(string):
    """base64 encodes a single bytestring (and is tolerant to getting
    called with a unicode string).
    The resulting bytestring is safe for putting into URLs.
    """
    string = want_bytes(string)
    return base64.urlsafe_b64encode(string).strip(b'=')
def base64_decode(string):
    """base64 decodes a single bytestring (and is tolerant to getting
    called with a unicode string).
    The result is also a bytestring.
    """
    string = want_bytes(string, encoding='ascii', errors='ignore')
    return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
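# A minimal round-trip sketch for the unpadded URL-safe base64 helpers
# above; the payload is illustrative, not from the source. The encoder
# strips the trailing '=' padding and the decoder restores it via
# b'=' * (-len(string) % 4).
encoded = base64_encode(b'binary\x00payload')
assert base64_decode(encoded) == b'binary\x00payload'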
def verify_signature(self, key, value, sig):
    """Verifies the given signature matches the expected signature"""
    return constant_time_compare(sig, self.get_signature(key, value))
def derive_key(self):
    """This method is called to derive the key.  If you're unhappy with
    the default key derivation choices you can override them here.
    Keep in mind that the key derivation in itsdangerous is not intended
    to be used as a security method to make a complex key out of a short
    password.  Instead you should use large random secret keys.
    """
    salt = want_bytes(self.salt)
    if self.key_derivation == 'concat':
        return self.digest_method(salt + self.secret_key).digest()
    elif self.key_derivation == 'django-concat':
        return self.digest_method(salt + b'signer' +
                                  self.secret_key).digest()
    elif self.key_derivation == 'hmac':
        mac = hmac.new(self.secret_key, digestmod=self.digest_method)
        mac.update(salt)
        return mac.digest()
    elif self.key_derivation == 'none':
        return self.secret_key
    else:
        raise TypeError('Unknown key derivation method')
def get_signature(self, value):
    """Returns the signature for the given value"""
    value = want_bytes(value)
    key = self.derive_key()
    sig = self.algorithm.get_signature(key, value)
    return base64_encode(sig)
def sign(self, value):
    """Signs the given string."""
    return value + want_bytes(self.sep) + self.get_signature(value)
def verify_signature(self, value, sig):
    """Verifies the signature for the given value."""
    key = self.derive_key()
    try:
        sig = base64_decode(sig)
    except Exception:
        return False
    return self.algorithm.verify_signature(key, value, sig)
def unsign(self, signed_value):
    """Unsigns the given string."""
    signed_value = want_bytes(signed_value)
    sep = want_bytes(self.sep)
    if sep not in signed_value:
        raise BadSignature('No %r found in value' % self.sep)
    value, sig = signed_value.rsplit(sep, 1)
    if self.verify_signature(value, sig):
        return value
    raise BadSignature('Signature %r does not match' % sig,
                       payload=value)
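# A minimal sign/unsign round trip with itsdangerous' Signer, whose sign(),
# verify_signature(), and unsign() methods are shown above; the secret key
# and payload are illustrative, not from the source.
from itsdangerous import Signer, BadSignature

s = Signer('secret-key')
token = s.sign('my string')           # b'my string.<base64 signature>'
assert s.unsign(token) == b'my string'
try:
    s.unsign(b'my string.tampered')   # a forged signature is rejected
except BadSignature as e:
    print('rejected:', e)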
def sign(self, value):
    """Signs the given string and also attaches time information."""
    value = want_bytes(value)
    timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
    sep = want_bytes(self.sep)
    value = value + sep + timestamp
    return value + sep + self.get_signature(value)
def unsign(self, value, max_age=None, return_timestamp=False):
    """Works like the regular :meth:`~Signer.unsign` but can also
    validate the time.  See the base docstring of the class for
    the general behavior.  If `return_timestamp` is set to `True`
    the timestamp of the signature will be returned as naive
    :class:`datetime.datetime` object in UTC.
    """
    try:
        result = Signer.unsign(self, value)
        sig_error = None
    except BadSignature as e:
        sig_error = e
        result = e.payload or b''
    sep = want_bytes(self.sep)

    # If there is no timestamp in the result there is something
    # seriously wrong.  In case there was a signature error, we raise
    # that one directly; otherwise we are in a situation we should only
    # end up in if someone uses a time-based serializer on
    # non-timestamp data, so catch that.
    if sep not in result:
        if sig_error:
            raise sig_error
        raise BadTimeSignature('timestamp missing', payload=result)

    value, timestamp = result.rsplit(sep, 1)
    try:
        timestamp = bytes_to_int(base64_decode(timestamp))
    except Exception:
        timestamp = None

    # Signature is *not* okay.  Raise a proper error now that we have
    # split the value and the timestamp.
    if sig_error is not None:
        raise BadTimeSignature(text_type(sig_error), payload=value,
                               date_signed=timestamp)

    # Signature was okay but the timestamp is actually not there or
    # malformed.  Should not happen, but well.  We handle it nonetheless.
    if timestamp is None:
        raise BadTimeSignature('Malformed timestamp', payload=value)

    # Check timestamp is not older than max_age
    if max_age is not None:
        age = self.get_timestamp() - timestamp
        if age > max_age:
            raise SignatureExpired(
                'Signature age %s > %s seconds' % (age, max_age),
                payload=value,
                date_signed=self.timestamp_to_datetime(timestamp))

    if return_timestamp:
        return value, self.timestamp_to_datetime(timestamp)
    return value
def validate(self, signed_value, max_age=None):
    """Just validates the given signed value.  Returns `True` if the
    signature exists and is valid, `False` otherwise."""
    try:
        self.unsign(signed_value, max_age=max_age)
        return True
    except BadSignature:
        return False
def load_payload(self, payload, serializer=None):
    """Loads the encoded object.  This function raises :class:`BadPayload`
    if the payload is not valid.  The `serializer` parameter can be used to
    override the serializer stored on the class.  The encoded payload is
    always byte based.
    """
    if serializer is None:
        serializer = self.serializer
        is_text = self.is_text_serializer
    else:
        is_text = is_text_serializer(serializer)
    try:
        if is_text:
            payload = payload.decode('utf-8')
        return serializer.loads(payload)
    except Exception as e:
        raise BadPayload('Could not load the payload because an '
                         'exception occurred on unserializing the data',
                         original_error=e)
def make_signer(self, salt=None):
    """A method that creates a new instance of the signer to be used.
    The default implementation uses the :class:`Signer` baseclass.
    """
    if salt is None:
        salt = self.salt
    return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def dumps(self, obj, salt=None):
    """Returns a signed string serialized with the internal serializer.
    The return value can be either a byte or unicode string depending
    on the format of the internal serializer.
    """
    payload = want_bytes(self.dump_payload(obj))
    rv = self.make_signer(salt).sign(payload)
    if self.is_text_serializer:
        rv = rv.decode('utf-8')
    return rv
def dump(self, obj, f, salt=None):
    """Like :meth:`dumps` but dumps into a file.  The file handle has
    to be compatible with what the internal serializer expects.
    """
    f.write(self.dumps(obj, salt))
def loads(self, s, salt=None):
    """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
    signature validation fails.
    """
    s = want_bytes(s)
    return self.load_payload(self.make_signer(salt).unsign(s))
def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
                       load_payload_kwargs=None):
    """Low-level helper function to implement :meth:`loads_unsafe`
    in serializer subclasses.
    """
    try:
        return True, self.loads(s, salt=salt, **(load_kwargs or {}))
    except BadSignature as e:
        if e.payload is None:
            return False, None
        try:
            return False, self.load_payload(e.payload,
                                            **(load_payload_kwargs or {}))
        except BadPayload:
            return False, None
def load_unsafe(self, f, *args, **kwargs):
    """Like :meth:`loads_unsafe` but loads from a file.

    .. versionadded:: 0.15
    """
    return self.loads_unsafe(f.read(), *args, **kwargs)
def loads(self, s, max_age=None, return_timestamp=False, salt=None):
    """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
    signature validation fails.  If a `max_age` is provided it will
    ensure the signature is not older than that time in seconds.  In
    case the signature is outdated, :exc:`SignatureExpired` is raised
    which is a subclass of :exc:`BadSignature`.  All arguments are
    forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
    """
    base64d, timestamp = self.make_signer(salt) \
        .unsign(s, max_age, return_timestamp=True)
    payload = self.load_payload(base64d)
    if return_timestamp:
        return payload, timestamp
    return payload
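# A minimal expiry sketch with itsdangerous' URLSafeTimedSerializer, which
# goes through the timestamped loads/unsign path above; the secret key,
# payload, and max_age are illustrative, not from the source.
from itsdangerous import URLSafeTimedSerializer, SignatureExpired

ts = URLSafeTimedSerializer('secret-key')
token = ts.dumps({'user_id': 42})
try:
    data = ts.loads(token, max_age=3600)  # reject tokens older than an hour
except SignatureExpired:
    data = None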
def dumps(self, obj, salt=None, header_fields=None):
    """Like :meth:`~Serializer.dumps` but creates a JSON Web
    Signature.  It also allows for specifying additional fields to be
    included in the JWS Header.
    """
    header = self.make_header(header_fields)
    signer = self.make_signer(salt, self.algorithm)
    return signer.sign(self.dump_payload(header, obj))
def loads(self, s, salt=None, return_header=False):
    """Reverse of :meth:`dumps`. If requested via `return_header` it will
    return a tuple of payload and header.
    """
    payload, header = self.load_payload(
        self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
        return_header=True)
    if header.get('alg') != self.algorithm_name:
        raise BadHeader('Algorithm mismatch', header=header,
                        payload=payload)
    if return_header:
        return payload, header
    return payload
def server_error(request_id, error):
    """JSON-RPC server error.

    :param request_id: JSON-RPC request id
    :type request_id: int or str or None
    :param error: server error
    :type error: Exception
    """
    response = {
        'jsonrpc': '2.0',
        'id': request_id,
        'error': {
            'code': -32000,
            'message': 'Server error',
            'data': repr(error),
        },
    }
    raise ServiceException(500, dumps(response))
def findall(dir=os.curdir):
    """Find all files under 'dir' and return the list of full filenames
    (relative to 'dir').
    """
    all_files = []
    for base, dirs, files in os.walk(dir, followlinks=True):
        if base == os.curdir or base.startswith(os.curdir + os.sep):
            base = base[2:]
        if base:
            files = [os.path.join(base, f) for f in files]
        all_files.extend(filter(os.path.isfile, files))
    return all_files
def find(cls, where='.', exclude=(), include=('*',)):
    """Return a list of all Python packages found within directory 'where'

    'where' should be supplied as a "cross-platform" (i.e. URL-style)
    path; it will be converted to the appropriate local path syntax.
    'exclude' is a sequence of package names to exclude; '*' can be used
    as a wildcard in the names, such that 'foo.*' will exclude all
    subpackages of 'foo' (but not 'foo' itself).

    'include' is a sequence of package names to include.  If it's
    specified, only the named packages will be included.  If it's not
    specified, all found packages will be included.  'include' can contain
    shell style wildcard patterns just like 'exclude'.

    The list of included packages is built up first and then any
    explicitly excluded packages are removed from it.
    """
    out = cls._find_packages_iter(convert_path(where))
    out = cls.require_parents(out)
    includes = cls._build_filter(*include)
    excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude)
    out = filter(includes, out)
    out = filterfalse(excludes, out)
    return list(out)
def require_parents(packages):
    """
    Exclude any apparent package that doesn't include its parent.
    For example, exclude 'foo.bar' if 'foo' is not present.
    """
    found = []
    for pkg in packages:
        base, sep, child = pkg.rpartition('.')
        if base and base not in found:
            continue
        found.append(pkg)
        yield pkg
def _all_dirs(base_path):
    """
    Return all dirs in base_path, relative to base_path.
    """
    for root, dirs, files in os.walk(base_path, followlinks=True):
        for dir in dirs:
            yield os.path.relpath(os.path.join(root, dir), base_path)
def prepare_response(self, request, cached):
    """Verify our vary headers match and construct a real urllib3
    HTTPResponse object.
    """
    # Special case the '*' Vary value as it means we cannot actually
    # determine if the cached response is suitable for this request.
    if "*" in cached.get("vary", {}):
        return

    # Ensure that the Vary headers for the cached response match our
    # request
    for header, value in cached.get("vary", {}).items():
        if request.headers.get(header, None) != value:
            return

    body_raw = cached["response"].pop("body")

    try:
        body = io.BytesIO(body_raw)
    except TypeError:
        # This can happen if cachecontrol serialized to v1 format (pickle)
        # using Python 2. A Python 2 str(byte string) will be unpickled as
        # a Python 3 str (unicode string), which will cause the above to
        # fail with:
        #
        #     TypeError: 'str' does not support the buffer interface
        body = io.BytesIO(body_raw.encode('utf8'))

    return HTTPResponse(
        body=body,
        preload_content=False,
        **cached["response"]
    )
def keygen(get_keyring=get_keyring):
    """Generate a public/private key pair."""
    WheelKeys, keyring = get_keyring()

    ed25519ll = signatures.get_ed25519ll()

    wk = WheelKeys().load()

    keypair = ed25519ll.crypto_sign_keypair()
    vk = native(urlsafe_b64encode(keypair.vk))
    sk = native(urlsafe_b64encode(keypair.sk))
    kr = keyring.get_keyring()
    kr.set_password("wheel", vk, sk)
    sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk))
    if isinstance(kr, keyring.backends.file.BaseKeyring):
        sys.stdout.write("in {0}\n".format(kr.file_path))
    else:
        sys.stdout.write("in %r\n" % kr.__class__)

    sk2 = kr.get_password('wheel', vk)
    if sk2 != sk:
        raise WheelError("Keyring is broken. Could not retrieve secret key.")

    sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk))
    wk.add_signer('+', vk)
    wk.trust('+', vk)
    wk.save()
def unsign(wheelfile):
    """
    Remove RECORD.jws from a wheel by truncating the zip file.

    RECORD.jws must be at the end of the archive. The zip file must be an
    ordinary archive, with the compressed files and the directory in the same
    order, and without any non-zip content after the truncation point.
    """
    import wheel.install

    vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
    info = vzf.infolist()
    if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
        raise WheelError("RECORD.jws not found at end of archive.")
    vzf.pop()
    vzf.close()
def verify(wheelfile):
    """Verify a wheel.

    The signature will be verified for internal consistency ONLY and printed.
    Wheel's own unpack/install commands verify the manifest against the
    signature and file contents.
    """
    wf = WheelFile(wheelfile)
    sig_name = wf.distinfo_name + '/RECORD.jws'
    sig = json.loads(native(wf.zipfile.open(sig_name).read()))

    verified = signatures.verify(sig)
    sys.stderr.write("Signatures are internally consistent.\n")
    sys.stdout.write(json.dumps(verified, indent=2))
    sys.stdout.write('\n')
def unpack(wheelfile, dest='.'):
    """Unpack a wheel.

    Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
    is the package name and {ver} its version.

    :param wheelfile: The path to the wheel.
    :param dest: Destination directory (defaults to the current directory).
    """
    wf = WheelFile(wheelfile)
    namever = wf.parsed_filename.group('namever')
    destination = os.path.join(dest, namever)
    sys.stderr.write("Unpacking to: %s\n" % (destination))
    wf.zipfile.extractall(destination)
    wf.zipfile.close()
def install(requirements, requirements_file=None,
            wheel_dirs=None, force=False, list_files=False,
            dry_run=False):
    """Install wheels.

    :param requirements: A list of requirements or wheel files to install.
    :param requirements_file: A file containing requirements to install.
    :param wheel_dirs: A list of directories to search for wheels.
    :param force: Install a wheel file even if it is not compatible.
    :param list_files: Only list the files to install, don't install them.
    :param dry_run: Do everything but the actual install.
    """

    # If no wheel directories specified, use the WHEELPATH environment
    # variable, or the current directory if that is not set.
    if not wheel_dirs:
        wheelpath = os.getenv("WHEELPATH")
        if wheelpath:
            wheel_dirs = wheelpath.split(os.pathsep)
        else:
            wheel_dirs = [os.path.curdir]

    # Get a list of all valid wheels in wheel_dirs
    all_wheels = []
    for d in wheel_dirs:
        for w in os.listdir(d):
            if w.endswith('.whl'):
                wf = WheelFile(os.path.join(d, w))
                if wf.compatible:
                    all_wheels.append(wf)

    # If there is a requirements file, add it to the list of requirements
    if requirements_file:
        # If the file doesn't exist, search for it in wheel_dirs
        # This allows standard requirements files to be stored with the
        # wheels.
        if not os.path.exists(requirements_file):
            for d in wheel_dirs:
                name = os.path.join(d, requirements_file)
                if os.path.exists(name):
                    requirements_file = name
                    break

        with open(requirements_file) as fd:
            requirements.extend(fd)

    to_install = []
    for req in requirements:
        if req.endswith('.whl'):
            # Explicitly specified wheel filename
            if os.path.exists(req):
                wf = WheelFile(req)
                if wf.compatible or force:
                    to_install.append(wf)
                else:
                    msg = ("{0} is not compatible with this Python. "
                           "--force to install anyway.".format(req))
                    raise WheelError(msg)
            else:
                # We could search on wheel_dirs, but it's probably OK to
                # assume the user has made an error.
                raise WheelError("No such wheel file: {}".format(req))
            continue

        # We have a requirement spec
        # If we don't have pkg_resources, this will raise an exception
        matches = matches_requirement(req, all_wheels)
        if not matches:
            raise WheelError("No match for requirement {}".format(req))
        to_install.append(max(matches))

    # We now have a list of wheels to install
    if list_files:
        sys.stdout.write("Installing:\n")

    if dry_run:
        return

    for wf in to_install:
        if list_files:
            sys.stdout.write("    {0}\n".format(wf.filename))
            continue
        wf.install(force=force)
        wf.zipfile.close()
def install_scripts(distributions):
    """
    Regenerate the entry_points console_scripts for the named distribution.
    """
    try:
        from setuptools.command import easy_install
        import pkg_resources
        import wheel.paths
    except ImportError:
        raise RuntimeError("'wheel install_scripts' needs setuptools.")

    for dist in distributions:
        pkg_resources_dist = pkg_resources.get_distribution(dist)
        install = wheel.paths.get_install_command(dist)
        command = easy_install.easy_install(install.distribution)
        command.args = ['wheel']  # dummy argument
        command.finalize_options()
        command.install_egg_scripts(pkg_resources_dist)
def arrange_all(self):
    """ Sets the _draw_ and _ldraw_ attributes for each of the graph
        sub-elements by processing the xdot format of the graph.
    """
    import godot.dot_data_parser

    parser = godot.dot_data_parser.GodotDataParser()
    xdot_data = self.create(format="xdot")

    parser.dotparser.parseWithTabs()
    ndata = xdot_data.replace("\\\n", "")
    tokens = parser.dotparser.parseString(ndata)[0]
    parser.build_graph(graph=self, tokens=tokens[3])

    self.redraw_canvas()
def redraw_canvas(self):
    """ Parses the Xdot attributes of all graph components and adds
        the components to a new canvas.
    """
    from xdot_parser import XdotAttrParser

    xdot_parser = XdotAttrParser()
    canvas = self._component_default()

    # Nodes carry drawing and label-drawing attributes; edges add
    # head/tail arrow and head/tail label attributes.
    for node in self.nodes:
        for attr in ("_draw_", "_ldraw_"):
            components = xdot_parser.parse_xdot_data(getattr(node, attr))
            canvas.add(*components)

    for edge in self.edges:
        for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_",
                     "_hldraw_", "_tldraw_"):
            components = xdot_parser.parse_xdot_data(getattr(edge, attr))
            canvas.add(*components)

    self.component = canvas
    self.vp.request_redraw()
def get_node(self, ID):
    """ Returns a node given an ID or None if no such node exists.
    """
    node = super(Graph, self).get_node(ID)
    if node is not None:
        return node

    # Search every graph: the original for/else returned None as soon as
    # the first graph's nodes were exhausted, skipping later graphs.
    for graph in self.all_graphs:
        for each_node in graph.nodes:
            if each_node.ID == ID:
                return each_node
    return None
def _maxiter_default(self): """ Trait initialiser. """ mode = self.mode if mode == "KK": return 100 * len(self.nodes) elif mode == "major": return 200 else: return 600
def _get_all_graphs(self): """ Property getter. """ top_graph = self def get_subgraphs(graph): assert isinstance(graph, BaseGraph) subgraphs = graph.subgraphs[:] for subgraph in graph.subgraphs: subsubgraphs = get_subgraphs(subgraph) subgraphs.extend(subsubgraphs) return subgraphs subgraphs = get_subgraphs(top_graph) return [top_graph] + subgraphs
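# Minimal standalone sketch of the recursive flattening performed by
# _get_all_graphs(), using a hypothetical stand-in class instead of
# BaseGraph: the result is the root graph followed by every nested
# subgraph (here gathered depth first as a variant of the same idea).
class _StubGraph(object):
    def __init__(self, subgraphs=None):
        self.subgraphs = subgraphs or []

def _flatten(graph):
    result = [graph]
    for subgraph in graph.subgraphs:
        result.extend(_flatten(subgraph))
    return result

root = _StubGraph([_StubGraph([_StubGraph()])])
assert len(_flatten(root)) == 3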
def _directed_changed(self, new): """ Sets the connection string for all edges. """ if new: conn = "->" else: conn = "--" for edge in [e for g in self.all_graphs for e in g.edges]: edge.conn = conn
def _on_nodes(self): """ Maintains each branch's list of available nodes in order that they may move themselves (InstanceEditor values). """ all_graphs = self.all_graphs all_nodes = [n for g in all_graphs for n in g.nodes] for graph in all_graphs: for edge in graph.edges: edge._nodes = all_nodes
def _on_edges(self, object, name, old, new):
    """ Handles the list of edges for any graph changing.
    """
    if name == "edges_items":
        edges = new.added
    elif name == "edges":
        edges = new
    else:
        edges = []

    all_nodes = [n for g in self.all_graphs for n in g.nodes]

    for each_edge in edges:
        # Ensure the edge's nodes exist in the graph.
        if each_edge.tail_node not in all_nodes:
            object.nodes.append(each_edge.tail_node)
        if each_edge.head_node not in all_nodes:
            object.nodes.append(each_edge.head_node)

        # Initialise the edge's list of available nodes.
        each_edge._nodes = all_nodes
def _support(self, caller): """Helper callback.""" markdown_content = caller() html_content = markdown.markdown( markdown_content, extensions=[ "markdown.extensions.fenced_code", CodeHiliteExtension(css_class="highlight"), "markdown.extensions.tables", ], ) return html_content
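# Hedged sketch of the markdown pipeline used by _support(); assumes the
# third-party "markdown" package with its codehilite extension is
# installed. The sample document is illustrative only.
import markdown
from markdown.extensions.codehilite import CodeHiliteExtension

html = markdown.markdown(
    "# Title\n\n```python\nprint(1)\n```\n",
    extensions=[
        "markdown.extensions.fenced_code",
        CodeHiliteExtension(css_class="highlight"),
        "markdown.extensions.tables",
    ],
)
print(html)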
def _viewport_default(self): """ Trait initialiser """ viewport = Viewport(component=self.canvas, enable_zoom=True) viewport.tools.append(ViewportPanTool(viewport)) return viewport
def _component_changed(self, old, new): """ Handles the component being changed. """ canvas = self.canvas if old is not None: canvas.remove(old) if new is not None: canvas.add(new)
def _code_support(self, language, caller):
    """Helper callback."""
    code = caller()

    # Strip the leading whitespace (measured from the first non-empty
    # line) so the whole block can be indented freely in the page source.
    lines = code.splitlines()
    first_nonempty_line_index = 0
    while (first_nonempty_line_index < len(lines)
           and not lines[first_nonempty_line_index]):
        first_nonempty_line_index += 1
    if first_nonempty_line_index < len(lines):
        len_to_trim = len(lines[first_nonempty_line_index]) - len(
            lines[first_nonempty_line_index].lstrip()
        )
        lines = [x[len_to_trim:] for x in lines]
    code = "\n".join(lines)

    if language:
        lexer = get_lexer_by_name(language, stripall=True)
    else:
        lexer = guess_lexer(code)
    highlighted = highlight(code, lexer, HtmlFormatter())
    return highlighted
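# A stdlib alternative to the manual de-indentation in _code_support():
# textwrap.dedent removes the whitespace prefix common to all non-blank
# lines. This is a sketch of the idea, not a drop-in replacement, since
# _code_support() keys off the first non-empty line only.
import textwrap

code = textwrap.dedent("""
    def greet():
        return "hi"
""").strip("\n")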
def normal_left_dclick(self, event):
    """ Handles the left mouse button being double-clicked when the
        tool is in the 'normal' state.

        If the event occurred on this tool's component (or any
        contained component of that component), the method opens a
        Traits UI view on the object referenced by the 'element' trait
        of the component that was double-clicked, setting the tool as
        the active tool for the duration of the view.
    """
    component = self.component

    # Hit-testing against nested container components is not currently
    # performed; the event is handled directly by this tool's component.
    if hasattr(component, "element") and component.element is not None:
        component.active_tool = self
        component.element.edit_traits(kind="livemodal")
        event.handled = True
        component.active_tool = None
        component.request_redraw()
    return
def _diagram_canvas_default(self): """ Trait initialiser """ canvas = Canvas() for tool in self.tools: canvas.tools.append(tool(canvas)) return canvas
def _viewport_default(self):
    """ Trait initialiser """
    vp = Viewport(component=self.diagram_canvas, enable_zoom=True)
    vp.view_position = [0, 0]
    vp.tools.append(ViewportPanTool(vp))
    return vp
def _diagram_canvas_changed(self, new):
    """ Handles the diagram canvas being set """
    logger.debug("Diagram canvas changed!")
    canvas = self.diagram_canvas
    if canvas is not None:
        for tool in self.tools:
            logger.debug("Adding tool: %s", tool)
            canvas.tools.append(tool(canvas))
def clear_canvas(self):
    """ Removes all components from the canvas by replacing it with a
        fresh one.
    """
    logger.debug("Clearing the diagram canvas!")
    old_canvas = self.diagram_canvas

    # Replace the canvas wholesale rather than removing components one
    # by one, preserving the display traits of the old canvas.
    new_canvas = Canvas()
    new_canvas.copy_traits(old_canvas, ["bgcolor", "draw_axes"])

    self.diagram_canvas = new_canvas
    self.viewport.component = new_canvas
    self.viewport.request_redraw()